* [PATCH v3 1/7] DSPBRIDGE: enhance dmm_map_object
2010-05-27 16:02 [PATCH v3 0/7] DSPBRIDGE: fix mem+cache API issues Ohad Ben-Cohen
@ 2010-05-27 16:02 ` Ohad Ben-Cohen
2010-05-27 16:02 ` [PATCH v3 2/7] DSPBRIDGE: maintain mapping and page info Ohad Ben-Cohen
` (6 subsequent siblings)
7 siblings, 0 replies; 11+ messages in thread
From: Ohad Ben-Cohen @ 2010-05-27 16:02 UTC (permalink / raw)
To: linux-omap
Cc: Felipe Contreras, Ivan Gomez Castellanos, Kanigeri Hari,
Omar Ramirez Luna, Guzman Lugo Fernando, Menon Nishanth,
Hiroshi Doyu, Ohad Ben-Cohen
Enhance dmm_map_object to support additional mapping
and page information. This will be used to keep
track of mapping resources needed later to invoke
DMA transfers to/from the DSP.
Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
---
If you want, you can also reach me at < ohadb at ti dot com >.
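For illustration only (a sketch, not part of this patch): the new fields are
meant to describe a single proc_map call, roughly as follows, with PG_SIZE4K
being the bridge's 4 KiB page size; patch 2 fills them in add_mapping_info():

	/* sketch: one dmm_map_object per proc_map call */
	map_obj->dsp_addr    = dsp_addr;		/* DSP-side virtual address */
	map_obj->mpu_addr    = mpu_addr;		/* MPU-side (user) address */
	map_obj->size        = size;			/* mapping length in bytes */
	map_obj->num_usr_pgs = size / PG_SIZE4K;	/* 4 KiB pages spanned */
	map_obj->pages       = kcalloc(map_obj->num_usr_pgs,
				       sizeof(struct page *), GFP_KERNEL);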
arch/arm/plat-omap/include/dspbridge/drv.h | 4 ++++
1 files changed, 4 insertions(+), 0 deletions(-)
diff --git a/arch/arm/plat-omap/include/dspbridge/drv.h b/arch/arm/plat-omap/include/dspbridge/drv.h
index 7de3323..b1312aa 100644
--- a/arch/arm/plat-omap/include/dspbridge/drv.h
+++ b/arch/arm/plat-omap/include/dspbridge/drv.h
@@ -88,6 +88,10 @@ struct node_res_object {
struct dmm_map_object {
struct list_head link;
u32 dsp_addr;
+ u32 mpu_addr;
+ u32 size;
+ u32 num_usr_pgs;
+ struct page **pages;
};
/* Used for DMM reserved memory accounting */
--
1.7.0.4
* [PATCH v3 2/7] DSPBRIDGE: maintain mapping and page info
2010-05-27 16:02 [PATCH v3 0/7] DSPBRIDGE: fix mem+cache API issues Ohad Ben-Cohen
2010-05-27 16:02 ` [PATCH v3 1/7] DSPBRIDGE: enhance dmm_map_object Ohad Ben-Cohen
@ 2010-05-27 16:02 ` Ohad Ben-Cohen
2010-05-27 16:02 ` [PATCH v3 3/7] DSPBRIDGE: do not call follow_page Ohad Ben-Cohen
` (5 subsequent siblings)
7 siblings, 0 replies; 11+ messages in thread
From: Ohad Ben-Cohen @ 2010-05-27 16:02 UTC (permalink / raw)
To: linux-omap
Cc: Felipe Contreras, Ivan Gomez Castellanos, Kanigeri Hari,
Omar Ramirez Luna, Guzman Lugo Fernando, Menon Nishanth,
Hiroshi Doyu, Ohad Ben-Cohen
Every time the MM application calls proc_map to map
a memory area, remember the details of that mapping,
together with the related page structures.
Whenever a buffer is unmapped, clean up the mapping
information resources.
This information is maintained as part of the
DMM resource tracking mechanism.
Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
---
If you want, you can also reach me at < ohadb at ti dot com >.
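In outline (a sketch of the intended lifecycle; the actual code is in the diff
below):

	/* proc_map: remember the mapping before programming the DSP MMU */
	map_obj = add_mapping_info(pr_ctxt, pa_align, tmp_addr, size_align);
	status  = (*pfn_brd_mem_map)(hbridge_context, pa_align, va_align,
				     size_align, ul_map_attr, map_obj->pages);

	/* proc_un_map: drop the cached information again */
	remove_mapping_information(pr_ctxt, (u32) map_addr, size_align);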
arch/arm/plat-omap/include/dspbridge/dspdefs.h | 3 +-
drivers/dsp/bridge/core/io_sm.c | 11 ++-
drivers/dsp/bridge/core/tiomap3430.c | 9 ++-
drivers/dsp/bridge/rmgr/proc.c | 125 ++++++++++++++++++------
4 files changed, 113 insertions(+), 35 deletions(-)
diff --git a/arch/arm/plat-omap/include/dspbridge/dspdefs.h b/arch/arm/plat-omap/include/dspbridge/dspdefs.h
index 3dfe406..f09bdbd 100644
--- a/arch/arm/plat-omap/include/dspbridge/dspdefs.h
+++ b/arch/arm/plat-omap/include/dspbridge/dspdefs.h
@@ -182,7 +182,8 @@ typedef dsp_status(*fxn_brd_memwrite) (struct bridge_dev_context
typedef dsp_status(*fxn_brd_memmap) (struct bridge_dev_context
* hDevContext, u32 ul_mpu_addr,
u32 ulVirtAddr, u32 ul_num_bytes,
- u32 ulMapAttrs);
+ u32 ulMapAttrs,
+ struct page **mapped_pages);
/*
* ======== bridge_brd_mem_un_map ========
diff --git a/drivers/dsp/bridge/core/io_sm.c b/drivers/dsp/bridge/core/io_sm.c
index d6c1a98..7fda364 100644
--- a/drivers/dsp/bridge/core/io_sm.c
+++ b/drivers/dsp/bridge/core/io_sm.c
@@ -503,7 +503,8 @@ dsp_status bridge_io_on_loaded(struct io_mgr *hio_mgr)
hio_mgr->intf_fxns->
pfn_brd_mem_map(hio_mgr->hbridge_context,
pa_curr, va_curr,
- page_size[i], map_attrs);
+ page_size[i], map_attrs,
+ NULL);
if (DSP_FAILED(status))
goto func_end;
pa_curr += page_size[i];
@@ -568,7 +569,8 @@ dsp_status bridge_io_on_loaded(struct io_mgr *hio_mgr)
hio_mgr->intf_fxns->
pfn_brd_mem_map(hio_mgr->hbridge_context,
pa_curr, va_curr,
- page_size[i], map_attrs);
+ page_size[i], map_attrs,
+ NULL);
dev_dbg(bridge,
"shm MMU PTE entry PA %x"
" VA %x DSP_VA %x Size %x\n",
@@ -636,7 +638,8 @@ dsp_status bridge_io_on_loaded(struct io_mgr *hio_mgr)
hio_mgr->ext_proc_info.ty_tlb[i].
ul_gpp_phys,
hio_mgr->ext_proc_info.ty_tlb[i].
- ul_dsp_virt, 0x100000, map_attrs);
+ ul_dsp_virt, 0x100000, map_attrs,
+ NULL);
}
}
if (DSP_FAILED(status))
@@ -655,7 +658,7 @@ dsp_status bridge_io_on_loaded(struct io_mgr *hio_mgr)
status = hio_mgr->intf_fxns->pfn_brd_mem_map
(hio_mgr->hbridge_context, l4_peripheral_table[i].phys_addr,
l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB,
- map_attrs);
+ map_attrs, NULL);
if (DSP_FAILED(status))
goto func_end;
i++;
diff --git a/drivers/dsp/bridge/core/tiomap3430.c b/drivers/dsp/bridge/core/tiomap3430.c
index c7b0d83..79d42ad 100644
--- a/drivers/dsp/bridge/core/tiomap3430.c
+++ b/drivers/dsp/bridge/core/tiomap3430.c
@@ -101,7 +101,8 @@ static dsp_status bridge_brd_mem_write(struct bridge_dev_context *dev_context,
u32 ul_num_bytes, u32 ulMemType);
static dsp_status bridge_brd_mem_map(struct bridge_dev_context *hDevContext,
u32 ul_mpu_addr, u32 ulVirtAddr,
- u32 ul_num_bytes, u32 ul_map_attr);
+ u32 ul_num_bytes, u32 ul_map_attr,
+ struct page **mapped_pages);
static dsp_status bridge_brd_mem_un_map(struct bridge_dev_context *hDevContext,
u32 ulVirtAddr, u32 ul_num_bytes);
static dsp_status bridge_dev_create(OUT struct bridge_dev_context
@@ -1208,7 +1209,8 @@ static dsp_status bridge_brd_mem_write(struct bridge_dev_context *hDevContext,
*/
static dsp_status bridge_brd_mem_map(struct bridge_dev_context *hDevContext,
u32 ul_mpu_addr, u32 ulVirtAddr,
- u32 ul_num_bytes, u32 ul_map_attr)
+ u32 ul_num_bytes, u32 ul_map_attr,
+ struct page **mapped_pages)
{
u32 attrs;
dsp_status status = DSP_SOK;
@@ -1376,6 +1378,9 @@ static dsp_status bridge_brd_mem_map(struct bridge_dev_context *hDevContext,
if (DSP_FAILED(status))
break;
+ if (mapped_pages)
+ mapped_pages[pg_i] = mapped_page;
+
va += HW_PAGE_SIZE4KB;
ul_mpu_addr += HW_PAGE_SIZE4KB;
} else {
diff --git a/drivers/dsp/bridge/rmgr/proc.c b/drivers/dsp/bridge/rmgr/proc.c
index 7dc9b5c..37258c4 100644
--- a/drivers/dsp/bridge/rmgr/proc.c
+++ b/drivers/dsp/bridge/rmgr/proc.c
@@ -108,6 +108,87 @@ static s32 get_envp_count(char **envp);
static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems,
s32 cnew_envp, char *szVar);
+/* remember mapping information */
+static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt,
+ u32 mpu_addr, u32 dsp_addr, u32 size)
+{
+ struct dmm_map_object *map_obj;
+
+ u32 num_usr_pgs = size / PG_SIZE4K;
+
+ pr_debug("%s: adding map info: mpu_addr 0x%x virt 0x%x size 0x%x\n",
+ __func__, mpu_addr,
+ dsp_addr, size);
+
+ map_obj = kzalloc(sizeof(struct dmm_map_object), GFP_KERNEL);
+ if (!map_obj) {
+ pr_err("%s: kzalloc failed\n", __func__);
+ return NULL;
+ }
+ INIT_LIST_HEAD(&map_obj->link);
+
+ map_obj->pages = kcalloc(num_usr_pgs, sizeof(struct page *),
+ GFP_KERNEL);
+ if (!map_obj->pages) {
+ pr_err("%s: kzalloc failed\n", __func__);
+ kfree(map_obj);
+ return NULL;
+ }
+
+ map_obj->mpu_addr = mpu_addr;
+ map_obj->dsp_addr = dsp_addr;
+ map_obj->size = size;
+ map_obj->num_usr_pgs = num_usr_pgs;
+
+ spin_lock(&pr_ctxt->dmm_map_lock);
+ list_add(&map_obj->link, &pr_ctxt->dmm_map_list);
+ spin_unlock(&pr_ctxt->dmm_map_lock);
+
+ return map_obj;
+}
+
+static int match_exact_map_obj(struct dmm_map_object *map_obj,
+ u32 dsp_addr, u32 size)
+{
+ if (map_obj->dsp_addr == dsp_addr && map_obj->size != size)
+ pr_err("%s: addr match (0x%x), size don't (0x%x != 0x%x)\n",
+ __func__, dsp_addr, map_obj->size, size);
+
+ return map_obj->dsp_addr == dsp_addr &&
+ map_obj->size == size;
+}
+
+static void remove_mapping_information(struct process_context *pr_ctxt,
+ u32 dsp_addr, u32 size)
+{
+ struct dmm_map_object *map_obj;
+
+ pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__,
+ dsp_addr, size);
+
+ spin_lock(&pr_ctxt->dmm_map_lock);
+ list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
+ pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
+ __func__,
+ map_obj->mpu_addr,
+ map_obj->dsp_addr,
+ map_obj->size);
+
+ if (match_exact_map_obj(map_obj, dsp_addr, size)) {
+ pr_debug("%s: match, deleting map info\n", __func__);
+ list_del(&map_obj->link);
+ kfree(map_obj->pages);
+ kfree(map_obj);
+ goto out;
+ }
+ pr_debug("%s: candidate didn't match\n", __func__);
+ }
+
+ pr_err("%s: failed to find given map info\n", __func__);
+out:
+ spin_unlock(&pr_ctxt->dmm_map_lock);
+}
+
/*
* ======== proc_attach ========
* Purpose:
@@ -1074,6 +1155,7 @@ dsp_status proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
dsp_status status = DSP_SOK;
struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
struct dmm_map_object *map_obj;
+ u32 tmp_addr = 0;
#ifdef CONFIG_BRIDGE_CACHE_LINE_CHECK
if ((ul_map_attr & BUFMODE_MASK) != RBUF) {
@@ -1105,15 +1187,23 @@ dsp_status proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
/* Add mapping to the page tables. */
if (DSP_SUCCEEDED(status)) {
- status = (*p_proc_object->intf_fxns->pfn_brd_mem_map)
- (p_proc_object->hbridge_context, pa_align, va_align,
- size_align, ul_map_attr);
+ /* Mapped address = MSB of VA | LSB of PA */
+ tmp_addr = (va_align | ((u32) pmpu_addr & (PG_SIZE4K - 1)));
+ /* mapped memory resource tracking */
+ map_obj = add_mapping_info(pr_ctxt, pa_align, tmp_addr,
+ size_align);
+ if (!map_obj)
+ status = -ENOMEM;
+ else
+ status = (*p_proc_object->intf_fxns->pfn_brd_mem_map)
+ (p_proc_object->hbridge_context, pa_align, va_align,
+ size_align, ul_map_attr, map_obj->pages);
}
if (DSP_SUCCEEDED(status)) {
/* Mapped address = MSB of VA | LSB of PA */
- *pp_map_addr = (void *)(va_align | ((u32) pmpu_addr &
- (PG_SIZE4K - 1)));
+ *pp_map_addr = (void *) tmp_addr;
} else {
+ remove_mapping_information(pr_ctxt, tmp_addr, size_align);
dmm_un_map_memory(dmm_mgr, va_align, &size_align);
}
mutex_unlock(&proc_lock);
@@ -1121,19 +1211,6 @@ dsp_status proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
if (DSP_FAILED(status))
goto func_end;
- /*
- * A successful map should be followed by insertion of map_obj
- * into dmm_map_list, so that mapped memory resource tracking
- * remains uptodate
- */
- map_obj = kmalloc(sizeof(struct dmm_map_object), GFP_KERNEL);
- if (map_obj) {
- map_obj->dsp_addr = (u32) *pp_map_addr;
- spin_lock(&pr_ctxt->dmm_map_lock);
- list_add(&map_obj->link, &pr_ctxt->dmm_map_list);
- spin_unlock(&pr_ctxt->dmm_map_lock);
- }
-
func_end:
dev_dbg(bridge, "%s: hprocessor %p, pmpu_addr %p, ul_size %x, "
"req_addr %p, ul_map_attr %x, pp_map_addr %p, va_align %x, "
@@ -1422,7 +1499,6 @@ dsp_status proc_un_map(void *hprocessor, void *map_addr,
struct dmm_object *dmm_mgr;
u32 va_align;
u32 size_align;
- struct dmm_map_object *map_obj;
va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K);
if (!p_proc_object) {
@@ -1446,6 +1522,7 @@ dsp_status proc_un_map(void *hprocessor, void *map_addr,
status = (*p_proc_object->intf_fxns->pfn_brd_mem_un_map)
(p_proc_object->hbridge_context, va_align, size_align);
}
+
mutex_unlock(&proc_lock);
if (DSP_FAILED(status))
goto func_end;
@@ -1455,15 +1532,7 @@ dsp_status proc_un_map(void *hprocessor, void *map_addr,
* from dmm_map_list, so that mapped memory resource tracking
* remains uptodate
*/
- spin_lock(&pr_ctxt->dmm_map_lock);
- list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
- if (map_obj->dsp_addr == (u32) map_addr) {
- list_del(&map_obj->link);
- kfree(map_obj);
- break;
- }
- }
- spin_unlock(&pr_ctxt->dmm_map_lock);
+ remove_mapping_information(pr_ctxt, (u32) map_addr, size_align);
func_end:
dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n",
--
1.7.0.4
* [PATCH v3 3/7] DSPBRIDGE: do not call follow_page
2010-05-27 16:02 [PATCH v3 0/7] DSPBRIDGE: fix mem+cache API issues Ohad Ben-Cohen
2010-05-27 16:02 ` [PATCH v3 1/7] DSPBRIDGE: enhance dmm_map_object Ohad Ben-Cohen
2010-05-27 16:02 ` [PATCH v3 2/7] DSPBRIDGE: maintain mapping and page info Ohad Ben-Cohen
@ 2010-05-27 16:02 ` Ohad Ben-Cohen
2010-07-25 20:13 ` Felipe Contreras
2010-12-20 14:10 ` Felipe Contreras
2010-05-27 16:02 ` [PATCH v3 4/7] DSPBRIDGE: do not use low level cache manipulation API Ohad Ben-Cohen
` (4 subsequent siblings)
7 siblings, 2 replies; 11+ messages in thread
From: Ohad Ben-Cohen @ 2010-05-27 16:02 UTC (permalink / raw)
To: linux-omap
Cc: Felipe Contreras, Ivan Gomez Castellanos, Kanigeri Hari,
Omar Ramirez Luna, Guzman Lugo Fernando, Menon Nishanth,
Hiroshi Doyu, Ohad Ben-Cohen
Eliminate the call to follow_page. Instead, use the page
information that was kept during the proc_map call.
This also has the advantage that users can now only
specify memory areas that were previously mapped.
Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
---
You can also reach me at < ohadb at ti dot com >.
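The lookup that replaces follow_page is plain index arithmetic over the page
array cached at proc_map time (sketch; see find_first_page_in_cache() and
get_mapping_page() in the diff below):

	/* sketch: resolve a user address to a page cached during proc_map */
	int pg_i = (mpu_addr >> PAGE_SHIFT) - (map_obj->mpu_addr >> PAGE_SHIFT);

	if (pg_i >= 0 && pg_i < map_obj->num_usr_pgs)
		page = map_obj->pages[pg_i];
	else
		page = NULL;	/* address was never mapped by this process */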
drivers/dsp/bridge/pmgr/dspapi.c | 4 +-
drivers/dsp/bridge/rmgr/proc.c | 148 +++++++++++++++++++++++++-------------
2 files changed, 99 insertions(+), 53 deletions(-)
diff --git a/drivers/dsp/bridge/pmgr/dspapi.c b/drivers/dsp/bridge/pmgr/dspapi.c
index 05ea853..cc64a99 100644
--- a/drivers/dsp/bridge/pmgr/dspapi.c
+++ b/drivers/dsp/bridge/pmgr/dspapi.c
@@ -688,7 +688,7 @@ u32 procwrap_flush_memory(union Trapped_Args *args, void *pr_ctxt)
PROC_WRITEBACK_INVALIDATE_MEM)
return -EINVAL;
- status = proc_flush_memory(args->args_proc_flushmemory.hprocessor,
+ status = proc_flush_memory(pr_ctxt,
args->args_proc_flushmemory.pmpu_addr,
args->args_proc_flushmemory.ul_size,
args->args_proc_flushmemory.ul_flags);
@@ -703,7 +703,7 @@ u32 procwrap_invalidate_memory(union Trapped_Args *args, void *pr_ctxt)
dsp_status status;
status =
- proc_invalidate_memory(args->args_proc_invalidatememory.hprocessor,
+ proc_invalidate_memory(pr_ctxt,
args->args_proc_invalidatememory.pmpu_addr,
args->args_proc_invalidatememory.ul_size);
return status;
diff --git a/drivers/dsp/bridge/rmgr/proc.c b/drivers/dsp/bridge/rmgr/proc.c
index 37258c4..6628483 100644
--- a/drivers/dsp/bridge/rmgr/proc.c
+++ b/drivers/dsp/bridge/rmgr/proc.c
@@ -189,6 +189,75 @@ out:
spin_unlock(&pr_ctxt->dmm_map_lock);
}
+static int match_containing_map_obj(struct dmm_map_object *map_obj,
+ u32 mpu_addr, u32 size)
+{
+ u32 map_obj_end = map_obj->mpu_addr + map_obj->size;
+
+ return mpu_addr >= map_obj->mpu_addr &&
+ mpu_addr + size <= map_obj_end;
+}
+
+static struct dmm_map_object *find_containing_mapping(
+ struct process_context *pr_ctxt,
+ u32 mpu_addr, u32 size)
+{
+ struct dmm_map_object *map_obj;
+ pr_debug("%s: looking for mpu_addr 0x%x size 0x%x\n", __func__,
+ mpu_addr, size);
+
+ spin_lock(&pr_ctxt->dmm_map_lock);
+ list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
+ pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
+ __func__,
+ map_obj->mpu_addr,
+ map_obj->dsp_addr,
+ map_obj->size);
+ if (match_containing_map_obj(map_obj, mpu_addr, size)) {
+ pr_debug("%s: match!\n", __func__);
+ goto out;
+ }
+
+ pr_debug("%s: no match!\n", __func__);
+ }
+
+ map_obj = NULL;
+out:
+ spin_unlock(&pr_ctxt->dmm_map_lock);
+ return map_obj;
+}
+
+static int find_first_page_in_cache(struct dmm_map_object *map_obj,
+ unsigned long mpu_addr)
+{
+ u32 mapped_base_page = map_obj->mpu_addr >> PAGE_SHIFT;
+ u32 requested_base_page = mpu_addr >> PAGE_SHIFT;
+ int pg_index = requested_base_page - mapped_base_page;
+
+ if (pg_index < 0 || pg_index >= map_obj->num_usr_pgs) {
+ pr_err("%s: failed (got %d)\n", __func__, pg_index);
+ return -1;
+ }
+
+ pr_debug("%s: first page is %d\n", __func__, pg_index);
+ return pg_index;
+}
+
+static inline struct page *get_mapping_page(struct dmm_map_object *map_obj,
+ int pg_i)
+{
+ pr_debug("%s: looking for pg_i %d, num_usr_pgs: %d\n", __func__,
+ pg_i, map_obj->num_usr_pgs);
+
+ if (pg_i < 0 || pg_i >= map_obj->num_usr_pgs) {
+ pr_err("%s: requested pg_i %d is out of mapped range\n",
+ __func__, pg_i);
+ return NULL;
+ }
+
+ return map_obj->pages[pg_i];
+}
+
/*
* ======== proc_attach ========
* Purpose:
@@ -537,23 +606,30 @@ dsp_status proc_enum_nodes(void *hprocessor, void **node_tab,
}
/* Cache operation against kernel address instead of users */
-static int memory_sync_page(struct vm_area_struct *vma, unsigned long start,
- ssize_t len, enum dsp_flushtype ftype)
+static int memory_sync_page(struct dmm_map_object *map_obj,
+ unsigned long start, ssize_t len, enum dsp_flushtype ftype)
{
struct page *page;
void *kaddr;
unsigned long offset;
ssize_t rest;
+ int pg_i;
+
+ pg_i = find_first_page_in_cache(map_obj, start);
+ if (pg_i < 0) {
+ pr_err("%s: failed to find first page in cache\n", __func__);
+ return -EINVAL;
+ }
while (len) {
- page = follow_page(vma, start, FOLL_GET);
+ page = get_mapping_page(map_obj, pg_i);
if (!page) {
pr_err("%s: no page for %08lx\n", __func__, start);
return -EINVAL;
} else if (IS_ERR(page)) {
pr_err("%s: err page for %08lx(%lu)\n", __func__, start,
- IS_ERR(page));
- return IS_ERR(page);
+ PTR_ERR(page));
+ return PTR_ERR(page);
}
offset = start & ~PAGE_MASK;
@@ -562,77 +638,47 @@ static int memory_sync_page(struct vm_area_struct *vma, unsigned long start,
mem_flush_cache(kaddr, rest, ftype);
kunmap(page);
- put_page(page);
len -= rest;
start += rest;
+ pg_i++;
}
return 0;
}
-/* Check if the given area blongs to process virtul memory address space */
-static int memory_sync_vma(unsigned long start, u32 len,
- enum dsp_flushtype ftype)
-{
- int err = 0;
- unsigned long end;
- struct vm_area_struct *vma;
-
- end = start + len;
- if (end <= start)
- return -EINVAL;
-
- while ((vma = find_vma(current->mm, start)) != NULL) {
- ssize_t size;
-
- if (vma->vm_flags & (VM_IO | VM_PFNMAP))
- return -EINVAL;
-
- if (vma->vm_start > start)
- return -EINVAL;
-
- size = min_t(ssize_t, vma->vm_end - start, len);
- err = memory_sync_page(vma, start, size, ftype);
- if (err)
- break;
-
- if (end <= vma->vm_end)
- break;
-
- start = vma->vm_end;
- }
-
- if (!vma)
- err = -EINVAL;
-
- return err;
-}
-
static dsp_status proc_memory_sync(void *hprocessor, void *pmpu_addr,
u32 ul_size, u32 ul_flags,
enum dsp_flushtype FlushMemType)
{
/* Keep STATUS here for future additions to this function */
dsp_status status = DSP_SOK;
- struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
+ struct process_context *pr_ctxt = (struct process_context *) hprocessor;
+ struct dmm_map_object *map_obj;
DBC_REQUIRE(refs > 0);
- if (!p_proc_object) {
+ if (!pr_ctxt) {
status = -EFAULT;
goto err_out;
}
- down_read(&current->mm->mmap_sem);
+ pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
+ (u32)pmpu_addr,
+ ul_size, ul_flags);
- if (memory_sync_vma((u32) pmpu_addr, ul_size, FlushMemType)) {
+ /* find requested memory area in cached mapping information */
+ map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
+ if (!map_obj) {
+ pr_err("%s: find_containing_mapping failed\n", __func__);
+ status = -EFAULT;
+ goto err_out;
+ }
+ if (memory_sync_page(map_obj, (u32) pmpu_addr, ul_size, ul_flags)) {
pr_err("%s: InValid address parameters %p %x\n",
- __func__, pmpu_addr, ul_size);
+ __func__, pmpu_addr, ul_size);
status = -EFAULT;
}
- up_read(&current->mm->mmap_sem);
-
err_out:
return status;
--
1.7.0.4
* Re: [PATCH v3 3/7] DSPBRIDGE: do not call follow_page
2010-05-27 16:02 ` [PATCH v3 3/7] DSPBRIDGE: do not call follow_page Ohad Ben-Cohen
@ 2010-07-25 20:13 ` Felipe Contreras
2010-12-20 14:10 ` Felipe Contreras
1 sibling, 0 replies; 11+ messages in thread
From: Felipe Contreras @ 2010-07-25 20:13 UTC (permalink / raw)
To: Ohad Ben-Cohen
Cc: linux-omap, Ivan Gomez Castellanos, Kanigeri Hari,
Omar Ramirez Luna, Guzman Lugo Fernando, Menon Nishanth,
Hiroshi Doyu
Hi,
Just for the record, I found a problem in this patch. The next patch
in the series overrides it, so it's not that important, unless
somebody picks only this patch.
On Thu, May 27, 2010 at 7:02 PM, Ohad Ben-Cohen <ohad@wizery.com> wrote:
> @@ -537,23 +606,30 @@ dsp_status proc_enum_nodes(void *hprocessor, void **node_tab,
> }
>
> /* Cache operation against kernel address instead of users */
> -static int memory_sync_page(struct vm_area_struct *vma, unsigned long start,
> - ssize_t len, enum dsp_flushtype ftype)
> +static int memory_sync_page(struct dmm_map_object *map_obj,
> + unsigned long start, ssize_t len, enum dsp_flushtype ftype)
> {
> struct page *page;
> void *kaddr;
[...]
> - if (memory_sync_vma((u32) pmpu_addr, ul_size, FlushMemType)) {
> + /* find requested memory area in cached mapping information */
> + map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
> + if (!map_obj) {
> + pr_err("%s: find_containing_mapping failed\n", __func__);
> + status = -EFAULT;
> + goto err_out;
> + }
> + if (memory_sync_page(map_obj, (u32) pmpu_addr, ul_size, ul_flags)) {
It should be FlushMemType, not ul_flags.
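In other words, the call would presumably need to become:

	if (memory_sync_page(map_obj, (u32) pmpu_addr, ul_size, FlushMemType)) {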
> pr_err("%s: InValid address parameters %p %x\n",
> - __func__, pmpu_addr, ul_size);
> + __func__, pmpu_addr, ul_size);
> status = -EFAULT;
> }
--
Felipe Contreras
* Re: [PATCH v3 3/7] DSPBRIDGE: do not call follow_page
2010-05-27 16:02 ` [PATCH v3 3/7] DSPBRIDGE: do not call follow_page Ohad Ben-Cohen
2010-07-25 20:13 ` Felipe Contreras
@ 2010-12-20 14:10 ` Felipe Contreras
1 sibling, 0 replies; 11+ messages in thread
From: Felipe Contreras @ 2010-12-20 14:10 UTC (permalink / raw)
To: Ohad Ben-Cohen
Cc: linux-omap, Ivan Gomez Castellanos, Kanigeri Hari,
Omar Ramirez Luna, Guzman Lugo Fernando, Menon Nishanth,
Hiroshi Doyu
On Thu, May 27, 2010 at 7:02 PM, Ohad Ben-Cohen <ohad@wizery.com> wrote:
> Eliminate the call to follow_page. Instead, use the page
> information that was kept during the proc_map call.
> This also has the advantage that users can now only
> specify memory areas that were previously mapped.
>
> Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
I found another issue with this patch:
> - if (memory_sync_vma((u32) pmpu_addr, ul_size, FlushMemType)) {
> + /* find requested memory area in cached mapping information */
> + map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
> + if (!map_obj) {
> + pr_err("%s: find_containing_mapping failed\n", __func__);
> + status = -EFAULT;
> + goto err_out;
> + }
> + if (memory_sync_page(map_obj, (u32) pmpu_addr, ul_size, ul_flags)) {
> pr_err("%s: InValid address parameters %p %x\n",
> - __func__, pmpu_addr, ul_size);
> + __func__, pmpu_addr, ul_size);
> status = -EFAULT;
> }
find_containing_mapping() takes the dmm_map_list lock, but nothing
prevents the map_obj from being destroyed after the lock is released,
especially if kcalloc sleeps and an unmap happens in the meantime. While
doing some stress testing I found a race condition that makes exactly
that happen.
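Roughly, the problematic interleaving looks like this (illustration only,
based on the code added in patches 2 and 4):

	thread A: proc_begin_dma()              thread B: proc_un_map()
	  find_containing_mapping()
	    takes dmm_map_lock, finds map_obj,
	    releases dmm_map_lock
	                                          remove_mapping_information()
	                                            takes dmm_map_lock, list_del(),
	                                            kfree(map_obj->pages),
	                                            kfree(map_obj)
	  memory_give_ownership(map_obj, ...)   <-- dereferences freed map_obj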
I'm sending some patches to fix that.
--
Felipe Contreras
* [PATCH v3 4/7] DSPBRIDGE: do not use low level cache manipulation API
2010-05-27 16:02 [PATCH v3 0/7] DSPBRIDGE: fix mem+cache API issues Ohad Ben-Cohen
` (2 preceding siblings ...)
2010-05-27 16:02 ` [PATCH v3 3/7] DSPBRIDGE: do not call follow_page Ohad Ben-Cohen
@ 2010-05-27 16:02 ` Ohad Ben-Cohen
2010-05-27 16:02 ` [PATCH v3 5/7] DSPBRIDGE: remove mem_flush_cache Ohad Ben-Cohen
` (3 subsequent siblings)
7 siblings, 0 replies; 11+ messages in thread
From: Ohad Ben-Cohen @ 2010-05-27 16:02 UTC (permalink / raw)
To: linux-omap
Cc: Felipe Contreras, Ivan Gomez Castellanos, Kanigeri Hari,
Omar Ramirez Luna, Guzman Lugo Fernando, Menon Nishanth,
Hiroshi Doyu, Ohad Ben-Cohen
Instead of using low level cache manipulation API,
use the standard DMA API. This is achieved by adding
a proc_begin_dma function that takes a generic
dma_data_direction, and then implementing proc_flush_memory
and proc_invalidate_memory by means of proc_begin_dma
in the following manner:
* flush calls proc_begin_dma with DMA_BIDIRECTIONAL
* invalidate calls proc_begin_dma with DMA_FROM_DEVICE
proc_begin_dma builds a scatter-gather list using the
page information that was kept during proc_map,
and feeds it to the standard dma_map_sg API.
Note that now users cannot manipulate the cache state of any random
address; if the buffer is not part of a previous memory mapping of that
application, the request is denied.
Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
---
If you want, you can also reach me at < ohadb at ti dot com >.
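In rough outline (a sketch only; the complete implementation, including error
handling, is in the diff below):

	/* sketch of the new flush/invalidate path */
	map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
	pg_i = find_first_page_in_cache(map_obj, (u32) pmpu_addr);
	build_dma_sg(map_obj, (u32) pmpu_addr, ul_size, pg_i);	/* sg from cached pages */
	dma_map_sg(bridge, map_obj->dma_info.sg, num_pages, dir);
	/* dir is DMA_BIDIRECTIONAL for flush, DMA_FROM_DEVICE for invalidate */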
arch/arm/plat-omap/include/dspbridge/drv.h | 13 +++
drivers/dsp/bridge/rmgr/proc.c | 119 +++++++++++++++++++++-------
2 files changed, 104 insertions(+), 28 deletions(-)
diff --git a/arch/arm/plat-omap/include/dspbridge/drv.h b/arch/arm/plat-omap/include/dspbridge/drv.h
index b1312aa..3186935 100644
--- a/arch/arm/plat-omap/include/dspbridge/drv.h
+++ b/arch/arm/plat-omap/include/dspbridge/drv.h
@@ -84,6 +84,18 @@ struct node_res_object {
struct node_res_object *next;
};
+/* used to cache dma mapping information */
+struct bridge_dma_map_info {
+ /* direction of DMA in action, or DMA_NONE */
+ enum dma_data_direction dir;
+ /* number of elements requested by us */
+ int num_pages;
+ /* number of elements returned from dma_map_sg */
+ int sg_num;
+ /* list of buffers used in this DMA action */
+ struct scatterlist *sg;
+};
+
/* Used for DMM mapped memory accounting */
struct dmm_map_object {
struct list_head link;
@@ -92,6 +104,7 @@ struct dmm_map_object {
u32 size;
u32 num_usr_pgs;
struct page **pages;
+ struct bridge_dma_map_info dma_info;
};
/* Used for DMM reserved memory accounting */
diff --git a/drivers/dsp/bridge/rmgr/proc.c b/drivers/dsp/bridge/rmgr/proc.c
index 6628483..2710a11 100644
--- a/drivers/dsp/bridge/rmgr/proc.c
+++ b/drivers/dsp/bridge/rmgr/proc.c
@@ -17,6 +17,8 @@
*/
/* ------------------------------------ Host OS */
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
#include <dspbridge/host_os.h>
/* ----------------------------------- DSP/BIOS Bridge */
@@ -74,6 +76,8 @@
#define RBUF 0x4000 /* Input buffer */
#define WBUF 0x8000 /* Output Buffer */
+extern struct device *bridge;
+
/* ----------------------------------- Globals */
/* The proc_object structure. */
@@ -177,6 +181,7 @@ static void remove_mapping_information(struct process_context *pr_ctxt,
if (match_exact_map_obj(map_obj, dsp_addr, size)) {
pr_debug("%s: match, deleting map info\n", __func__);
list_del(&map_obj->link);
+ kfree(map_obj->dma_info.sg);
kfree(map_obj->pages);
kfree(map_obj);
goto out;
@@ -606,49 +611,108 @@ dsp_status proc_enum_nodes(void *hprocessor, void **node_tab,
}
/* Cache operation against kernel address instead of users */
-static int memory_sync_page(struct dmm_map_object *map_obj,
- unsigned long start, ssize_t len, enum dsp_flushtype ftype)
+static int build_dma_sg(struct dmm_map_object *map_obj, unsigned long start,
+ ssize_t len, int pg_i)
{
struct page *page;
- void *kaddr;
unsigned long offset;
ssize_t rest;
- int pg_i;
-
- pg_i = find_first_page_in_cache(map_obj, start);
- if (pg_i < 0) {
- pr_err("%s: failed to find first page in cache\n", __func__);
- return -EINVAL;
- }
+ int ret = 0, i = 0;
+ struct scatterlist *sg = map_obj->dma_info.sg;
while (len) {
page = get_mapping_page(map_obj, pg_i);
if (!page) {
pr_err("%s: no page for %08lx\n", __func__, start);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
} else if (IS_ERR(page)) {
pr_err("%s: err page for %08lx(%lu)\n", __func__, start,
PTR_ERR(page));
- return PTR_ERR(page);
+ ret = PTR_ERR(page);
+ goto out;
}
offset = start & ~PAGE_MASK;
- kaddr = kmap(page) + offset;
rest = min_t(ssize_t, PAGE_SIZE - offset, len);
- mem_flush_cache(kaddr, rest, ftype);
- kunmap(page);
+ sg_set_page(&sg[i], page, rest, offset);
+
len -= rest;
start += rest;
- pg_i++;
+ pg_i++, i++;
}
+ if (i != map_obj->dma_info.num_pages) {
+ pr_err("%s: bad number of sg iterations\n", __func__);
+ ret = -EFAULT;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+/* Cache operation against kernel address instead of users */
+static int memory_give_ownership(struct dmm_map_object *map_obj,
+ unsigned long start, ssize_t len, enum dma_data_direction dir)
+{
+ int pg_i, ret, sg_num;
+ struct scatterlist *sg;
+ unsigned long first_data_page = start >> PAGE_SHIFT;
+ unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
+ /* calculating the number of pages this area spans */
+ unsigned long num_pages = last_data_page - first_data_page + 1;
+
+ pg_i = find_first_page_in_cache(map_obj, start);
+ if (pg_i < 0) {
+ pr_err("%s: failed to find first page in cache\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ sg = kcalloc(num_pages, sizeof(*sg), GFP_KERNEL);
+ if (!sg) {
+ pr_err("%s: kcalloc failed\n", __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ sg_init_table(sg, num_pages);
+
+ /* cleanup a previous sg allocation */
+ /* this may happen if the application doesn't signal end-of-DMA */
+ kfree(map_obj->dma_info.sg);
+
+ map_obj->dma_info.sg = sg;
+ map_obj->dma_info.dir = dir;
+ map_obj->dma_info.num_pages = num_pages;
+
+ ret = build_dma_sg(map_obj, start, len, pg_i);
+ if (ret)
+ goto kfree_sg;
+
+ sg_num = dma_map_sg(bridge, sg, num_pages, dir);
+ if (sg_num < 1) {
+ pr_err("%s: dma_map_sg failed: %d\n", __func__, sg_num);
+ ret = -EFAULT;
+ goto kfree_sg;
+ }
+
+ pr_debug("%s: dma_map_sg mapped %d elements\n", __func__, sg_num);
+ map_obj->dma_info.sg_num = sg_num;
+
return 0;
+
+kfree_sg:
+ kfree(sg);
+ map_obj->dma_info.sg = NULL;
+out:
+ return ret;
}
-static dsp_status proc_memory_sync(void *hprocessor, void *pmpu_addr,
- u32 ul_size, u32 ul_flags,
- enum dsp_flushtype FlushMemType)
+static int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
+ enum dma_data_direction dir)
{
/* Keep STATUS here for future additions to this function */
dsp_status status = DSP_SOK;
@@ -664,7 +728,7 @@ static dsp_status proc_memory_sync(void *hprocessor, void *pmpu_addr,
pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
(u32)pmpu_addr,
- ul_size, ul_flags);
+ ul_size, dir);
/* find requested memory area in cached mapping information */
map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
@@ -673,7 +737,8 @@ static dsp_status proc_memory_sync(void *hprocessor, void *pmpu_addr,
status = -EFAULT;
goto err_out;
}
- if (memory_sync_page(map_obj, (u32) pmpu_addr, ul_size, ul_flags)) {
+
+ if (memory_give_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
pr_err("%s: InValid address parameters %p %x\n",
__func__, pmpu_addr, ul_size);
status = -EFAULT;
@@ -692,10 +757,9 @@ err_out:
dsp_status proc_flush_memory(void *hprocessor, void *pmpu_addr,
u32 ul_size, u32 ul_flags)
{
- enum dsp_flushtype mtype = PROC_WRITEBACK_INVALIDATE_MEM;
+ enum dma_data_direction dir = DMA_BIDIRECTIONAL;
- return proc_memory_sync(hprocessor, pmpu_addr, ul_size, ul_flags,
- mtype);
+ return proc_begin_dma(hprocessor, pmpu_addr, ul_size, dir);
}
/*
@@ -703,12 +767,11 @@ dsp_status proc_flush_memory(void *hprocessor, void *pmpu_addr,
* Purpose:
* Invalidates the memory specified
*/
-dsp_status proc_invalidate_memory(void *hprocessor, void *pmpu_addr,
- u32 ul_size)
+dsp_status proc_invalidate_memory(void *hprocessor, void *pmpu_addr, u32 size)
{
- enum dsp_flushtype mtype = PROC_INVALIDATE_MEM;
+ enum dma_data_direction dir = DMA_FROM_DEVICE;
- return proc_memory_sync(hprocessor, pmpu_addr, ul_size, 0, mtype);
+ return proc_begin_dma(hprocessor, pmpu_addr, size, dir);
}
/*
--
1.7.0.4
* [PATCH v3 5/7] DSPBRIDGE: remove mem_flush_cache
2010-05-27 16:02 [PATCH v3 0/7] DSPBRIDGE: fix mem+cache API issues Ohad Ben-Cohen
` (3 preceding siblings ...)
2010-05-27 16:02 ` [PATCH v3 4/7] DSPBRIDGE: do not use low level cache manipulation API Ohad Ben-Cohen
@ 2010-05-27 16:02 ` Ohad Ben-Cohen
2010-05-27 16:02 ` [PATCH v3 6/7] DSPBRIDGE: add dspbridge API to mark end of DMA Ohad Ben-Cohen
` (2 subsequent siblings)
7 siblings, 0 replies; 11+ messages in thread
From: Ohad Ben-Cohen @ 2010-05-27 16:02 UTC (permalink / raw)
To: linux-omap
Cc: Felipe Contreras, Ivan Gomez Castellanos, Kanigeri Hari,
Omar Ramirez Luna, Guzman Lugo Fernando, Menon Nishanth,
Hiroshi Doyu, Ohad Ben-Cohen
Now that we use standard DMA API instead of low level cache
manipulation API, mem_flush_cache can be removed.
Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
---
If you want, you can also reach me at < ohadb at ti dot com >.
arch/arm/plat-omap/include/dspbridge/drv.h | 15 ------------
drivers/dsp/bridge/rmgr/drv.c | 33 ----------------------------
2 files changed, 0 insertions(+), 48 deletions(-)
diff --git a/arch/arm/plat-omap/include/dspbridge/drv.h b/arch/arm/plat-omap/include/dspbridge/drv.h
index 3186935..2eeb47f 100644
--- a/arch/arm/plat-omap/include/dspbridge/drv.h
+++ b/arch/arm/plat-omap/include/dspbridge/drv.h
@@ -466,21 +466,6 @@ extern void *mem_alloc_phys_mem(IN u32 byte_size,
IN u32 ulAlign, OUT u32 *pPhysicalAddress);
/*
- * ======== mem_flush_cache ========
- * Purpose:
- * Performs system cache sync with discard
- * Parameters:
- * pMemBuf: Pointer to memory region to be flushed.
- * pMemBuf: Size of the memory region to be flushed.
- * Returns:
- * Requires:
- * MEM is initialized.
- * Ensures:
- * Cache is synchronized
- */
-extern void mem_flush_cache(void *pMemBuf, u32 byte_size, s32 FlushType);
-
-/*
* ======== mem_free_phys_mem ========
* Purpose:
* Free the given block of physically contiguous memory.
diff --git a/drivers/dsp/bridge/rmgr/drv.c b/drivers/dsp/bridge/rmgr/drv.c
index a8e711a..6ffae0b 100644
--- a/drivers/dsp/bridge/rmgr/drv.c
+++ b/drivers/dsp/bridge/rmgr/drv.c
@@ -1039,39 +1039,6 @@ void *mem_alloc_phys_mem(u32 byte_size, u32 ulAlign, OUT u32 * pPhysicalAddress)
}
/*
- * ======== mem_flush_cache ========
- * Purpose:
- * Flush cache
- */
-void mem_flush_cache(void *pMemBuf, u32 byte_size, s32 FlushType)
-{
- if (!pMemBuf)
- return;
-
- switch (FlushType) {
- /* invalidate only */
- case PROC_INVALIDATE_MEM:
- dmac_inv_range(pMemBuf, pMemBuf + byte_size);
- outer_inv_range(__pa((u32) pMemBuf), __pa((u32) pMemBuf +
- byte_size));
- break;
- /* writeback only */
- case PROC_WRITEBACK_MEM:
- dmac_clean_range(pMemBuf, pMemBuf + byte_size);
- outer_clean_range(__pa((u32) pMemBuf), __pa((u32) pMemBuf +
- byte_size));
- break;
- /* writeback and invalidate */
- case PROC_WRITEBACK_INVALIDATE_MEM:
- dmac_flush_range(pMemBuf, pMemBuf + byte_size);
- outer_flush_range(__pa((u32) pMemBuf), __pa((u32) pMemBuf +
- byte_size));
- break;
- }
-
-}
-
-/*
* ======== mem_free_phys_mem ========
* Purpose:
* Free the given block of physically contiguous memory.
--
1.7.0.4
* [PATCH v3 6/7] DSPBRIDGE: add dspbridge API to mark end of DMA
2010-05-27 16:02 [PATCH v3 0/7] DSPBRIDGE: fix mem+cache API issues Ohad Ben-Cohen
` (4 preceding siblings ...)
2010-05-27 16:02 ` [PATCH v3 5/7] DSPBRIDGE: remove mem_flush_cache Ohad Ben-Cohen
@ 2010-05-27 16:02 ` Ohad Ben-Cohen
2010-05-27 16:02 ` [PATCH v3 7/7] DSPBRIDGE: add new PROC_BEGINDMA and PROC_ENDDMA ioctls Ohad Ben-Cohen
2010-06-18 22:57 ` [PATCH v3 0/7] DSPBRIDGE: fix mem+cache API issues Ramirez Luna, Omar
7 siblings, 0 replies; 11+ messages in thread
From: Ohad Ben-Cohen @ 2010-05-27 16:02 UTC (permalink / raw)
To: linux-omap
Cc: Felipe Contreras, Ivan Gomez Castellanos, Kanigeri Hari,
Omar Ramirez Luna, Guzman Lugo Fernando, Menon Nishanth,
Hiroshi Doyu, Ohad Ben-Cohen
Add new dspbridge API that ends DMA transfers.
Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
---
If you want, you can also reach me at < ohadb at ti dot com >.
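In outline (a sketch; the full implementation follows): ending a DMA transfer
hands the buffer back to the CPU by unmapping the scatter-gather list that
proc_begin_dma created:

	/* sketch: the core of memory_regain_ownership() */
	dma_unmap_sg(bridge, map_obj->dma_info.sg, num_pages,
		     map_obj->dma_info.dir);
	kfree(map_obj->dma_info.sg);
	map_obj->dma_info.sg = NULL;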
drivers/dsp/bridge/rmgr/proc.c | 68 ++++++++++++++++++++++++++++++++++++++++
1 files changed, 68 insertions(+), 0 deletions(-)
diff --git a/drivers/dsp/bridge/rmgr/proc.c b/drivers/dsp/bridge/rmgr/proc.c
index 2710a11..e952d2e 100644
--- a/drivers/dsp/bridge/rmgr/proc.c
+++ b/drivers/dsp/bridge/rmgr/proc.c
@@ -653,6 +653,36 @@ out:
return ret;
}
+static int memory_regain_ownership(struct dmm_map_object *map_obj,
+ unsigned long start, ssize_t len, enum dma_data_direction dir)
+{
+ int ret = 0;
+ unsigned long first_data_page = start >> PAGE_SHIFT;
+ unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
+ /* calculating the number of pages this area spans */
+ unsigned long num_pages = last_data_page - first_data_page + 1;
+ struct bridge_dma_map_info *dma_info = &map_obj->dma_info;
+
+ if (!dma_info->sg)
+ goto out;
+
+ if (dma_info->dir != dir || dma_info->num_pages != num_pages) {
+ pr_err("%s: dma info doesn't match given params\n", __func__);
+ return -EINVAL;
+ }
+
+ dma_unmap_sg(bridge, dma_info->sg, num_pages, dma_info->dir);
+
+ pr_debug("%s: dma_map_sg unmapped\n", __func__);
+
+ kfree(dma_info->sg);
+
+ map_obj->dma_info.sg = NULL;
+
+out:
+ return ret;
+}
+
/* Cache operation against kernel address instead of users */
static int memory_give_ownership(struct dmm_map_object *map_obj,
unsigned long start, ssize_t len, enum dma_data_direction dir)
@@ -749,6 +779,44 @@ err_out:
return status;
}
+static int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
+ enum dma_data_direction dir)
+{
+ /* Keep STATUS here for future additions to this function */
+ int status = 0;
+ struct process_context *pr_ctxt = (struct process_context *) hprocessor;
+ struct dmm_map_object *map_obj;
+
+ DBC_REQUIRE(refs > 0);
+
+ if (!pr_ctxt) {
+ status = -EFAULT;
+ goto err_out;
+ }
+
+ pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
+ (u32)pmpu_addr,
+ ul_size, dir);
+
+ /* find requested memory area in cached mapping information */
+ map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
+ if (!map_obj) {
+ pr_err("%s: find_containing_mapping failed\n", __func__);
+ status = -EFAULT;
+ goto err_out;
+ }
+
+ if (memory_regain_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
+ pr_err("%s: InValid address parameters %p %x\n",
+ __func__, pmpu_addr, ul_size);
+ status = -EFAULT;
+ goto err_out;
+ }
+
+err_out:
+ return status;
+}
+
/*
* ======== proc_flush_memory ========
* Purpose:
--
1.7.0.4
* [PATCH v3 7/7] DSPBRIDGE: add new PROC_BEGINDMA and PROC_ENDDMA ioctls
2010-05-27 16:02 [PATCH v3 0/7] DSPBRIDGE: fix mem+cache API issues Ohad Ben-Cohen
` (5 preceding siblings ...)
2010-05-27 16:02 ` [PATCH v3 6/7] DSPBRIDGE: add dspbridge API to mark end of DMA Ohad Ben-Cohen
@ 2010-05-27 16:02 ` Ohad Ben-Cohen
2010-06-18 22:57 ` [PATCH v3 0/7] DSPBRIDGE: fix mem+cache API issues Ramirez Luna, Omar
7 siblings, 0 replies; 11+ messages in thread
From: Ohad Ben-Cohen @ 2010-05-27 16:02 UTC (permalink / raw)
To: linux-omap
Cc: Felipe Contreras, Ivan Gomez Castellanos, Kanigeri Hari,
Omar Ramirez Luna, Guzman Lugo Fernando, Menon Nishanth,
Hiroshi Doyu, Ohad Ben-Cohen
Add two new dspbridge ioctls that mark the
beginning and end of a DMA transfer.
The direction of the DMA transfer is given as a parameter,
thus all three directions (DMA_BIDIRECTIONAL, DMA_TO_DEVICE
and DMA_FROM_DEVICE) are supported.
Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
---
If you want, you can also reach me at < ohadb at ti dot com >.
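A hypothetical user-space sketch of the intended call sequence (illustration
only: the file descriptor fd is assumed to refer to the bridge device node,
and the use of union Trapped_Args and of the dma_data_direction constants
from user space are assumptions; a real application would normally go through
the bridge MPU-side library):

	/* sketch: bracket DSP access to a buffer that was proc_map'ed earlier */
	union Trapped_Args args = { 0 };

	args.args_proc_dma.hprocessor = hprocessor;	/* handle from PROC_ATTACH */
	args.args_proc_dma.pmpu_addr  = buf;		/* inside a mapped area */
	args.args_proc_dma.ul_size    = len;
	args.args_proc_dma.dir        = DMA_BIDIRECTIONAL;

	ioctl(fd, PROC_BEGINDMA, &args);	/* MPU hands the buffer to the DSP */
	/* ... DSP reads/writes the buffer ... */
	ioctl(fd, PROC_ENDDMA, &args);		/* same dir: buffer back to the MPU */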
.../arm/plat-omap/include/dspbridge/dspapi-ioctl.h | 9 ++++++
arch/arm/plat-omap/include/dspbridge/dspapi.h | 2 +
arch/arm/plat-omap/include/dspbridge/proc.h | 29 +++++++++++++++++++
drivers/dsp/bridge/pmgr/dspapi.c | 30 ++++++++++++++++++++
drivers/dsp/bridge/rmgr/proc.c | 4 +-
5 files changed, 72 insertions(+), 2 deletions(-)
diff --git a/arch/arm/plat-omap/include/dspbridge/dspapi-ioctl.h b/arch/arm/plat-omap/include/dspbridge/dspapi-ioctl.h
index 1780855..6ed2dcc 100644
--- a/arch/arm/plat-omap/include/dspbridge/dspapi-ioctl.h
+++ b/arch/arm/plat-omap/include/dspbridge/dspapi-ioctl.h
@@ -153,6 +153,13 @@ union Trapped_Args {
void *hprocessor;
void *pmpu_addr;
u32 ul_size;
+ u32 dir;
+ } args_proc_dma;
+
+ struct {
+ void *hprocessor;
+ void *pmpu_addr;
+ u32 ul_size;
u32 ul_flags;
} args_proc_flushmemory;
@@ -426,6 +433,8 @@ union Trapped_Args {
#define PROC_FLUSHMEMORY _IOW(DB, DB_IOC(DB_PROC, 14), unsigned long)
#define PROC_STOP _IOWR(DB, DB_IOC(DB_PROC, 15), unsigned long)
#define PROC_INVALIDATEMEMORY _IOW(DB, DB_IOC(DB_PROC, 16), unsigned long)
+#define PROC_BEGINDMA _IOW(DB, DB_IOC(DB_PROC, 17), unsigned long)
+#define PROC_ENDDMA _IOW(DB, DB_IOC(DB_PROC, 18), unsigned long)
/* NODE Module */
#define NODE_ALLOCATE _IOWR(DB, DB_IOC(DB_NODE, 0), unsigned long)
diff --git a/arch/arm/plat-omap/include/dspbridge/dspapi.h b/arch/arm/plat-omap/include/dspbridge/dspapi.h
index 565c800..e821c7b 100644
--- a/arch/arm/plat-omap/include/dspbridge/dspapi.h
+++ b/arch/arm/plat-omap/include/dspbridge/dspapi.h
@@ -126,6 +126,8 @@ extern u32 procwrap_un_map(union Trapped_Args *args, void *pr_ctxt);
extern u32 procwrap_flush_memory(union Trapped_Args *args, void *pr_ctxt);
extern u32 procwrap_stop(union Trapped_Args *args, void *pr_ctxt);
extern u32 procwrap_invalidate_memory(union Trapped_Args *args, void *pr_ctxt);
+extern u32 procwrap_begin_dma(union Trapped_Args *args, void *pr_ctxt);
+extern u32 procwrap_end_dma(union Trapped_Args *args, void *pr_ctxt);
/* NODE wrapper functions */
extern u32 nodewrap_allocate(union Trapped_Args *args, void *pr_ctxt);
diff --git a/arch/arm/plat-omap/include/dspbridge/proc.h b/arch/arm/plat-omap/include/dspbridge/proc.h
index 18b51c5..58fcdea 100644
--- a/arch/arm/plat-omap/include/dspbridge/proc.h
+++ b/arch/arm/plat-omap/include/dspbridge/proc.h
@@ -456,6 +456,35 @@ extern dsp_status proc_start(void *hprocessor);
extern dsp_status proc_stop(void *hprocessor);
/*
+ * ======== proc_end_dma ========
+ * Purpose:
+ * End a DMA transfer
+ * Parameters:
+ * hprocessor : The processor handle.
+ * pmpu_addr : Buffer start address
+ * ul_size : Buffer size
+ * dir : The direction of the transfer
+ * Requires:
+ * Memory was previously mapped.
+ */
+extern int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
+ enum dma_data_direction dir);
+/*
+ * ======== proc_begin_dma ========
+ * Purpose:
+ * Begin a DMA transfer
+ * Parameters:
+ * hprocessor : The processor handle.
+ * pmpu_addr : Buffer start address
+ * ul_size : Buffer size
+ * dir : The direction of the transfer
+ * Requires:
+ * Memory was previously mapped.
+ */
+extern int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
+ enum dma_data_direction dir);
+
+/*
* ======== proc_flush_memory ========
* Purpose:
* Flushes a buffer from the MPU data cache.
diff --git a/drivers/dsp/bridge/pmgr/dspapi.c b/drivers/dsp/bridge/pmgr/dspapi.c
index cc64a99..2da2021 100644
--- a/drivers/dsp/bridge/pmgr/dspapi.c
+++ b/drivers/dsp/bridge/pmgr/dspapi.c
@@ -113,6 +113,8 @@ static struct api_cmd proc_cmd[] = {
{procwrap_flush_memory}, /* PROC_FLUSHMEMORY */
{procwrap_stop}, /* PROC_STOP */
{procwrap_invalidate_memory}, /* PROC_INVALIDATEMEMORY */
+ {procwrap_begin_dma}, /* PROC_BEGINDMA */
+ {procwrap_end_dma}, /* PROC_ENDDMA */
};
/* NODE wrapper functions */
@@ -677,6 +679,34 @@ u32 procwrap_enum_node_info(union Trapped_Args *args, void *pr_ctxt)
return status;
}
+u32 procwrap_end_dma(union Trapped_Args *args, void *pr_ctxt)
+{
+ dsp_status status;
+
+ if (args->args_proc_dma.dir >= DMA_NONE)
+ return -EINVAL;
+
+ status = proc_end_dma(pr_ctxt,
+ args->args_proc_dma.pmpu_addr,
+ args->args_proc_dma.ul_size,
+ args->args_proc_dma.dir);
+ return status;
+}
+
+u32 procwrap_begin_dma(union Trapped_Args *args, void *pr_ctxt)
+{
+ dsp_status status;
+
+ if (args->args_proc_dma.dir >= DMA_NONE)
+ return -EINVAL;
+
+ status = proc_begin_dma(pr_ctxt,
+ args->args_proc_dma.pmpu_addr,
+ args->args_proc_dma.ul_size,
+ args->args_proc_dma.dir);
+ return status;
+}
+
/*
* ======== procwrap_flush_memory ========
*/
diff --git a/drivers/dsp/bridge/rmgr/proc.c b/drivers/dsp/bridge/rmgr/proc.c
index e952d2e..93db51b 100644
--- a/drivers/dsp/bridge/rmgr/proc.c
+++ b/drivers/dsp/bridge/rmgr/proc.c
@@ -741,7 +741,7 @@ out:
return ret;
}
-static int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
+int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
enum dma_data_direction dir)
{
/* Keep STATUS here for future additions to this function */
@@ -779,7 +779,7 @@ err_out:
return status;
}
-static int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
+int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
enum dma_data_direction dir)
{
/* Keep STATUS here for future additions to this function */
--
1.7.0.4
* RE: [PATCH v3 0/7] DSPBRIDGE: fix mem+cache API issues
2010-05-27 16:02 [PATCH v3 0/7] DSPBRIDGE: fix mem+cache API issues Ohad Ben-Cohen
` (6 preceding siblings ...)
2010-05-27 16:02 ` [PATCH v3 7/7] DSPBRIDGE: add new PROC_BEGINDMA and PROC_ENDDMA ioctls Ohad Ben-Cohen
@ 2010-06-18 22:57 ` Ramirez Luna, Omar
7 siblings, 0 replies; 11+ messages in thread
From: Ramirez Luna, Omar @ 2010-06-18 22:57 UTC (permalink / raw)
To: Ohad Ben-Cohen, linux-omap@vger.kernel.org
Cc: Felipe Contreras, Gomez Castellanos, Ivan, Kanigeri, Hari,
Guzman Lugo, Fernando, Menon, Nishanth, Hiroshi Doyu
>From: Ohad Ben-Cohen [mailto:ohad@wizery.com]
>
>This patchset introduces an approach to eliminate the direct calls
>to follow_page and to the low level cache APIs.
>
>The patchset works by caching the page information while memory
>is mapped, and then using that information later when needed
>instead of calling follow_page. The low level cache API is then replaced
>by standard DMA API.
>
>Changes from v2:
>
>* Fix rebase error that plagued the v2 series
>* Added some debug logs to help analyzing missing mapping issues
>
>Notes:
>1. The global bridge device struct is used by adding an 'extern'
> to proc. This issue should be handled in a different patch series
> (the struct should not be global; instead, it should be accessible
> to the dspbridge code via one of the context objects. This way we
> will also be able to transform pr_* prints to dev_* prints).
>2. The patchset was tested with testsuite, DMM sample app and various
> MM and recovery scenarios. Many thanks to Ivan Gomez Castellanos
> for the help here. Also many thanks to Felipe Contreras for the
> thorough review, comments and testing.
>
>Thanks,
>Ohad.
>
>---
>If you want, you can also reach me at < ohadb at ti dot com >.
>
>Ohad Ben-Cohen (7):
> DSPBRIDGE: enhance dmm_map_object
> DSPBRIDGE: maintain mapping and page info
> DSPBRIDGE: do not call follow_page
> DSPBRIDGE: do not use low level cache manipulation API
> DSPBRIDGE: remove mem_flush_cache
> DSPBRIDGE: add dspbridge API to mark end of DMA
> DSPBRIDGE: add new PROC_BEGINDMA and PROC_ENDDMA ioctls
>
> arch/arm/plat-omap/include/dspbridge/drv.h | 32 +-
> .../arm/plat-omap/include/dspbridge/dspapi-ioctl.h | 9 +
> arch/arm/plat-omap/include/dspbridge/dspapi.h | 2 +
> arch/arm/plat-omap/include/dspbridge/dspdefs.h | 3 +-
> arch/arm/plat-omap/include/dspbridge/proc.h | 29 ++
> drivers/dsp/bridge/core/io_sm.c | 11 +-
> drivers/dsp/bridge/core/tiomap3430.c | 9 +-
> drivers/dsp/bridge/pmgr/dspapi.c | 34 ++-
> drivers/dsp/bridge/rmgr/drv.c | 33 --
> drivers/dsp/bridge/rmgr/proc.c | 410 ++++++++++++++++----
> 10 files changed, 433 insertions(+), 139 deletions(-)
Pushed to dspbridge.
- omar