* [PATCH 1/9] bus: mhi: ep: Pass mhi_ep_buf_info struct to read/write APIs
2023-11-27 12:45 [PATCH 0/9] bus: mhi: ep: Add async read/write support Manivannan Sadhasivam
@ 2023-11-27 12:45 ` Manivannan Sadhasivam
2023-11-27 12:45 ` [PATCH 2/9] bus: mhi: ep: Rename read_from_host() and write_to_host() APIs Manivannan Sadhasivam
` (9 subsequent siblings)
10 siblings, 0 replies; 22+ messages in thread
From: Manivannan Sadhasivam @ 2023-11-27 12:45 UTC
To: lpieralisi, kw
Cc: kishon, bhelgaas, mhi, linux-arm-msm, linux-pci, linux-kernel,
Manivannan Sadhasivam
In preparation for adding DMA async support, let's pass the parameters to the
read_from_host() and write_to_host() APIs using the mhi_ep_buf_info structure.
No functional change.
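In short, the loose parameters become a single descriptor. A simplified
sketch of the signature change (condensed from the diff below):

	/* Before: individual parameters */
	int (*read_from_host)(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
			      void *to, size_t size);

	/* After: one descriptor, leaving room for async fields later */
	struct mhi_ep_buf_info {
		void *dev_addr;		/* buffer address in the endpoint */
		u64 host_addr;		/* buffer address in the host */
		size_t size;		/* size of the transfer */
	};

	int (*read_from_host)(struct mhi_ep_cntrl *mhi_cntrl,
			      struct mhi_ep_buf_info *buf_info);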
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
---
drivers/bus/mhi/ep/main.c | 23 +++----
drivers/bus/mhi/ep/ring.c | 41 ++++++------
drivers/pci/endpoint/functions/pci-epf-mhi.c | 66 +++++++++++---------
include/linux/mhi_ep.h | 16 ++++-
4 files changed, 84 insertions(+), 62 deletions(-)
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index 4c8773881e1f..cdf5a84d1f21 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -344,10 +344,9 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
struct device *dev = &mhi_cntrl->mhi_dev->dev;
size_t tr_len, read_offset, write_offset;
+ struct mhi_ep_buf_info buf_info = {};
struct mhi_ring_element *el;
bool tr_done = false;
- void *write_addr;
- u64 read_addr;
u32 buf_left;
int ret;
@@ -376,11 +375,13 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
write_offset = len - buf_left;
- read_addr = mhi_chan->tre_loc + read_offset;
- write_addr = result->buf_addr + write_offset;
+
+ buf_info.host_addr = mhi_chan->tre_loc + read_offset;
+ buf_info.dev_addr = result->buf_addr + write_offset;
+ buf_info.size = tr_len;
dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
- ret = mhi_cntrl->read_from_host(mhi_cntrl, read_addr, write_addr, tr_len);
+ ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
if (ret < 0) {
dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n");
return ret;
@@ -503,12 +504,11 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
struct device *dev = &mhi_chan->mhi_dev->dev;
+ struct mhi_ep_buf_info buf_info = {};
struct mhi_ring_element *el;
u32 buf_left, read_offset;
struct mhi_ep_ring *ring;
enum mhi_ev_ccs code;
- void *read_addr;
- u64 write_addr;
size_t tr_len;
u32 tre_len;
int ret;
@@ -537,11 +537,13 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
tr_len = min(buf_left, tre_len);
read_offset = skb->len - buf_left;
- read_addr = skb->data + read_offset;
- write_addr = MHI_TRE_DATA_GET_PTR(el);
+
+ buf_info.dev_addr = skb->data + read_offset;
+ buf_info.host_addr = MHI_TRE_DATA_GET_PTR(el);
+ buf_info.size = tr_len;
dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
- ret = mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len);
+ ret = mhi_cntrl->write_to_host(mhi_cntrl, &buf_info);
if (ret < 0) {
dev_err(dev, "Error writing to the channel\n");
goto err_exit;
@@ -1449,7 +1451,6 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
ret = -ENOMEM;
goto err_destroy_tre_buf_cache;
}
-
INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c
index a1071c13761b..7ea952860def 100644
--- a/drivers/bus/mhi/ep/ring.c
+++ b/drivers/bus/mhi/ep/ring.c
@@ -30,7 +30,8 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
{
struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
- size_t start, copy_size;
+ struct mhi_ep_buf_info buf_info = {};
+ size_t start;
int ret;
/* Don't proceed in the case of event ring. This happens during mhi_ep_ring_start(). */
@@ -43,30 +44,34 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
start = ring->wr_offset;
if (start < end) {
- copy_size = (end - start) * sizeof(struct mhi_ring_element);
- ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase +
- (start * sizeof(struct mhi_ring_element)),
- &ring->ring_cache[start], copy_size);
+ buf_info.size = (end - start) * sizeof(struct mhi_ring_element);
+ buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element));
+ buf_info.dev_addr = &ring->ring_cache[start];
+
+ ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
if (ret < 0)
return ret;
} else {
- copy_size = (ring->ring_size - start) * sizeof(struct mhi_ring_element);
- ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase +
- (start * sizeof(struct mhi_ring_element)),
- &ring->ring_cache[start], copy_size);
+ buf_info.size = (ring->ring_size - start) * sizeof(struct mhi_ring_element);
+ buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element));
+ buf_info.dev_addr = &ring->ring_cache[start];
+
+ ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
if (ret < 0)
return ret;
if (end) {
- ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase,
- &ring->ring_cache[0],
- end * sizeof(struct mhi_ring_element));
+ buf_info.host_addr = ring->rbase;
+ buf_info.dev_addr = &ring->ring_cache[0];
+ buf_info.size = end * sizeof(struct mhi_ring_element);
+
+ ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
if (ret < 0)
return ret;
}
}
- dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, copy_size);
+ dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, buf_info.size);
return 0;
}
@@ -102,6 +107,7 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *e
{
struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ep_buf_info buf_info = {};
size_t old_offset = 0;
u32 num_free_elem;
__le64 rp;
@@ -133,12 +139,11 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *e
rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase);
memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64));
- ret = mhi_cntrl->write_to_host(mhi_cntrl, el, ring->rbase + (old_offset * sizeof(*el)),
- sizeof(*el));
- if (ret < 0)
- return ret;
+ buf_info.host_addr = ring->rbase + (old_offset * sizeof(*el));
+ buf_info.dev_addr = el;
+ buf_info.size = sizeof(*el);
- return 0;
+ return mhi_cntrl->write_to_host(mhi_cntrl, &buf_info);
}
void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
index daa09289eede..6dc918a8a023 100644
--- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
+++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
@@ -209,28 +209,28 @@ static void pci_epf_mhi_raise_irq(struct mhi_ep_cntrl *mhi_cntrl, u32 vector)
vector + 1);
}
-static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
- void *to, size_t size)
+static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_buf_info *buf_info)
{
struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
- size_t offset = get_align_offset(epf_mhi, from);
+ size_t offset = get_align_offset(epf_mhi, buf_info->host_addr);
void __iomem *tre_buf;
phys_addr_t tre_phys;
int ret;
mutex_lock(&epf_mhi->lock);
- ret = __pci_epf_mhi_alloc_map(mhi_cntrl, from, &tre_phys, &tre_buf,
- offset, size);
+ ret = __pci_epf_mhi_alloc_map(mhi_cntrl, buf_info->host_addr, &tre_phys,
+ &tre_buf, offset, buf_info->size);
if (ret) {
mutex_unlock(&epf_mhi->lock);
return ret;
}
- memcpy_fromio(to, tre_buf, size);
+ memcpy_fromio(buf_info->dev_addr, tre_buf, buf_info->size);
- __pci_epf_mhi_unmap_free(mhi_cntrl, from, tre_phys, tre_buf, offset,
- size);
+ __pci_epf_mhi_unmap_free(mhi_cntrl, buf_info->host_addr, tre_phys,
+ tre_buf, offset, buf_info->size);
mutex_unlock(&epf_mhi->lock);
@@ -238,27 +238,27 @@ static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
}
static int pci_epf_mhi_iatu_write(struct mhi_ep_cntrl *mhi_cntrl,
- void *from, u64 to, size_t size)
+ struct mhi_ep_buf_info *buf_info)
{
struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
- size_t offset = get_align_offset(epf_mhi, to);
+ size_t offset = get_align_offset(epf_mhi, buf_info->host_addr);
void __iomem *tre_buf;
phys_addr_t tre_phys;
int ret;
mutex_lock(&epf_mhi->lock);
- ret = __pci_epf_mhi_alloc_map(mhi_cntrl, to, &tre_phys, &tre_buf,
- offset, size);
+ ret = __pci_epf_mhi_alloc_map(mhi_cntrl, buf_info->host_addr, &tre_phys,
+ &tre_buf, offset, buf_info->size);
if (ret) {
mutex_unlock(&epf_mhi->lock);
return ret;
}
- memcpy_toio(tre_buf, from, size);
+ memcpy_toio(tre_buf, buf_info->dev_addr, buf_info->size);
- __pci_epf_mhi_unmap_free(mhi_cntrl, to, tre_phys, tre_buf, offset,
- size);
+ __pci_epf_mhi_unmap_free(mhi_cntrl, buf_info->host_addr, tre_phys,
+ tre_buf, offset, buf_info->size);
mutex_unlock(&epf_mhi->lock);
@@ -270,8 +270,8 @@ static void pci_epf_mhi_dma_callback(void *param)
complete(param);
}
-static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
- void *to, size_t size)
+static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_buf_info *buf_info)
{
struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
@@ -284,13 +284,13 @@ static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
dma_addr_t dst_addr;
int ret;
- if (size < SZ_4K)
- return pci_epf_mhi_iatu_read(mhi_cntrl, from, to, size);
+ if (buf_info->size < SZ_4K)
+ return pci_epf_mhi_iatu_read(mhi_cntrl, buf_info);
mutex_lock(&epf_mhi->lock);
config.direction = DMA_DEV_TO_MEM;
- config.src_addr = from;
+ config.src_addr = buf_info->host_addr;
ret = dmaengine_slave_config(chan, &config);
if (ret) {
@@ -298,14 +298,16 @@ static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
goto err_unlock;
}
- dst_addr = dma_map_single(dma_dev, to, size, DMA_FROM_DEVICE);
+ dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
+ DMA_FROM_DEVICE);
ret = dma_mapping_error(dma_dev, dst_addr);
if (ret) {
dev_err(dev, "Failed to map remote memory\n");
goto err_unlock;
}
- desc = dmaengine_prep_slave_single(chan, dst_addr, size, DMA_DEV_TO_MEM,
+ desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
+ DMA_DEV_TO_MEM,
DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
if (!desc) {
dev_err(dev, "Failed to prepare DMA\n");
@@ -332,15 +334,15 @@ static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
}
err_unmap:
- dma_unmap_single(dma_dev, dst_addr, size, DMA_FROM_DEVICE);
+ dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
err_unlock:
mutex_unlock(&epf_mhi->lock);
return ret;
}
-static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from,
- u64 to, size_t size)
+static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_buf_info *buf_info)
{
struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
@@ -353,13 +355,13 @@ static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from,
dma_addr_t src_addr;
int ret;
- if (size < SZ_4K)
- return pci_epf_mhi_iatu_write(mhi_cntrl, from, to, size);
+ if (buf_info->size < SZ_4K)
+ return pci_epf_mhi_iatu_write(mhi_cntrl, buf_info);
mutex_lock(&epf_mhi->lock);
config.direction = DMA_MEM_TO_DEV;
- config.dst_addr = to;
+ config.dst_addr = buf_info->host_addr;
ret = dmaengine_slave_config(chan, &config);
if (ret) {
@@ -367,14 +369,16 @@ static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from,
goto err_unlock;
}
- src_addr = dma_map_single(dma_dev, from, size, DMA_TO_DEVICE);
+ src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
+ DMA_TO_DEVICE);
ret = dma_mapping_error(dma_dev, src_addr);
if (ret) {
dev_err(dev, "Failed to map remote memory\n");
goto err_unlock;
}
- desc = dmaengine_prep_slave_single(chan, src_addr, size, DMA_MEM_TO_DEV,
+ desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
+ DMA_MEM_TO_DEV,
DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
if (!desc) {
dev_err(dev, "Failed to prepare DMA\n");
@@ -401,7 +405,7 @@ static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from,
}
err_unmap:
- dma_unmap_single(dma_dev, src_addr, size, DMA_TO_DEVICE);
+ dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
err_unlock:
mutex_unlock(&epf_mhi->lock);
diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
index ce85d42b685d..96f3a133540d 100644
--- a/include/linux/mhi_ep.h
+++ b/include/linux/mhi_ep.h
@@ -49,6 +49,18 @@ struct mhi_ep_db_info {
u32 status;
};
+/**
+ * struct mhi_ep_buf_info - MHI Endpoint transfer buffer info
+ * @dev_addr: Address of the buffer in endpoint
+ * @host_addr: Address of the buffer in host
+ * @size: Size of the buffer
+ */
+struct mhi_ep_buf_info {
+ void *dev_addr;
+ u64 host_addr;
+ size_t size;
+};
+
/**
* struct mhi_ep_cntrl - MHI Endpoint controller structure
* @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI
@@ -137,8 +149,8 @@ struct mhi_ep_cntrl {
void __iomem **virt, size_t size);
void (*unmap_free)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t phys,
void __iomem *virt, size_t size);
- int (*read_from_host)(struct mhi_ep_cntrl *mhi_cntrl, u64 from, void *to, size_t size);
- int (*write_to_host)(struct mhi_ep_cntrl *mhi_cntrl, void *from, u64 to, size_t size);
+ int (*read_from_host)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+ int (*write_to_host)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
enum mhi_state mhi_state;
--
2.25.1
* [PATCH 2/9] bus: mhi: ep: Rename read_from_host() and write_to_host() APIs
2023-11-27 12:45 [PATCH 0/9] bus: mhi: ep: Add async read/write support Manivannan Sadhasivam
2023-11-27 12:45 ` [PATCH 1/9] bus: mhi: ep: Pass mhi_ep_buf_info struct to read/write APIs Manivannan Sadhasivam
@ 2023-11-27 12:45 ` Manivannan Sadhasivam
2023-11-27 12:45 ` [PATCH 3/9] bus: mhi: ep: Introduce async read/write callbacks Manivannan Sadhasivam
` (8 subsequent siblings)
10 siblings, 0 replies; 22+ messages in thread
From: Manivannan Sadhasivam @ 2023-11-27 12:45 UTC
To: lpieralisi, kw
Cc: kishon, bhelgaas, mhi, linux-arm-msm, linux-pci, linux-kernel,
Manivannan Sadhasivam
In preparation for adding async API support, let's rename the existing
APIs to read_sync() and write_sync() to make it explicit that these APIs
are used for synchronous read/write.
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
---
drivers/bus/mhi/ep/main.c | 4 ++--
drivers/bus/mhi/ep/ring.c | 8 ++++----
drivers/pci/endpoint/functions/pci-epf-mhi.c | 8 ++++----
include/linux/mhi_ep.h | 8 ++++----
4 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index cdf5a84d1f21..5748a1da0803 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -381,7 +381,7 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
buf_info.size = tr_len;
dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
- ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
+ ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
if (ret < 0) {
dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n");
return ret;
@@ -543,7 +543,7 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
buf_info.size = tr_len;
dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
- ret = mhi_cntrl->write_to_host(mhi_cntrl, &buf_info);
+ ret = mhi_cntrl->write_sync(mhi_cntrl, &buf_info);
if (ret < 0) {
dev_err(dev, "Error writing to the channel\n");
goto err_exit;
diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c
index 7ea952860def..aeb53b2c34a8 100644
--- a/drivers/bus/mhi/ep/ring.c
+++ b/drivers/bus/mhi/ep/ring.c
@@ -48,7 +48,7 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element));
buf_info.dev_addr = &ring->ring_cache[start];
- ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
+ ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
if (ret < 0)
return ret;
} else {
@@ -56,7 +56,7 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element));
buf_info.dev_addr = &ring->ring_cache[start];
- ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
+ ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
if (ret < 0)
return ret;
@@ -65,7 +65,7 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
buf_info.dev_addr = &ring->ring_cache[0];
buf_info.size = end * sizeof(struct mhi_ring_element);
- ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
+ ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
if (ret < 0)
return ret;
}
@@ -143,7 +143,7 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *e
buf_info.dev_addr = el;
buf_info.size = sizeof(*el);
- return mhi_cntrl->write_to_host(mhi_cntrl, &buf_info);
+ return mhi_cntrl->write_sync(mhi_cntrl, &buf_info);
}
void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
index 6dc918a8a023..34e7191f9508 100644
--- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
+++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
@@ -536,11 +536,11 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf)
mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map;
mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free;
if (info->flags & MHI_EPF_USE_DMA) {
- mhi_cntrl->read_from_host = pci_epf_mhi_edma_read;
- mhi_cntrl->write_to_host = pci_epf_mhi_edma_write;
+ mhi_cntrl->read_sync = pci_epf_mhi_edma_read;
+ mhi_cntrl->write_sync = pci_epf_mhi_edma_write;
} else {
- mhi_cntrl->read_from_host = pci_epf_mhi_iatu_read;
- mhi_cntrl->write_to_host = pci_epf_mhi_iatu_write;
+ mhi_cntrl->read_sync = pci_epf_mhi_iatu_read;
+ mhi_cntrl->write_sync = pci_epf_mhi_iatu_write;
}
/* Register the MHI EP controller */
diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
index 96f3a133540d..b96b543bf2f6 100644
--- a/include/linux/mhi_ep.h
+++ b/include/linux/mhi_ep.h
@@ -94,8 +94,8 @@ struct mhi_ep_buf_info {
* @raise_irq: CB function for raising IRQ to the host
* @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it
* @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context
- * @read_from_host: CB function for reading from host memory from endpoint
- * @write_to_host: CB function for writing to host memory from endpoint
+ * @read_sync: CB function for reading from host memory synchronously
+ * @write_sync: CB function for writing to host memory synchronously
* @mhi_state: MHI Endpoint state
* @max_chan: Maximum channels supported by the endpoint controller
* @mru: MRU (Maximum Receive Unit) value of the endpoint controller
@@ -149,8 +149,8 @@ struct mhi_ep_cntrl {
void __iomem **virt, size_t size);
void (*unmap_free)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t phys,
void __iomem *virt, size_t size);
- int (*read_from_host)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
- int (*write_to_host)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+ int (*read_sync)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+ int (*write_sync)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
enum mhi_state mhi_state;
--
2.25.1
* [PATCH 3/9] bus: mhi: ep: Introduce async read/write callbacks
2023-11-27 12:45 [PATCH 0/9] bus: mhi: ep: Add async read/write support Manivannan Sadhasivam
2023-11-27 12:45 ` [PATCH 1/9] bus: mhi: ep: Pass mhi_ep_buf_info struct to read/write APIs Manivannan Sadhasivam
2023-11-27 12:45 ` [PATCH 2/9] bus: mhi: ep: Rename read_from_host() and write_to_host() APIs Manivannan Sadhasivam
@ 2023-11-27 12:45 ` Manivannan Sadhasivam
2023-11-27 12:45 ` [PATCH 4/9] PCI: epf-mhi: Simulate async read/write using iATU Manivannan Sadhasivam
` (7 subsequent siblings)
10 siblings, 0 replies; 22+ messages in thread
From: Manivannan Sadhasivam @ 2023-11-27 12:45 UTC
To: lpieralisi, kw
Cc: kishon, bhelgaas, mhi, linux-arm-msm, linux-pci, linux-kernel,
Manivannan Sadhasivam
These callbacks can be implemented by controller drivers to perform async
read/write operations, which increase throughput.
To aid the async operation, a completion callback is also introduced.
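As a rough usage sketch (hypothetical caller code, not part of this patch;
mhi_ep_skb_completion() is an illustrative name), a client of these
callbacks could look like:

	/* Completion handler run by the controller driver once DMA finishes */
	static void mhi_ep_skb_completion(struct mhi_ep_buf_info *buf_info)
	{
		struct sk_buff *skb = buf_info->cb_buf;

		consume_skb(skb);	/* transfer done, release the buffer */
	}

	/* In the transmit path, assuming the controller provides write_async() */
	buf_info.dev_addr  = skb->data + read_offset;
	buf_info.host_addr = MHI_TRE_DATA_GET_PTR(el);
	buf_info.size      = tr_len;
	buf_info.cb        = mhi_ep_skb_completion;
	buf_info.cb_buf    = skb;

	ret = mhi_cntrl->write_async(mhi_cntrl, &buf_info);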
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
---
include/linux/mhi_ep.h | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
index b96b543bf2f6..14c6e8d3f573 100644
--- a/include/linux/mhi_ep.h
+++ b/include/linux/mhi_ep.h
@@ -54,11 +54,16 @@ struct mhi_ep_db_info {
* @dev_addr: Address of the buffer in endpoint
* @host_addr: Address of the buffer in host
* @size: Size of the buffer
+ * @cb: Callback to be executed by controller drivers after transfer completion (async)
+ * @cb_buf: Opaque buffer to be passed to the callback
*/
struct mhi_ep_buf_info {
void *dev_addr;
u64 host_addr;
size_t size;
+
+ void (*cb)(struct mhi_ep_buf_info *buf_info);
+ void *cb_buf;
};
/**
@@ -96,6 +101,8 @@ struct mhi_ep_buf_info {
* @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context
* @read_sync: CB function for reading from host memory synchronously
* @write_sync: CB function for writing to host memory synchronously
+ * @read_async: CB function for reading from host memory asynchronously
+ * @write_async: CB function for writing to host memory asynchronously
* @mhi_state: MHI Endpoint state
* @max_chan: Maximum channels supported by the endpoint controller
* @mru: MRU (Maximum Receive Unit) value of the endpoint controller
@@ -151,6 +158,8 @@ struct mhi_ep_cntrl {
void __iomem *virt, size_t size);
int (*read_sync)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
int (*write_sync)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+ int (*read_async)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+ int (*write_async)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
enum mhi_state mhi_state;
--
2.25.1
* [PATCH 4/9] PCI: epf-mhi: Simulate async read/write using iATU
2023-11-27 12:45 [PATCH 0/9] bus: mhi: ep: Add async read/write support Manivannan Sadhasivam
` (2 preceding siblings ...)
2023-11-27 12:45 ` [PATCH 3/9] bus: mhi: ep: Introduce async read/write callbacks Manivannan Sadhasivam
@ 2023-11-27 12:45 ` Manivannan Sadhasivam
2023-12-13 18:49 ` Krzysztof Wilczyński
2023-11-27 12:45 ` [PATCH 5/9] PCI: epf-mhi: Add support for DMA async read/write operation Manivannan Sadhasivam
` (6 subsequent siblings)
10 siblings, 1 reply; 22+ messages in thread
From: Manivannan Sadhasivam @ 2023-11-27 12:45 UTC
To: lpieralisi, kw
Cc: kishon, bhelgaas, mhi, linux-arm-msm, linux-pci, linux-kernel,
Manivannan Sadhasivam
Even though iATU only supports synchronous read/write, the MHI stack may
call the async read/write callbacks without knowing the limitations of the
controller driver. So, in order to maintain compatibility, let's simulate
the async read/write operation with iATU by invoking the completion
callback after the memcpy.
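The resulting pattern is just the existing synchronous copy followed by the
completion callback, roughly (write side shown, matching the diff below):

	memcpy_toio(tre_buf, buf_info->dev_addr, buf_info->size);
	/* ... unmap the window and drop the lock ... */

	/* Report "async" completion even though the copy already finished */
	if (buf_info->cb)
		buf_info->cb(buf_info);

	return 0;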
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
---
drivers/pci/endpoint/functions/pci-epf-mhi.c | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
index 34e7191f9508..7214f4da733b 100644
--- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
+++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
@@ -234,6 +234,9 @@ static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl,
mutex_unlock(&epf_mhi->lock);
+ if (buf_info->cb)
+ buf_info->cb(buf_info);
+
return 0;
}
@@ -262,6 +265,9 @@ static int pci_epf_mhi_iatu_write(struct mhi_ep_cntrl *mhi_cntrl,
mutex_unlock(&epf_mhi->lock);
+ if (buf_info->cb)
+ buf_info->cb(buf_info);
+
return 0;
}
--
2.25.1
* Re: [PATCH 4/9] PCI: epf-mhi: Simulate async read/write using iATU
2023-11-27 12:45 ` [PATCH 4/9] PCI: epf-mhi: Simulate async read/write using iATU Manivannan Sadhasivam
@ 2023-12-13 18:49 ` Krzysztof Wilczyński
0 siblings, 0 replies; 22+ messages in thread
From: Krzysztof Wilczyński @ 2023-12-13 18:49 UTC
To: Manivannan Sadhasivam
Cc: lpieralisi, kishon, bhelgaas, mhi, linux-arm-msm, linux-pci,
linux-kernel
Hello,
> Even though iATU only supports synchronous read/write, the MHI stack may
> call the async read/write callbacks without knowing the limitations of the
> controller driver. So, in order to maintain compatibility, let's simulate
> the async read/write operation with iATU by invoking the completion
> callback after the memcpy.
>
> Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
> ---
> drivers/pci/endpoint/functions/pci-epf-mhi.c | 6 ++++++
> 1 file changed, 6 insertions(+)
>
> diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
> index 34e7191f9508..7214f4da733b 100644
> --- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
> +++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
> @@ -234,6 +234,9 @@ static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl,
>
> mutex_unlock(&epf_mhi->lock);
>
> + if (buf_info->cb)
> + buf_info->cb(buf_info);
> +
> return 0;
> }
>
> @@ -262,6 +265,9 @@ static int pci_epf_mhi_iatu_write(struct mhi_ep_cntrl *mhi_cntrl,
>
> mutex_unlock(&epf_mhi->lock);
>
> + if (buf_info->cb)
> + buf_info->cb(buf_info);
> +
> return 0;
> }
Looks good!
Reviewed-by: Krzysztof Wilczyński <kw@linux.com>
Krzysztof
* [PATCH 5/9] PCI: epf-mhi: Add support for DMA async read/write operation
2023-11-27 12:45 [PATCH 0/9] bus: mhi: ep: Add async read/write support Manivannan Sadhasivam
` (3 preceding siblings ...)
2023-11-27 12:45 ` [PATCH 4/9] PCI: epf-mhi: Simulate async read/write using iATU Manivannan Sadhasivam
@ 2023-11-27 12:45 ` Manivannan Sadhasivam
2023-12-13 18:50 ` Krzysztof Wilczyński
2023-11-27 12:45 ` [PATCH 6/9] PCI: epf-mhi: Enable MHI async read/write support Manivannan Sadhasivam
` (5 subsequent siblings)
10 siblings, 1 reply; 22+ messages in thread
From: Manivannan Sadhasivam @ 2023-11-27 12:45 UTC
To: lpieralisi, kw
Cc: kishon, bhelgaas, mhi, linux-arm-msm, linux-pci, linux-kernel,
Manivannan Sadhasivam
The driver currently supports only the sync read/write operation, i.e., it
waits for the DMA transfer to complete before returning to the caller
(MHI stack). But this is sub-optimal and defeats the actual purpose of
using DMA.
So let's add support for DMA async read/write operation by returning to the
caller immediately instead of waiting for the DMA transfer to complete. When
the completion actually happens later, the driver will be notified via the
DMA completion handler and will in turn notify the caller using the newly
introduced callback in "struct mhi_ep_buf_info".
Since the DMA completion handler is invoked from the interrupt context, a
separate workqueue (epf_mhi->dma_wq) is used to notify the caller about the
completion of the transfer.
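The resulting completion flow, condensed from the diff below:

	/*
	 * DMA IRQ context:
	 *   pci_epf_mhi_dma_async_callback()
	 *     -> list_add_tail(&transfer->node, &epf_mhi->dma_list)
	 *     -> queue_work(epf_mhi->dma_wq, &epf_mhi->dma_work)
	 *
	 * Workqueue context:
	 *   pci_epf_mhi_dma_worker()
	 *     -> splice dma_list onto a private list
	 *     -> per transfer: dma_unmap_single(), buf_info->cb(buf_info), kfree()
	 */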
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
---
drivers/pci/endpoint/functions/pci-epf-mhi.c | 231 ++++++++++++++++++-
1 file changed, 228 insertions(+), 3 deletions(-)
diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
index 7214f4da733b..3d09a37e5f7c 100644
--- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
+++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
@@ -21,6 +21,15 @@
/* Platform specific flags */
#define MHI_EPF_USE_DMA BIT(0)
+struct pci_epf_mhi_dma_transfer {
+ struct pci_epf_mhi *epf_mhi;
+ struct mhi_ep_buf_info buf_info;
+ struct list_head node;
+ dma_addr_t paddr;
+ enum dma_data_direction dir;
+ size_t size;
+};
+
struct pci_epf_mhi_ep_info {
const struct mhi_ep_cntrl_config *config;
struct pci_epf_header *epf_header;
@@ -124,6 +133,10 @@ struct pci_epf_mhi {
resource_size_t mmio_phys;
struct dma_chan *dma_chan_tx;
struct dma_chan *dma_chan_rx;
+ struct workqueue_struct *dma_wq;
+ struct work_struct dma_work;
+ struct list_head dma_list;
+ spinlock_t list_lock;
u32 mmio_size;
int irq;
};
@@ -418,6 +431,198 @@ static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl,
return ret;
}
+static void pci_epf_mhi_dma_worker(struct work_struct *work)
+{
+ struct pci_epf_mhi *epf_mhi = container_of(work, struct pci_epf_mhi, dma_work);
+ struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+ struct pci_epf_mhi_dma_transfer *itr, *tmp;
+ struct mhi_ep_buf_info *buf_info;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&epf_mhi->list_lock, flags);
+ list_splice_tail_init(&epf_mhi->dma_list, &head);
+ spin_unlock_irqrestore(&epf_mhi->list_lock, flags);
+
+ list_for_each_entry_safe(itr, tmp, &head, node) {
+ list_del(&itr->node);
+ dma_unmap_single(dma_dev, itr->paddr, itr->size, itr->dir);
+ buf_info = &itr->buf_info;
+ buf_info->cb(buf_info);
+ kfree(itr);
+ }
+}
+
+static void pci_epf_mhi_dma_async_callback(void *param)
+{
+ struct pci_epf_mhi_dma_transfer *transfer = param;
+ struct pci_epf_mhi *epf_mhi = transfer->epf_mhi;
+
+ spin_lock(&epf_mhi->list_lock);
+ list_add_tail(&transfer->node, &epf_mhi->dma_list);
+ spin_unlock(&epf_mhi->list_lock);
+
+ queue_work(epf_mhi->dma_wq, &epf_mhi->dma_work);
+}
+
+static int pci_epf_mhi_edma_read_async(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_buf_info *buf_info)
+{
+ struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
+ struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+ struct pci_epf_mhi_dma_transfer *transfer = NULL;
+ struct dma_chan *chan = epf_mhi->dma_chan_rx;
+ struct device *dev = &epf_mhi->epf->dev;
+ DECLARE_COMPLETION_ONSTACK(complete);
+ struct dma_async_tx_descriptor *desc;
+ struct dma_slave_config config = {};
+ dma_cookie_t cookie;
+ dma_addr_t dst_addr;
+ int ret;
+
+ mutex_lock(&epf_mhi->lock);
+
+ config.direction = DMA_DEV_TO_MEM;
+ config.src_addr = buf_info->host_addr;
+
+ ret = dmaengine_slave_config(chan, &config);
+ if (ret) {
+ dev_err(dev, "Failed to configure DMA channel\n");
+ goto err_unlock;
+ }
+
+ dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
+ DMA_FROM_DEVICE);
+ ret = dma_mapping_error(dma_dev, dst_addr);
+ if (ret) {
+ dev_err(dev, "Failed to map remote memory\n");
+ goto err_unlock;
+ }
+
+ desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
+ DMA_DEV_TO_MEM,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "Failed to prepare DMA\n");
+ ret = -EIO;
+ goto err_unmap;
+ }
+
+ transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
+ if (!transfer) {
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ transfer->epf_mhi = epf_mhi;
+ transfer->paddr = dst_addr;
+ transfer->size = buf_info->size;
+ transfer->dir = DMA_FROM_DEVICE;
+ memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));
+
+ desc->callback = pci_epf_mhi_dma_async_callback;
+ desc->callback_param = transfer;
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(dev, "Failed to do DMA submit\n");
+ goto err_free_transfer;
+ }
+
+ dma_async_issue_pending(chan);
+
+ goto err_unlock;
+
+err_free_transfer:
+ kfree(transfer);
+err_unmap:
+ dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
+err_unlock:
+ mutex_unlock(&epf_mhi->lock);
+
+ return ret;
+}
+
+static int pci_epf_mhi_edma_write_async(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_buf_info *buf_info)
+{
+ struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
+ struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+ struct pci_epf_mhi_dma_transfer *transfer = NULL;
+ struct dma_chan *chan = epf_mhi->dma_chan_tx;
+ struct device *dev = &epf_mhi->epf->dev;
+ DECLARE_COMPLETION_ONSTACK(complete);
+ struct dma_async_tx_descriptor *desc;
+ struct dma_slave_config config = {};
+ dma_cookie_t cookie;
+ dma_addr_t src_addr;
+ int ret;
+
+ mutex_lock(&epf_mhi->lock);
+
+ config.direction = DMA_MEM_TO_DEV;
+ config.dst_addr = buf_info->host_addr;
+
+ ret = dmaengine_slave_config(chan, &config);
+ if (ret) {
+ dev_err(dev, "Failed to configure DMA channel\n");
+ goto err_unlock;
+ }
+
+ src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(dma_dev, src_addr);
+ if (ret) {
+ dev_err(dev, "Failed to map remote memory\n");
+ goto err_unlock;
+ }
+
+ desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
+ DMA_MEM_TO_DEV,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "Failed to prepare DMA\n");
+ ret = -EIO;
+ goto err_unmap;
+ }
+
+ transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
+ if (!transfer) {
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ transfer->epf_mhi = epf_mhi;
+ transfer->paddr = src_addr;
+ transfer->size = buf_info->size;
+ transfer->dir = DMA_TO_DEVICE;
+ memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));
+
+ desc->callback = pci_epf_mhi_dma_async_callback;
+ desc->callback_param = transfer;
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(dev, "Failed to do DMA submit\n");
+ goto err_free_transfer;
+ }
+
+ dma_async_issue_pending(chan);
+
+ goto err_unlock;
+
+err_free_transfer:
+ kfree(transfer);
+err_unmap:
+ dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
+err_unlock:
+ mutex_unlock(&epf_mhi->lock);
+
+ return ret;
+}
+
struct epf_dma_filter {
struct device *dev;
u32 dma_mask;
@@ -441,6 +646,7 @@ static int pci_epf_mhi_dma_init(struct pci_epf_mhi *epf_mhi)
struct device *dev = &epf_mhi->epf->dev;
struct epf_dma_filter filter;
dma_cap_mask_t mask;
+ int ret;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
@@ -459,16 +665,35 @@ static int pci_epf_mhi_dma_init(struct pci_epf_mhi *epf_mhi)
&filter);
if (IS_ERR_OR_NULL(epf_mhi->dma_chan_rx)) {
dev_err(dev, "Failed to request rx channel\n");
- dma_release_channel(epf_mhi->dma_chan_tx);
- epf_mhi->dma_chan_tx = NULL;
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_release_tx;
+ }
+
+ epf_mhi->dma_wq = alloc_workqueue("pci_epf_mhi_dma_wq", 0, 0);
+ if (!epf_mhi->dma_wq) {
+ ret = -ENOMEM;
+ goto err_release_rx;
}
+ INIT_LIST_HEAD(&epf_mhi->dma_list);
+ INIT_WORK(&epf_mhi->dma_work, pci_epf_mhi_dma_worker);
+ spin_lock_init(&epf_mhi->list_lock);
+
return 0;
+
+err_release_rx:
+ dma_release_channel(epf_mhi->dma_chan_rx);
+ epf_mhi->dma_chan_rx = NULL;
+err_release_tx:
+ dma_release_channel(epf_mhi->dma_chan_tx);
+ epf_mhi->dma_chan_tx = NULL;
+
+ return ret;
}
static void pci_epf_mhi_dma_deinit(struct pci_epf_mhi *epf_mhi)
{
+ destroy_workqueue(epf_mhi->dma_wq);
dma_release_channel(epf_mhi->dma_chan_tx);
dma_release_channel(epf_mhi->dma_chan_rx);
epf_mhi->dma_chan_tx = NULL;
--
2.25.1
* Re: [PATCH 5/9] PCI: epf-mhi: Add support for DMA async read/write operation
2023-11-27 12:45 ` [PATCH 5/9] PCI: epf-mhi: Add support for DMA async read/write operation Manivannan Sadhasivam
@ 2023-12-13 18:50 ` Krzysztof Wilczyński
0 siblings, 0 replies; 22+ messages in thread
From: Krzysztof Wilczyński @ 2023-12-13 18:50 UTC (permalink / raw)
To: Manivannan Sadhasivam
Cc: lpieralisi, kishon, bhelgaas, mhi, linux-arm-msm, linux-pci,
linux-kernel
Hello,
> The driver currently supports only the sync read/write operation, i.e., it
> waits for the DMA transfer to complete before returning to the caller
> (MHI stack). But this is sub-optimal and defeats the actual purpose of
> using DMA.
>
> So let's add support for DMA async read/write operation by returning to the
> caller immediately instead of waiting for the DMA transfer to complete. When
> the completion actually happens later, the driver will be notified via the
> DMA completion handler and will in turn notify the caller using the newly
> introduced callback in "struct mhi_ep_buf_info".
>
> Since the DMA completion handler is invoked from the interrupt context, a
> separate workqueue (epf_mhi->dma_wq) is used to notify the caller about the
> completion of the transfer.
>
> Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
> ---
> drivers/pci/endpoint/functions/pci-epf-mhi.c | 231 ++++++++++++++++++-
> 1 file changed, 228 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
> index 7214f4da733b..3d09a37e5f7c 100644
> --- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
> +++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
> @@ -21,6 +21,15 @@
> /* Platform specific flags */
> #define MHI_EPF_USE_DMA BIT(0)
>
> +struct pci_epf_mhi_dma_transfer {
> + struct pci_epf_mhi *epf_mhi;
> + struct mhi_ep_buf_info buf_info;
> + struct list_head node;
> + dma_addr_t paddr;
> + enum dma_data_direction dir;
> + size_t size;
> +};
> +
> struct pci_epf_mhi_ep_info {
> const struct mhi_ep_cntrl_config *config;
> struct pci_epf_header *epf_header;
> @@ -124,6 +133,10 @@ struct pci_epf_mhi {
> resource_size_t mmio_phys;
> struct dma_chan *dma_chan_tx;
> struct dma_chan *dma_chan_rx;
> + struct workqueue_struct *dma_wq;
> + struct work_struct dma_work;
> + struct list_head dma_list;
> + spinlock_t list_lock;
> u32 mmio_size;
> int irq;
> };
> @@ -418,6 +431,198 @@ static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl,
> return ret;
> }
>
> +static void pci_epf_mhi_dma_worker(struct work_struct *work)
> +{
> + struct pci_epf_mhi *epf_mhi = container_of(work, struct pci_epf_mhi, dma_work);
> + struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
> + struct pci_epf_mhi_dma_transfer *itr, *tmp;
> + struct mhi_ep_buf_info *buf_info;
> + unsigned long flags;
> + LIST_HEAD(head);
> +
> + spin_lock_irqsave(&epf_mhi->list_lock, flags);
> + list_splice_tail_init(&epf_mhi->dma_list, &head);
> + spin_unlock_irqrestore(&epf_mhi->list_lock, flags);
> +
> + list_for_each_entry_safe(itr, tmp, &head, node) {
> + list_del(&itr->node);
> + dma_unmap_single(dma_dev, itr->paddr, itr->size, itr->dir);
> + buf_info = &itr->buf_info;
> + buf_info->cb(buf_info);
> + kfree(itr);
> + }
> +}
> +
> +static void pci_epf_mhi_dma_async_callback(void *param)
> +{
> + struct pci_epf_mhi_dma_transfer *transfer = param;
> + struct pci_epf_mhi *epf_mhi = transfer->epf_mhi;
> +
> + spin_lock(&epf_mhi->list_lock);
> + list_add_tail(&transfer->node, &epf_mhi->dma_list);
> + spin_unlock(&epf_mhi->list_lock);
> +
> + queue_work(epf_mhi->dma_wq, &epf_mhi->dma_work);
> +}
> +
> +static int pci_epf_mhi_edma_read_async(struct mhi_ep_cntrl *mhi_cntrl,
> + struct mhi_ep_buf_info *buf_info)
> +{
> + struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
> + struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
> + struct pci_epf_mhi_dma_transfer *transfer = NULL;
> + struct dma_chan *chan = epf_mhi->dma_chan_rx;
> + struct device *dev = &epf_mhi->epf->dev;
> + DECLARE_COMPLETION_ONSTACK(complete);
> + struct dma_async_tx_descriptor *desc;
> + struct dma_slave_config config = {};
> + dma_cookie_t cookie;
> + dma_addr_t dst_addr;
> + int ret;
> +
> + mutex_lock(&epf_mhi->lock);
> +
> + config.direction = DMA_DEV_TO_MEM;
> + config.src_addr = buf_info->host_addr;
> +
> + ret = dmaengine_slave_config(chan, &config);
> + if (ret) {
> + dev_err(dev, "Failed to configure DMA channel\n");
> + goto err_unlock;
> + }
> +
> + dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
> + DMA_FROM_DEVICE);
> + ret = dma_mapping_error(dma_dev, dst_addr);
> + if (ret) {
> + dev_err(dev, "Failed to map remote memory\n");
> + goto err_unlock;
> + }
> +
> + desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
> + DMA_DEV_TO_MEM,
> + DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
> + if (!desc) {
> + dev_err(dev, "Failed to prepare DMA\n");
> + ret = -EIO;
> + goto err_unmap;
> + }
> +
> + transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
> + if (!transfer) {
> + ret = -ENOMEM;
> + goto err_unmap;
> + }
> +
> + transfer->epf_mhi = epf_mhi;
> + transfer->paddr = dst_addr;
> + transfer->size = buf_info->size;
> + transfer->dir = DMA_FROM_DEVICE;
> + memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));
> +
> + desc->callback = pci_epf_mhi_dma_async_callback;
> + desc->callback_param = transfer;
> +
> + cookie = dmaengine_submit(desc);
> + ret = dma_submit_error(cookie);
> + if (ret) {
> + dev_err(dev, "Failed to do DMA submit\n");
> + goto err_free_transfer;
> + }
> +
> + dma_async_issue_pending(chan);
> +
> + goto err_unlock;
> +
> +err_free_transfer:
> + kfree(transfer);
> +err_unmap:
> + dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
> +err_unlock:
> + mutex_unlock(&epf_mhi->lock);
> +
> + return ret;
> +}
> +
> +static int pci_epf_mhi_edma_write_async(struct mhi_ep_cntrl *mhi_cntrl,
> + struct mhi_ep_buf_info *buf_info)
> +{
> + struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
> + struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
> + struct pci_epf_mhi_dma_transfer *transfer = NULL;
> + struct dma_chan *chan = epf_mhi->dma_chan_tx;
> + struct device *dev = &epf_mhi->epf->dev;
> + DECLARE_COMPLETION_ONSTACK(complete);
> + struct dma_async_tx_descriptor *desc;
> + struct dma_slave_config config = {};
> + dma_cookie_t cookie;
> + dma_addr_t src_addr;
> + int ret;
> +
> + mutex_lock(&epf_mhi->lock);
> +
> + config.direction = DMA_MEM_TO_DEV;
> + config.dst_addr = buf_info->host_addr;
> +
> + ret = dmaengine_slave_config(chan, &config);
> + if (ret) {
> + dev_err(dev, "Failed to configure DMA channel\n");
> + goto err_unlock;
> + }
> +
> + src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
> + DMA_TO_DEVICE);
> + ret = dma_mapping_error(dma_dev, src_addr);
> + if (ret) {
> + dev_err(dev, "Failed to map remote memory\n");
> + goto err_unlock;
> + }
> +
> + desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
> + DMA_MEM_TO_DEV,
> + DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
> + if (!desc) {
> + dev_err(dev, "Failed to prepare DMA\n");
> + ret = -EIO;
> + goto err_unmap;
> + }
> +
> + transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
> + if (!transfer) {
> + ret = -ENOMEM;
> + goto err_unmap;
> + }
> +
> + transfer->epf_mhi = epf_mhi;
> + transfer->paddr = src_addr;
> + transfer->size = buf_info->size;
> + transfer->dir = DMA_TO_DEVICE;
> + memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));
> +
> + desc->callback = pci_epf_mhi_dma_async_callback;
> + desc->callback_param = transfer;
> +
> + cookie = dmaengine_submit(desc);
> + ret = dma_submit_error(cookie);
> + if (ret) {
> + dev_err(dev, "Failed to do DMA submit\n");
> + goto err_free_transfer;
> + }
> +
> + dma_async_issue_pending(chan);
> +
> + goto err_unlock;
> +
> +err_free_transfer:
> + kfree(transfer);
> +err_unmap:
> + dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
> +err_unlock:
> + mutex_unlock(&epf_mhi->lock);
> +
> + return ret;
> +}
> +
> struct epf_dma_filter {
> struct device *dev;
> u32 dma_mask;
> @@ -441,6 +646,7 @@ static int pci_epf_mhi_dma_init(struct pci_epf_mhi *epf_mhi)
> struct device *dev = &epf_mhi->epf->dev;
> struct epf_dma_filter filter;
> dma_cap_mask_t mask;
> + int ret;
>
> dma_cap_zero(mask);
> dma_cap_set(DMA_SLAVE, mask);
> @@ -459,16 +665,35 @@ static int pci_epf_mhi_dma_init(struct pci_epf_mhi *epf_mhi)
> &filter);
> if (IS_ERR_OR_NULL(epf_mhi->dma_chan_rx)) {
> dev_err(dev, "Failed to request rx channel\n");
> - dma_release_channel(epf_mhi->dma_chan_tx);
> - epf_mhi->dma_chan_tx = NULL;
> - return -ENODEV;
> + ret = -ENODEV;
> + goto err_release_tx;
> + }
> +
> + epf_mhi->dma_wq = alloc_workqueue("pci_epf_mhi_dma_wq", 0, 0);
> + if (!epf_mhi->dma_wq) {
> + ret = -ENOMEM;
> + goto err_release_rx;
> }
>
> + INIT_LIST_HEAD(&epf_mhi->dma_list);
> + INIT_WORK(&epf_mhi->dma_work, pci_epf_mhi_dma_worker);
> + spin_lock_init(&epf_mhi->list_lock);
> +
> return 0;
> +
> +err_release_rx:
> + dma_release_channel(epf_mhi->dma_chan_rx);
> + epf_mhi->dma_chan_rx = NULL;
> +err_release_tx:
> + dma_release_channel(epf_mhi->dma_chan_tx);
> + epf_mhi->dma_chan_tx = NULL;
> +
> + return ret;
> }
>
> static void pci_epf_mhi_dma_deinit(struct pci_epf_mhi *epf_mhi)
> {
> + destroy_workqueue(epf_mhi->dma_wq);
> dma_release_channel(epf_mhi->dma_chan_tx);
> dma_release_channel(epf_mhi->dma_chan_rx);
> epf_mhi->dma_chan_tx = NULL;
Looks good!
Reviewed-by: Krzysztof Wilczyński <kw@linux.com>
Krzysztof
* [PATCH 6/9] PCI: epf-mhi: Enable MHI async read/write support
2023-11-27 12:45 [PATCH 0/9] bus: mhi: ep: Add async read/write support Manivannan Sadhasivam
` (4 preceding siblings ...)
2023-11-27 12:45 ` [PATCH 5/9] PCI: epf-mhi: Add support for DMA async read/write operation Manivannan Sadhasivam
@ 2023-11-27 12:45 ` Manivannan Sadhasivam
2023-12-13 18:48 ` Krzysztof Wilczyński
2023-12-14 9:40 ` Krishna Chaitanya Chundru
2023-11-27 12:45 ` [PATCH 7/9] bus: mhi: ep: Add support for async DMA write operation Manivannan Sadhasivam
` (4 subsequent siblings)
10 siblings, 2 replies; 22+ messages in thread
From: Manivannan Sadhasivam @ 2023-11-27 12:45 UTC
To: lpieralisi, kw
Cc: kishon, bhelgaas, mhi, linux-arm-msm, linux-pci, linux-kernel,
Manivannan Sadhasivam
Now that both eDMA and iATU are prepared to support async transfer, let's
enable MHI async read/write by supplying the relevant callbacks.
In the absence of eDMA, iATU will be used for both sync and async
operations.
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
---
drivers/pci/endpoint/functions/pci-epf-mhi.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
index 3d09a37e5f7c..d3d6a1054036 100644
--- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
+++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
@@ -766,12 +766,13 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf)
mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq;
mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map;
mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free;
+ mhi_cntrl->read_sync = mhi_cntrl->read_async = pci_epf_mhi_iatu_read;
+ mhi_cntrl->write_sync = mhi_cntrl->write_async = pci_epf_mhi_iatu_write;
if (info->flags & MHI_EPF_USE_DMA) {
mhi_cntrl->read_sync = pci_epf_mhi_edma_read;
mhi_cntrl->write_sync = pci_epf_mhi_edma_write;
- } else {
- mhi_cntrl->read_sync = pci_epf_mhi_iatu_read;
- mhi_cntrl->write_sync = pci_epf_mhi_iatu_write;
+ mhi_cntrl->read_async = pci_epf_mhi_edma_read_async;
+ mhi_cntrl->write_async = pci_epf_mhi_edma_write_async;
}
/* Register the MHI EP controller */
--
2.25.1
* Re: [PATCH 6/9] PCI: epf-mhi: Enable MHI async read/write support
2023-11-27 12:45 ` [PATCH 6/9] PCI: epf-mhi: Enable MHI async read/write support Manivannan Sadhasivam
@ 2023-12-13 18:48 ` Krzysztof Wilczyński
2023-12-14 5:19 ` Manivannan Sadhasivam
2023-12-14 9:40 ` Krishna Chaitanya Chundru
1 sibling, 1 reply; 22+ messages in thread
From: Krzysztof Wilczyński @ 2023-12-13 18:48 UTC
To: Manivannan Sadhasivam
Cc: lpieralisi, kishon, bhelgaas, mhi, linux-arm-msm, linux-pci,
linux-kernel
Hello,
Manivannan, you will be taking this through the MHI tree, correct?
> Now that both eDMA and iATU are prepared to support async transfer, let's
> enable MHI async read/write by supplying the relevant callbacks.
>
> In the absence of eDMA, iATU will be used for both sync and async
> operations.
>
> Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
> ---
> drivers/pci/endpoint/functions/pci-epf-mhi.c | 7 ++++---
> 1 file changed, 4 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
> index 3d09a37e5f7c..d3d6a1054036 100644
> --- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
> +++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
> @@ -766,12 +766,13 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf)
> mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq;
> mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map;
> mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free;
> + mhi_cntrl->read_sync = mhi_cntrl->read_async = pci_epf_mhi_iatu_read;
> + mhi_cntrl->write_sync = mhi_cntrl->write_async = pci_epf_mhi_iatu_write;
> if (info->flags & MHI_EPF_USE_DMA) {
> mhi_cntrl->read_sync = pci_epf_mhi_edma_read;
> mhi_cntrl->write_sync = pci_epf_mhi_edma_write;
> - } else {
> - mhi_cntrl->read_sync = pci_epf_mhi_iatu_read;
> - mhi_cntrl->write_sync = pci_epf_mhi_iatu_write;
> + mhi_cntrl->read_async = pci_epf_mhi_edma_read_async;
> + mhi_cntrl->write_async = pci_epf_mhi_edma_write_async;
> }
Looks good!
Reviewed-by: Krzysztof Wilczyński <kw@linux.com>
Krzysztof
* Re: [PATCH 6/9] PCI: epf-mhi: Enable MHI async read/write support
2023-12-13 18:48 ` Krzysztof Wilczyński
@ 2023-12-14 5:19 ` Manivannan Sadhasivam
0 siblings, 0 replies; 22+ messages in thread
From: Manivannan Sadhasivam @ 2023-12-14 5:19 UTC
To: Krzysztof Wilczyński
Cc: lpieralisi, kishon, bhelgaas, mhi, linux-arm-msm, linux-pci,
linux-kernel
On Thu, Dec 14, 2023 at 03:48:29AM +0900, Krzysztof Wilczyński wrote:
> Hello,
>
> Manivannan, you will be taking this through the MHI tree, correct?
>
Yes, to avoid conflicts with other MHI patches, I'm taking this series through
the MHI tree.
> > Now that both eDMA and iATU are prepared to support async transfer, let's
> > enable MHI async read/write by supplying the relevant callbacks.
> >
> > In the absence of eDMA, iATU will be used for both sync and async
> > operations.
> >
> > Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
> > ---
> > drivers/pci/endpoint/functions/pci-epf-mhi.c | 7 ++++---
> > 1 file changed, 4 insertions(+), 3 deletions(-)
> >
> > diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
> > index 3d09a37e5f7c..d3d6a1054036 100644
> > --- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
> > +++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
> > @@ -766,12 +766,13 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf)
> > mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq;
> > mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map;
> > mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free;
> > + mhi_cntrl->read_sync = mhi_cntrl->read_async = pci_epf_mhi_iatu_read;
> > + mhi_cntrl->write_sync = mhi_cntrl->write_async = pci_epf_mhi_iatu_write;
> > if (info->flags & MHI_EPF_USE_DMA) {
> > mhi_cntrl->read_sync = pci_epf_mhi_edma_read;
> > mhi_cntrl->write_sync = pci_epf_mhi_edma_write;
> > - } else {
> > - mhi_cntrl->read_sync = pci_epf_mhi_iatu_read;
> > - mhi_cntrl->write_sync = pci_epf_mhi_iatu_write;
> > + mhi_cntrl->read_async = pci_epf_mhi_edma_read_async;
> > + mhi_cntrl->write_async = pci_epf_mhi_edma_write_async;
> > }
>
> Looks good!
>
> Reviewed-by: Krzysztof Wilczyński <kw@linux.com>
>
Thanks!
- Mani
> Krzysztof
--
மணிவண்ணன் சதாசிவம்
* Re: [PATCH 6/9] PCI: epf-mhi: Enable MHI async read/write support
2023-11-27 12:45 ` [PATCH 6/9] PCI: epf-mhi: Enable MHI async read/write support Manivannan Sadhasivam
2023-12-13 18:48 ` Krzysztof Wilczyński
@ 2023-12-14 9:40 ` Krishna Chaitanya Chundru
2023-12-14 10:09 ` Manivannan Sadhasivam
1 sibling, 1 reply; 22+ messages in thread
From: Krishna Chaitanya Chundru @ 2023-12-14 9:40 UTC
To: Manivannan Sadhasivam, lpieralisi, kw
Cc: kishon, bhelgaas, mhi, linux-arm-msm, linux-pci, linux-kernel
On 11/27/2023 6:15 PM, Manivannan Sadhasivam wrote:
> Now that both eDMA and iATU are prepared to support async transfer, let's
> enable MHI async read/write by supplying the relevant callbacks.
>
> In the absence of eDMA, iATU will be used for both sync and async
> operations.
>
> Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
> ---
> drivers/pci/endpoint/functions/pci-epf-mhi.c | 7 ++++---
> 1 file changed, 4 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
> index 3d09a37e5f7c..d3d6a1054036 100644
> --- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
> +++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
> @@ -766,12 +766,13 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf)
> mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq;
> mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map;
> mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free;
> + mhi_cntrl->read_sync = mhi_cntrl->read_async = pci_epf_mhi_iatu_read;
> + mhi_cntrl->write_sync = mhi_cntrl->write_async = pci_epf_mhi_iatu_write;
> if (info->flags & MHI_EPF_USE_DMA) {
> mhi_cntrl->read_sync = pci_epf_mhi_edma_read;
> mhi_cntrl->write_sync = pci_epf_mhi_edma_write;
> - } else {
> - mhi_cntrl->read_sync = pci_epf_mhi_iatu_read;
> - mhi_cntrl->write_sync = pci_epf_mhi_iatu_write;
> + mhi_cntrl->read_async = pci_epf_mhi_edma_read_async;
> + mhi_cntrl->write_async = pci_epf_mhi_edma_write_async;
I think the read_async & write_async should be updated inside the if
condition where the MHI_EPF_USE_DMA flag is set.
- Krishna Chaitanya.
> }
>
> /* Register the MHI EP controller */
* Re: [PATCH 6/9] PCI: epf-mhi: Enable MHI async read/write support
2023-12-14 9:40 ` Krishna Chaitanya Chundru
@ 2023-12-14 10:09 ` Manivannan Sadhasivam
2023-12-14 10:14 ` Krishna Chaitanya Chundru
0 siblings, 1 reply; 22+ messages in thread
From: Manivannan Sadhasivam @ 2023-12-14 10:09 UTC
To: Krishna Chaitanya Chundru
Cc: lpieralisi, kw, kishon, bhelgaas, mhi, linux-arm-msm, linux-pci,
linux-kernel
On Thu, Dec 14, 2023 at 03:10:01PM +0530, Krishna Chaitanya Chundru wrote:
>
> On 11/27/2023 6:15 PM, Manivannan Sadhasivam wrote:
> > Now that both eDMA and iATU are prepared to support async transfer, let's
> > enable MHI async read/write by supplying the relevant callbacks.
> >
> > In the absence of eDMA, iATU will be used for both sync and async
> > operations.
> >
> > Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
> > ---
> > drivers/pci/endpoint/functions/pci-epf-mhi.c | 7 ++++---
> > 1 file changed, 4 insertions(+), 3 deletions(-)
> >
> > diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
> > index 3d09a37e5f7c..d3d6a1054036 100644
> > --- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
> > +++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
> > @@ -766,12 +766,13 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf)
> > mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq;
> > mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map;
> > mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free;
> > + mhi_cntrl->read_sync = mhi_cntrl->read_async = pci_epf_mhi_iatu_read;
> > + mhi_cntrl->write_sync = mhi_cntrl->write_async = pci_epf_mhi_iatu_write;
> > if (info->flags & MHI_EPF_USE_DMA) {
> > mhi_cntrl->read_sync = pci_epf_mhi_edma_read;
> > mhi_cntrl->write_sync = pci_epf_mhi_edma_write;
> > - } else {
> > - mhi_cntrl->read_sync = pci_epf_mhi_iatu_read;
> > - mhi_cntrl->write_sync = pci_epf_mhi_iatu_write;
> > + mhi_cntrl->read_async = pci_epf_mhi_edma_read_async;
> > + mhi_cntrl->write_async = pci_epf_mhi_edma_write_async;
>
> I think the read_async & write_async should be updated inside the if
> condition where the MHI_EPF_USE_DMA flag is set.
>
That's what is being done here. Am I missing anything?
- Mani
> - Krishna Chaitanya.
>
> > }
> > /* Register the MHI EP controller */
--
மணிவண்ணன் சதாசிவம்
* Re: [PATCH 6/9] PCI: epf-mhi: Enable MHI async read/write support
2023-12-14 10:09 ` Manivannan Sadhasivam
@ 2023-12-14 10:14 ` Krishna Chaitanya Chundru
2023-12-14 10:47 ` Manivannan Sadhasivam
0 siblings, 1 reply; 22+ messages in thread
From: Krishna Chaitanya Chundru @ 2023-12-14 10:14 UTC (permalink / raw)
To: Manivannan Sadhasivam
Cc: lpieralisi, kw, kishon, bhelgaas, mhi, linux-arm-msm, linux-pci,
linux-kernel
On 12/14/2023 3:39 PM, Manivannan Sadhasivam wrote:
> On Thu, Dec 14, 2023 at 03:10:01PM +0530, Krishna Chaitanya Chundru wrote:
>> On 11/27/2023 6:15 PM, Manivannan Sadhasivam wrote:
>>> Now that both eDMA and iATU are prepared to support async transfer, let's
>>> enable MHI async read/write by supplying the relevant callbacks.
>>>
>>> In the absence of eDMA, iATU will be used for both sync and async
>>> operations.
>>>
>>> Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
>>> ---
>>> drivers/pci/endpoint/functions/pci-epf-mhi.c | 7 ++++---
>>> 1 file changed, 4 insertions(+), 3 deletions(-)
>>>
>>> diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
>>> index 3d09a37e5f7c..d3d6a1054036 100644
>>> --- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
>>> +++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
>>> @@ -766,12 +766,13 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf)
>>> mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq;
>>> mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map;
>>> mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free;
>>> + mhi_cntrl->read_sync = mhi_cntrl->read_async = pci_epf_mhi_iatu_read;
>>> + mhi_cntrl->write_sync = mhi_cntrl->write_async = pci_epf_mhi_iatu_write;
>>> if (info->flags & MHI_EPF_USE_DMA) {
>>> mhi_cntrl->read_sync = pci_epf_mhi_edma_read;
>>> mhi_cntrl->write_sync = pci_epf_mhi_edma_write;
>>> - } else {
>>> - mhi_cntrl->read_sync = pci_epf_mhi_iatu_read;
>>> - mhi_cntrl->write_sync = pci_epf_mhi_iatu_write;
>>> + mhi_cntrl->read_async = pci_epf_mhi_edma_read_async;
>>> + mhi_cntrl->write_async = pci_epf_mhi_edma_write_async;
>> I think the read_async & write_async callbacks should be updated inside the if
>> condition where the MHI_EPF_USE_DMA flag is set.
>>
> That's what's being done here. Am I missing anything?
>
> - Mani
It should be like this, as the eDMA sync & async read/write callbacks should
be updated only if DMA is supported. In the patch, I see the async function
pointers being updated with the eDMA function pointers for iATU operations.
if (info->flags & MHI_EPF_USE_DMA) {
mhi_cntrl->read_sync = pci_epf_mhi_edma_read;
mhi_cntrl->write_sync = pci_epf_mhi_edma_write;
mhi_cntrl->read_async = pci_epf_mhi_edma_read_async;
mhi_cntrl->write_async = pci_epf_mhi_edma_write_async;
}
- Krishna Chaitanya.
>> - Krishna Chaitanya.
>>
>>> }
>>> /* Register the MHI EP controller */
^ permalink raw reply	[flat|nested] 22+ messages in thread
* Re: [PATCH 6/9] PCI: epf-mhi: Enable MHI async read/write support
2023-12-14 10:14 ` Krishna Chaitanya Chundru
@ 2023-12-14 10:47 ` Manivannan Sadhasivam
2023-12-14 10:54 ` Krishna Chaitanya Chundru
0 siblings, 1 reply; 22+ messages in thread
From: Manivannan Sadhasivam @ 2023-12-14 10:47 UTC (permalink / raw)
To: Krishna Chaitanya Chundru
Cc: Manivannan Sadhasivam, lpieralisi, kw, kishon, bhelgaas, mhi,
linux-arm-msm, linux-pci, linux-kernel
On Thu, Dec 14, 2023 at 03:44:21PM +0530, Krishna Chaitanya Chundru wrote:
>
> On 12/14/2023 3:39 PM, Manivannan Sadhasivam wrote:
> > On Thu, Dec 14, 2023 at 03:10:01PM +0530, Krishna Chaitanya Chundru wrote:
> > > On 11/27/2023 6:15 PM, Manivannan Sadhasivam wrote:
> > > > Now that both eDMA and iATU are prepared to support async transfer, let's
> > > > enable MHI async read/write by supplying the relevant callbacks.
> > > >
> > > > In the absence of eDMA, iATU will be used for both sync and async
> > > > operations.
> > > >
> > > > Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
> > > > ---
> > > > drivers/pci/endpoint/functions/pci-epf-mhi.c | 7 ++++---
> > > > 1 file changed, 4 insertions(+), 3 deletions(-)
> > > >
> > > > diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
> > > > index 3d09a37e5f7c..d3d6a1054036 100644
> > > > --- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
> > > > +++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
> > > > @@ -766,12 +766,13 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf)
> > > > mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq;
> > > > mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map;
> > > > mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free;
> > > > + mhi_cntrl->read_sync = mhi_cntrl->read_async = pci_epf_mhi_iatu_read;
> > > > + mhi_cntrl->write_sync = mhi_cntrl->write_async = pci_epf_mhi_iatu_write;
> > > > if (info->flags & MHI_EPF_USE_DMA) {
> > > > mhi_cntrl->read_sync = pci_epf_mhi_edma_read;
> > > > mhi_cntrl->write_sync = pci_epf_mhi_edma_write;
> > > > - } else {
> > > > - mhi_cntrl->read_sync = pci_epf_mhi_iatu_read;
> > > > - mhi_cntrl->write_sync = pci_epf_mhi_iatu_write;
> > > > + mhi_cntrl->read_async = pci_epf_mhi_edma_read_async;
> > > > + mhi_cntrl->write_async = pci_epf_mhi_edma_write_async;
> > > I think the read_async & write_async callbacks should be updated inside the if
> > > condition where the MHI_EPF_USE_DMA flag is set.
> > >
> > That's what's being done here. Am I missing anything?
> >
> > - Mani
>
> It should be like this, as the eDMA sync & async read/write callbacks should
> be updated only if DMA is supported. In the patch, I see the async function
> pointers being updated with the eDMA function pointers for iATU operations.
>
> if (info->flags & MHI_EPF_USE_DMA) {
>
> mhi_cntrl->read_sync = pci_epf_mhi_edma_read;
> mhi_cntrl->write_sync = pci_epf_mhi_edma_write;
> mhi_cntrl->read_async = pci_epf_mhi_edma_read_async;
> mhi_cntrl->write_async = pci_epf_mhi_edma_write_async;
> }
Are you reading the patch correctly? Please take a look at this commit:
https://git.kernel.org/pub/scm/linux/kernel/git/mani/mhi.git/tree/drivers/pci/endpoint/functions/pci-epf-mhi.c?h=mhi-next&id=d1c6f4ba4746ed41fde8269cb5fea88bddb60504#n771
- Mani
> - Krishna Chaitanya.
>
> > > - Krishna Chaitanya.
> > >
> > > > }
> > > > /* Register the MHI EP controller */
--
மணிவண்ணன் சதாசிவம்
^ permalink raw reply	[flat|nested] 22+ messages in thread
* Re: [PATCH 6/9] PCI: epf-mhi: Enable MHI async read/write support
2023-12-14 10:47 ` Manivannan Sadhasivam
@ 2023-12-14 10:54 ` Krishna Chaitanya Chundru
0 siblings, 0 replies; 22+ messages in thread
From: Krishna Chaitanya Chundru @ 2023-12-14 10:54 UTC (permalink / raw)
To: Manivannan Sadhasivam
Cc: Manivannan Sadhasivam, lpieralisi, kw, kishon, bhelgaas, mhi,
linux-arm-msm, linux-pci, linux-kernel
On 12/14/2023 4:17 PM, Manivannan Sadhasivam wrote:
> On Thu, Dec 14, 2023 at 03:44:21PM +0530, Krishna Chaitanya Chundru wrote:
>> On 12/14/2023 3:39 PM, Manivannan Sadhasivam wrote:
>>> On Thu, Dec 14, 2023 at 03:10:01PM +0530, Krishna Chaitanya Chundru wrote:
>>>> On 11/27/2023 6:15 PM, Manivannan Sadhasivam wrote:
>>>>> Now that both eDMA and iATU are prepared to support async transfer, let's
>>>>> enable MHI async read/write by supplying the relevant callbacks.
>>>>>
>>>>> In the absence of eDMA, iATU will be used for both sync and async
>>>>> operations.
>>>>>
>>>>> Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
>>>>> ---
>>>>> drivers/pci/endpoint/functions/pci-epf-mhi.c | 7 ++++---
>>>>> 1 file changed, 4 insertions(+), 3 deletions(-)
>>>>>
>>>>> diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
>>>>> index 3d09a37e5f7c..d3d6a1054036 100644
>>>>> --- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
>>>>> +++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
>>>>> @@ -766,12 +766,13 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf)
>>>>> mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq;
>>>>> mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map;
>>>>> mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free;
>>>>> + mhi_cntrl->read_sync = mhi_cntrl->read_async = pci_epf_mhi_iatu_read;
>>>>> + mhi_cntrl->write_sync = mhi_cntrl->write_async = pci_epf_mhi_iatu_write;
>>>>> if (info->flags & MHI_EPF_USE_DMA) {
>>>>> mhi_cntrl->read_sync = pci_epf_mhi_edma_read;
>>>>> mhi_cntrl->write_sync = pci_epf_mhi_edma_write;
>>>>> - } else {
>>>>> - mhi_cntrl->read_sync = pci_epf_mhi_iatu_read;
>>>>> - mhi_cntrl->write_sync = pci_epf_mhi_iatu_write;
>>>>> + mhi_cntrl->read_async = pci_epf_mhi_edma_read_async;
>>>>> + mhi_cntrl->write_async = pci_epf_mhi_edma_write_async;
>>>> I think the read_async & write_async callbacks should be updated inside the if
>>>> condition where the MHI_EPF_USE_DMA flag is set.
>>>>
>>> That's what's being done here. Am I missing anything?
>>>
>>> - Mani
>> It should be like this, as the eDMA sync & async read/write callbacks should
>> be updated only if DMA is supported. In the patch, I see the async function
>> pointers being updated with the eDMA function pointers for iATU operations.
>>
>> if (info->flags & MHI_EPF_USE_DMA) {
>>
>> mhi_cntrl->read_sync = pci_epf_mhi_edma_read;
>> mhi_cntrl->write_sync = pci_epf_mhi_edma_write;
>> mhi_cntrl->read_async = pci_epf_mhi_edma_read_async;
>> mhi_cntrl->write_async = pci_epf_mhi_edma_write_async;
>> }
> Are you reading the patch correctly? Please take a look at this commit:
> https://git.kernel.org/pub/scm/linux/kernel/git/mani/mhi.git/tree/drivers/pci/endpoint/functions/pci-epf-mhi.c?h=mhi-next&id=d1c6f4ba4746ed41fde8269cb5fea88bddb60504#n771
>
> - Mani
Sorry for the noise, I didn't notice that the else branch was also removed.
- Krishna Chaitanya.
>> - Krishna Chaitanya.
>>
>>>> - Krishna Chaitanya.
>>>>
>>>>> }
>>>>> /* Register the MHI EP controller */
^ permalink raw reply [flat|nested] 22+ messages in thread
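For reference, the resolved logic from the thread above, as a sketch
reconstructed from the diff (not a verbatim excerpt of
pci_epf_mhi_link_up()):

	/* iATU callbacks are the defaults for both sync and async paths */
	mhi_cntrl->read_sync = mhi_cntrl->read_async = pci_epf_mhi_iatu_read;
	mhi_cntrl->write_sync = mhi_cntrl->write_async = pci_epf_mhi_iatu_write;

	/* eDMA overrides all four callbacks only when DMA is available */
	if (info->flags & MHI_EPF_USE_DMA) {
		mhi_cntrl->read_sync = pci_epf_mhi_edma_read;
		mhi_cntrl->write_sync = pci_epf_mhi_edma_write;
		mhi_cntrl->read_async = pci_epf_mhi_edma_read_async;
		mhi_cntrl->write_async = pci_epf_mhi_edma_write_async;
	}

With the else branch gone, iATU remains the fallback for both sync and
async operations whenever MHI_EPF_USE_DMA is not set.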
* [PATCH 7/9] bus: mhi: ep: Add support for async DMA write operation
2023-11-27 12:45 [PATCH 0/9] bus: mhi: ep: Add async read/write support Manivannan Sadhasivam
` (5 preceding siblings ...)
2023-11-27 12:45 ` [PATCH 6/9] PCI: epf-mhi: Enable MHI async read/write support Manivannan Sadhasivam
@ 2023-11-27 12:45 ` Manivannan Sadhasivam
2023-11-27 12:45 ` [PATCH 8/9] bus: mhi: ep: Add support for async DMA read operation Manivannan Sadhasivam
` (3 subsequent siblings)
10 siblings, 0 replies; 22+ messages in thread
From: Manivannan Sadhasivam @ 2023-11-27 12:45 UTC (permalink / raw)
To: lpieralisi, kw
Cc: kishon, bhelgaas, mhi, linux-arm-msm, linux-pci, linux-kernel,
Manivannan Sadhasivam
In order to optimize the data transfer, let's use the async DMA operation
for writing (queuing) data to the host.
In the async path, the completion event for the transfer ring will only be
sent to the host when the controller driver notifies the MHI stack of the
actual transfer completion using the callback (mhi_ep_skb_completion)
supplied in "struct mhi_ep_buf_info".
Also, to accommodate the async operation, the transfer ring read offset
(ring->rd_offset) is cached in the "struct mhi_ep_chan" and updated locally
to let the stack queue further ring items to the controller driver. But the
actual read offset of the transfer ring will only be updated in the
completion callback.
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
---
drivers/bus/mhi/ep/internal.h | 1 +
drivers/bus/mhi/ep/main.c | 77 ++++++++++++++++++++++++++---------
include/linux/mhi_ep.h | 4 ++
3 files changed, 62 insertions(+), 20 deletions(-)
diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h
index 8c5cf2b67951..577965f95fda 100644
--- a/drivers/bus/mhi/ep/internal.h
+++ b/drivers/bus/mhi/ep/internal.h
@@ -162,6 +162,7 @@ struct mhi_ep_chan {
void (*xfer_cb)(struct mhi_ep_device *mhi_dev, struct mhi_result *result);
enum mhi_ch_state state;
enum dma_data_direction dir;
+ size_t rd_offset;
u64 tre_loc;
u32 tre_size;
u32 tre_bytes_left;
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index 5748a1da0803..81d693433a5f 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -203,6 +203,8 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
goto err_unlock;
}
+
+ mhi_chan->rd_offset = ch_ring->rd_offset;
}
/* Set channel state to RUNNING */
@@ -332,7 +334,7 @@ bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_directio
struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
- return !!(ring->rd_offset == ring->wr_offset);
+ return !!(mhi_chan->rd_offset == ring->wr_offset);
}
EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);
@@ -359,7 +361,7 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
return -ENODEV;
}
- el = &ring->ring_cache[ring->rd_offset];
+ el = &ring->ring_cache[mhi_chan->rd_offset];
/* Check if there is data pending to be read from previous read operation */
if (mhi_chan->tre_bytes_left) {
@@ -438,6 +440,7 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
tr_done = true;
}
+ mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
mhi_ep_ring_inc_index(ring);
}
@@ -498,6 +501,37 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_elem
return 0;
}
+static void mhi_ep_skb_completion(struct mhi_ep_buf_info *buf_info)
+{
+ struct mhi_ep_device *mhi_dev = buf_info->mhi_dev;
+ struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
+ struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+ struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset];
+ struct device *dev = &mhi_dev->dev;
+ struct mhi_result result = {};
+ int ret;
+
+ if (mhi_chan->xfer_cb) {
+ result.buf_addr = buf_info->cb_buf;
+ result.dir = mhi_chan->dir;
+ result.bytes_xferd = buf_info->size;
+
+ mhi_chan->xfer_cb(mhi_dev, &result);
+ }
+
+ dev_dbg(dev, "Sending completion for ring (%d) rd_offset: %ld\n",
+		ring->er_index, ring->rd_offset);
+ ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, buf_info->size,
+ buf_info->code);
+ if (ret) {
+ dev_err(dev, "Error sending transfer completion event\n");
+ return;
+ }
+
+ mhi_ep_ring_inc_index(ring);
+}
+
/* TODO: Handle partially formed TDs */
int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
{
@@ -508,7 +542,6 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
struct mhi_ring_element *el;
u32 buf_left, read_offset;
struct mhi_ep_ring *ring;
- enum mhi_ev_ccs code;
size_t tr_len;
u32 tre_len;
int ret;
@@ -532,7 +565,7 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
goto err_exit;
}
- el = &ring->ring_cache[ring->rd_offset];
+ el = &ring->ring_cache[mhi_chan->rd_offset];
tre_len = MHI_TRE_DATA_GET_LEN(el);
tr_len = min(buf_left, tre_len);
@@ -541,33 +574,36 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
buf_info.dev_addr = skb->data + read_offset;
buf_info.host_addr = MHI_TRE_DATA_GET_PTR(el);
buf_info.size = tr_len;
+ buf_info.cb = mhi_ep_skb_completion;
+ buf_info.cb_buf = skb;
+ buf_info.mhi_dev = mhi_dev;
- dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
- ret = mhi_cntrl->write_sync(mhi_cntrl, &buf_info);
- if (ret < 0) {
- dev_err(dev, "Error writing to the channel\n");
- goto err_exit;
- }
-
- buf_left -= tr_len;
/*
* For all TREs queued by the host for DL channel, only the EOT flag will be set.
* If the packet doesn't fit into a single TRE, send the OVERFLOW event to
* the host so that the host can adjust the packet boundary to next TREs. Else send
* the EOT event to the host indicating the packet boundary.
*/
- if (buf_left)
- code = MHI_EV_CC_OVERFLOW;
+ if (buf_left - tr_len)
+ buf_info.code = MHI_EV_CC_OVERFLOW;
else
- code = MHI_EV_CC_EOT;
+ buf_info.code = MHI_EV_CC_EOT;
- ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code);
- if (ret) {
- dev_err(dev, "Error sending transfer completion event\n");
+ dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
+ ret = mhi_cntrl->write_async(mhi_cntrl, &buf_info);
+ if (ret < 0) {
+ dev_err(dev, "Error writing to the channel\n");
goto err_exit;
}
- mhi_ep_ring_inc_index(ring);
+ buf_left -= tr_len;
+ /*
+ * Update the read offset cached in mhi_chan. Actual read offset
+ * will be updated by the completion handler.
+ */
+ dev_dbg(dev, "rd_offset at the end of queue_skb: %ld\n",
+		mhi_chan->rd_offset);
+ mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
} while (buf_left);
mutex_unlock(&mhi_chan->lock);
@@ -807,7 +843,7 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
}
/* Sanity check to make sure there are elements in the ring */
- if (ring->rd_offset == ring->wr_offset) {
+ if (chan->rd_offset == ring->wr_offset) {
mutex_unlock(&chan->lock);
kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
continue;
@@ -1451,6 +1487,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
ret = -ENOMEM;
goto err_destroy_tre_buf_cache;
}
+
INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
index 14c6e8d3f573..11bf3212f782 100644
--- a/include/linux/mhi_ep.h
+++ b/include/linux/mhi_ep.h
@@ -51,16 +51,20 @@ struct mhi_ep_db_info {
/**
* struct mhi_ep_buf_info - MHI Endpoint transfer buffer info
+ * @mhi_dev: MHI device associated with this buffer
* @dev_addr: Address of the buffer in endpoint
* @host_addr: Address of the bufffer in host
* @size: Size of the buffer
+ * @code: Transfer completion code
* @cb: Callback to be executed by controller drivers after transfer completion (async)
* @cb_buf: Opaque buffer to be passed to the callback
*/
struct mhi_ep_buf_info {
+ struct mhi_ep_device *mhi_dev;
void *dev_addr;
u64 host_addr;
size_t size;
+ int code;
void (*cb)(struct mhi_ep_buf_info *buf_info);
void *cb_buf;
--
2.25.1
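The completion contract this patch relies on: the controller driver's
write_async() implementation must invoke the cb member of struct
mhi_ep_buf_info once the transfer actually finishes, which is what routes
control into mhi_ep_skb_completion() above. A minimal sketch with a
hypothetical DMA-done handler (the real implementation lives in the PCI
EPF driver, patch 5 of this series, and may differ):

	/* Hypothetical handler registered with the DMA engine; param
	 * carries the mhi_ep_buf_info that was passed to write_async().
	 */
	static void epf_dma_write_done(void *param)
	{
		struct mhi_ep_buf_info *buf_info = param;

		/* Hand control back to the MHI EP stack. For skbs this
		 * ends up in mhi_ep_skb_completion(), which sends the
		 * completion event and advances the ring's rd_offset.
		 */
		if (buf_info->cb)
			buf_info->cb(buf_info);
	}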
^ permalink raw reply related	[flat|nested] 22+ messages in thread
* [PATCH 8/9] bus: mhi: ep: Add support for async DMA read operation
2023-11-27 12:45 [PATCH 0/9] bus: mhi: ep: Add async read/write support Manivannan Sadhasivam
` (6 preceding siblings ...)
2023-11-27 12:45 ` [PATCH 7/9] bus: mhi: ep: Add support for async DMA write operation Manivannan Sadhasivam
@ 2023-11-27 12:45 ` Manivannan Sadhasivam
2023-11-27 12:45 ` [PATCH 9/9] bus: mhi: ep: Add checks for read/write callbacks while registering controllers Manivannan Sadhasivam
` (2 subsequent siblings)
10 siblings, 0 replies; 22+ messages in thread
From: Manivannan Sadhasivam @ 2023-11-27 12:45 UTC (permalink / raw)
To: lpieralisi, kw
Cc: kishon, bhelgaas, mhi, linux-arm-msm, linux-pci, linux-kernel,
Manivannan Sadhasivam
As with the async DMA write operation, let's add support for the async DMA
read operation. In the async path, the data will be read from the transfer
ring continuously, and when the controller driver notifies the stack using
the completion callback (mhi_ep_read_completion), the client driver will be
notified with the read data and the completion event will be sent to the
host for the respective ring element (if requested by the host).
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
---
drivers/bus/mhi/ep/main.c | 162 +++++++++++++++++++++-----------------
1 file changed, 89 insertions(+), 73 deletions(-)
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index 81d693433a5f..3e599d9640f5 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -338,17 +338,81 @@ bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_directio
}
EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);
+static void mhi_ep_read_completion(struct mhi_ep_buf_info *buf_info)
+{
+ struct mhi_ep_device *mhi_dev = buf_info->mhi_dev;
+ struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_ep_chan *mhi_chan = mhi_dev->ul_chan;
+ struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+ struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset];
+ struct mhi_result result = {};
+ int ret;
+
+ if (mhi_chan->xfer_cb) {
+ result.buf_addr = buf_info->cb_buf;
+ result.dir = mhi_chan->dir;
+ result.bytes_xferd = buf_info->size;
+
+ mhi_chan->xfer_cb(mhi_dev, &result);
+ }
+
+ /*
+ * The host will split the data packet into multiple TREs if it can't fit
+ * the packet in a single TRE. In that case, CHAIN flag will be set by the
+ * host for all TREs except the last one.
+ */
+ if (buf_info->code != MHI_EV_CC_OVERFLOW) {
+ if (MHI_TRE_DATA_GET_CHAIN(el)) {
+ /*
+ * IEOB (Interrupt on End of Block) flag will be set by the host if
+ * it expects the completion event for all TREs of a TD.
+ */
+ if (MHI_TRE_DATA_GET_IEOB(el)) {
+ ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
+ MHI_TRE_DATA_GET_LEN(el),
+ MHI_EV_CC_EOB);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev,
+ "Error sending transfer compl. event\n");
+ goto err_free_tre_buf;
+ }
+ }
+ } else {
+ /*
+ * IEOT (Interrupt on End of Transfer) flag will be set by the host
+ * for the last TRE of the TD and expects the completion event for
+ * the same.
+ */
+ if (MHI_TRE_DATA_GET_IEOT(el)) {
+ ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
+ MHI_TRE_DATA_GET_LEN(el),
+ MHI_EV_CC_EOT);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev,
+ "Error sending transfer compl. event\n");
+ goto err_free_tre_buf;
+ }
+ }
+ }
+ }
+
+ mhi_ep_ring_inc_index(ring);
+
+err_free_tre_buf:
+ kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_info->cb_buf);
+}
+
static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
- struct mhi_ep_ring *ring,
- struct mhi_result *result,
- u32 len)
+ struct mhi_ep_ring *ring)
{
struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
struct device *dev = &mhi_cntrl->mhi_dev->dev;
size_t tr_len, read_offset, write_offset;
struct mhi_ep_buf_info buf_info = {};
+ u32 len = MHI_EP_DEFAULT_MTU;
struct mhi_ring_element *el;
bool tr_done = false;
+ void *buf_addr;
u32 buf_left;
int ret;
@@ -378,83 +442,50 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
write_offset = len - buf_left;
+ buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
+ if (!buf_addr)
+ return -ENOMEM;
+
buf_info.host_addr = mhi_chan->tre_loc + read_offset;
- buf_info.dev_addr = result->buf_addr + write_offset;
+ buf_info.dev_addr = buf_addr + write_offset;
buf_info.size = tr_len;
+ buf_info.cb = mhi_ep_read_completion;
+ buf_info.cb_buf = buf_addr;
+ buf_info.mhi_dev = mhi_chan->mhi_dev;
+
+ if (mhi_chan->tre_bytes_left - tr_len)
+ buf_info.code = MHI_EV_CC_OVERFLOW;
dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
- ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
+ ret = mhi_cntrl->read_async(mhi_cntrl, &buf_info);
if (ret < 0) {
dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n");
- return ret;
+ goto err_free_buf_addr;
}
buf_left -= tr_len;
mhi_chan->tre_bytes_left -= tr_len;
- /*
- * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been
- * read completely:
- *
- * 1. Send completion event to the host based on the flags set in TRE.
- * 2. Increment the local read offset of the transfer ring.
- */
if (!mhi_chan->tre_bytes_left) {
- /*
- * The host will split the data packet into multiple TREs if it can't fit
- * the packet in a single TRE. In that case, CHAIN flag will be set by the
- * host for all TREs except the last one.
- */
- if (MHI_TRE_DATA_GET_CHAIN(el)) {
- /*
- * IEOB (Interrupt on End of Block) flag will be set by the host if
- * it expects the completion event for all TREs of a TD.
- */
- if (MHI_TRE_DATA_GET_IEOB(el)) {
- ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
- MHI_TRE_DATA_GET_LEN(el),
- MHI_EV_CC_EOB);
- if (ret < 0) {
- dev_err(&mhi_chan->mhi_dev->dev,
- "Error sending transfer compl. event\n");
- return ret;
- }
- }
- } else {
- /*
- * IEOT (Interrupt on End of Transfer) flag will be set by the host
- * for the last TRE of the TD and expects the completion event for
- * the same.
- */
- if (MHI_TRE_DATA_GET_IEOT(el)) {
- ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
- MHI_TRE_DATA_GET_LEN(el),
- MHI_EV_CC_EOT);
- if (ret < 0) {
- dev_err(&mhi_chan->mhi_dev->dev,
- "Error sending transfer compl. event\n");
- return ret;
- }
- }
-
+ if (MHI_TRE_DATA_GET_IEOT(el))
tr_done = true;
- }
mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
- mhi_ep_ring_inc_index(ring);
}
-
- result->bytes_xferd += tr_len;
} while (buf_left && !tr_done);
return 0;
+
+err_free_buf_addr:
+ kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_addr);
+
+ return ret;
}
-static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
+static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring)
{
struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
struct mhi_result result = {};
- u32 len = MHI_EP_DEFAULT_MTU;
struct mhi_ep_chan *mhi_chan;
int ret;
@@ -475,27 +506,15 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_elem
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
} else {
/* UL channel */
- result.buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
- if (!result.buf_addr)
- return -ENOMEM;
-
do {
- ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len);
+ ret = mhi_ep_read_channel(mhi_cntrl, ring);
if (ret < 0) {
dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
- kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr);
return ret;
}
- result.dir = mhi_chan->dir;
- mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
- result.bytes_xferd = 0;
- memset(result.buf_addr, 0, len);
-
/* Read until the ring becomes empty */
} while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
-
- kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr);
}
return 0;
@@ -804,7 +823,6 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work);
struct device *dev = &mhi_cntrl->mhi_dev->dev;
struct mhi_ep_ring_item *itr, *tmp;
- struct mhi_ring_element *el;
struct mhi_ep_ring *ring;
struct mhi_ep_chan *chan;
unsigned long flags;
@@ -849,10 +867,8 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
continue;
}
- el = &ring->ring_cache[ring->rd_offset];
-
dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
- ret = mhi_ep_process_ch_ring(ring, el);
+ ret = mhi_ep_process_ch_ring(ring);
if (ret) {
dev_err(dev, "Error processing ring for channel (%u): %d\n",
ring->ch_id, ret);
--
2.25.1
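The buffer ownership rule introduced above, condensed into a sketch
assembled from the hunks (not a contiguous excerpt): mhi_ep_read_channel()
allocates the TRE buffer and passes ownership to the completion path via
cb_buf; mhi_ep_read_completion() frees the buffer in all of its paths,
while mhi_ep_read_channel() frees it only when read_async() itself fails:

	buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
	if (!buf_addr)
		return -ENOMEM;

	buf_info.cb = mhi_ep_read_completion;
	buf_info.cb_buf = buf_addr;	/* ownership moves with the request */

	ret = mhi_cntrl->read_async(mhi_cntrl, &buf_info);
	if (ret < 0)
		kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_addr); /* never queued */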
^ permalink raw reply related	[flat|nested] 22+ messages in thread
* [PATCH 9/9] bus: mhi: ep: Add checks for read/write callbacks while registering controllers
2023-11-27 12:45 [PATCH 0/9] bus: mhi: ep: Add async read/write support Manivannan Sadhasivam
` (7 preceding siblings ...)
2023-11-27 12:45 ` [PATCH 8/9] bus: mhi: ep: Add support for async DMA read operation Manivannan Sadhasivam
@ 2023-11-27 12:45 ` Manivannan Sadhasivam
2023-12-13 19:31 ` [PATCH 0/9] bus: mhi: ep: Add async read/write support Bjorn Helgaas
2023-12-14 10:55 ` Manivannan Sadhasivam
10 siblings, 0 replies; 22+ messages in thread
From: Manivannan Sadhasivam @ 2023-11-27 12:45 UTC (permalink / raw)
To: lpieralisi, kw
Cc: kishon, bhelgaas, mhi, linux-arm-msm, linux-pci, linux-kernel,
Manivannan Sadhasivam
The MHI EP controller drivers have to support both sync and async read/write
callbacks. Hence, add a check for them.
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
---
drivers/bus/mhi/ep/main.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index 3e599d9640f5..6b84aeeb247a 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -1471,6 +1471,10 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq)
return -EINVAL;
+ if (!mhi_cntrl->read_sync || !mhi_cntrl->write_sync ||
+ !mhi_cntrl->read_async || !mhi_cntrl->write_async)
+ return -EINVAL;
+
ret = mhi_ep_chan_init(mhi_cntrl, config);
if (ret)
return ret;
--
2.25.1
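In practice, a controller driver must now populate all four callbacks
before registering. A minimal sketch; the my_*() names are hypothetical,
and only the struct members and mhi_ep_register_controller() come from
this series:

	mhi_cntrl->read_sync = my_read_sync;		/* hypothetical */
	mhi_cntrl->write_sync = my_write_sync;		/* hypothetical */
	mhi_cntrl->read_async = my_read_async;		/* hypothetical */
	mhi_cntrl->write_async = my_write_async;	/* hypothetical */

	ret = mhi_ep_register_controller(mhi_cntrl, config);
	if (ret)	/* -EINVAL if any of the four is missing */
		return ret;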
^ permalink raw reply related	[flat|nested] 22+ messages in thread
* Re: [PATCH 0/9] bus: mhi: ep: Add async read/write support
2023-11-27 12:45 [PATCH 0/9] bus: mhi: ep: Add async read/write support Manivannan Sadhasivam
` (8 preceding siblings ...)
2023-11-27 12:45 ` [PATCH 9/9] bus: mhi: ep: Add checks for read/write callbacks while registering controllers Manivannan Sadhasivam
@ 2023-12-13 19:31 ` Bjorn Helgaas
2023-12-14 5:21 ` Manivannan Sadhasivam
2023-12-14 10:55 ` Manivannan Sadhasivam
10 siblings, 1 reply; 22+ messages in thread
From: Bjorn Helgaas @ 2023-12-13 19:31 UTC (permalink / raw)
To: Manivannan Sadhasivam
Cc: lpieralisi, kw, kishon, bhelgaas, mhi, linux-arm-msm, linux-pci,
linux-kernel
On Mon, Nov 27, 2023 at 06:15:20PM +0530, Manivannan Sadhasivam wrote:
> Hi,
>
> This series adds async read/write support for the MHI endpoint stack by
> modifying the MHI ep stack and the MHI EPF (controller) driver.
>
> Currently, only sync read/write operations are supported by the stack,
> resulting in poor data throughput as the transfer is halted until the
> DMA completion is received. So this series adds async support such
> that MHI transfers can continue without waiting for the transfer
> completion. Once the completion happens, the host is notified by sending
> the transfer completion event.
>
> This series brings iperf throughput of ~4Gbps on SM8450 based dev platform,
> where previously 1.6Gbps was achieved with sync operation.
>
> - Mani
>
> Manivannan Sadhasivam (9):
> bus: mhi: ep: Pass mhi_ep_buf_info struct to read/write APIs
> bus: mhi: ep: Rename read_from_host() and write_to_host() APIs
> bus: mhi: ep: Introduce async read/write callbacks
> PCI: epf-mhi: Simulate async read/write using iATU
> PCI: epf-mhi: Add support for DMA async read/write operation
> PCI: epf-mhi: Enable MHI async read/write support
> bus: mhi: ep: Add support for async DMA write operation
> bus: mhi: ep: Add support for async DMA read operation
> bus: mhi: ep: Add checks for read/write callbacks while registering
> controllers
>
> drivers/bus/mhi/ep/internal.h | 1 +
> drivers/bus/mhi/ep/main.c | 256 +++++++++------
> drivers/bus/mhi/ep/ring.c | 41 +--
> drivers/pci/endpoint/functions/pci-epf-mhi.c | 314 ++++++++++++++++---
> include/linux/mhi_ep.h | 33 +-
> 5 files changed, 485 insertions(+), 160 deletions(-)
Mani, do you want to merge this via your MHI tree? If so, you can
include Krzysztof's Reviewed-by tags and my:
Acked-by: Bjorn Helgaas <bhelgaas@google.com>
If you think it'd be better via the PCI tree, let me know and we can
do that, too.
Bjorn
^ permalink raw reply	[flat|nested] 22+ messages in thread
* Re: [PATCH 0/9] bus: mhi: ep: Add async read/write support
2023-12-13 19:31 ` [PATCH 0/9] bus: mhi: ep: Add async read/write support Bjorn Helgaas
@ 2023-12-14 5:21 ` Manivannan Sadhasivam
0 siblings, 0 replies; 22+ messages in thread
From: Manivannan Sadhasivam @ 2023-12-14 5:21 UTC (permalink / raw)
To: Bjorn Helgaas
Cc: lpieralisi, kw, kishon, bhelgaas, mhi, linux-arm-msm, linux-pci,
linux-kernel
On Wed, Dec 13, 2023 at 01:31:03PM -0600, Bjorn Helgaas wrote:
> On Mon, Nov 27, 2023 at 06:15:20PM +0530, Manivannan Sadhasivam wrote:
> > Hi,
> >
> > This series add async read/write support for the MHI endpoint stack by
> > modifying the MHI ep stack and the MHI EPF (controller) driver.
> >
> > Currently, only sync read/write operations are supported by the stack,
> > this resulting in poor data throughput as the transfer is halted until
> > receiving the DMA completion. So this series adds async support such
> > that the MHI transfers can continue without waiting for the transfer
> > completion. And once the completion happens, host is notified by sending
> > the transfer completion event.
> >
> > This series brings iperf throughput of ~4Gbps on SM8450 based dev platform,
> > where previously 1.6Gbps was achieved with sync operation.
> >
> > - Mani
> >
> > Manivannan Sadhasivam (9):
> > bus: mhi: ep: Pass mhi_ep_buf_info struct to read/write APIs
> > bus: mhi: ep: Rename read_from_host() and write_to_host() APIs
> > bus: mhi: ep: Introduce async read/write callbacks
> > PCI: epf-mhi: Simulate async read/write using iATU
> > PCI: epf-mhi: Add support for DMA async read/write operation
> > PCI: epf-mhi: Enable MHI async read/write support
> > bus: mhi: ep: Add support for async DMA write operation
> > bus: mhi: ep: Add support for async DMA read operation
> > bus: mhi: ep: Add checks for read/write callbacks while registering
> > controllers
> >
> > drivers/bus/mhi/ep/internal.h | 1 +
> > drivers/bus/mhi/ep/main.c | 256 +++++++++------
> > drivers/bus/mhi/ep/ring.c | 41 +--
> > drivers/pci/endpoint/functions/pci-epf-mhi.c | 314 ++++++++++++++++---
> > include/linux/mhi_ep.h | 33 +-
> > 5 files changed, 485 insertions(+), 160 deletions(-)
>
> Mani, do you want to merge this via your MHI tree? If so, you can
> include Krzysztof's Reviewed-by tags and my:
>
> Acked-by: Bjorn Helgaas <bhelgaas@google.com>
>
> If you think it'd be better via the PCI tree, let me know and we can
> do that, too.
>
Thanks, Bjorn! Yes, to avoid possible conflicts with other MHI patches, I need to
take this series via the MHI tree.
- Mani
> Bjorn
--
மணிவண்ணன் சதாசிவம்
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH 0/9] bus: mhi: ep: Add async read/write support
2023-11-27 12:45 [PATCH 0/9] bus: mhi: ep: Add async read/write support Manivannan Sadhasivam
` (9 preceding siblings ...)
2023-12-13 19:31 ` [PATCH 0/9] bus: mhi: ep: Add async read/write support Bjorn Helgaas
@ 2023-12-14 10:55 ` Manivannan Sadhasivam
10 siblings, 0 replies; 22+ messages in thread
From: Manivannan Sadhasivam @ 2023-12-14 10:55 UTC (permalink / raw)
To: lpieralisi, kw
Cc: kishon, bhelgaas, mhi, linux-arm-msm, linux-pci, linux-kernel
On Mon, Nov 27, 2023 at 06:15:20PM +0530, Manivannan Sadhasivam wrote:
> Hi,
>
> This series adds async read/write support for the MHI endpoint stack by
> modifying the MHI ep stack and the MHI EPF (controller) driver.
>
> Currently, only sync read/write operations are supported by the stack,
> resulting in poor data throughput as the transfer is halted until the
> DMA completion is received. So this series adds async support such
> that MHI transfers can continue without waiting for the transfer
> completion. Once the completion happens, the host is notified by sending
> the transfer completion event.
>
> This series brings iperf throughput of ~4Gbps on SM8450 based dev platform,
> where previously 1.6Gbps was achieved with sync operation.
>
Applied to mhi-next with reviews from Bjorn and Krzysztof for the PCI EPF patches.
- Mani
> - Mani
>
> Manivannan Sadhasivam (9):
> bus: mhi: ep: Pass mhi_ep_buf_info struct to read/write APIs
> bus: mhi: ep: Rename read_from_host() and write_to_host() APIs
> bus: mhi: ep: Introduce async read/write callbacks
> PCI: epf-mhi: Simulate async read/write using iATU
> PCI: epf-mhi: Add support for DMA async read/write operation
> PCI: epf-mhi: Enable MHI async read/write support
> bus: mhi: ep: Add support for async DMA write operation
> bus: mhi: ep: Add support for async DMA read operation
> bus: mhi: ep: Add checks for read/write callbacks while registering
> controllers
>
> drivers/bus/mhi/ep/internal.h | 1 +
> drivers/bus/mhi/ep/main.c | 256 +++++++++------
> drivers/bus/mhi/ep/ring.c | 41 +--
> drivers/pci/endpoint/functions/pci-epf-mhi.c | 314 ++++++++++++++++---
> include/linux/mhi_ep.h | 33 +-
> 5 files changed, 485 insertions(+), 160 deletions(-)
>
> --
> 2.25.1
>
--
மணிவண்ணன் சதாசிவம்
^ permalink raw reply [flat|nested] 22+ messages in thread