From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
To: stable@vger.kernel.org
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
patches@lists.linux.dev, Arnd Bergmann <arnd@arndb.de>,
Gregory CLEMENT <gregory.clement@bootlin.com>,
stable <stable@kernel.org>
Subject: [PATCH 4.14 17/25] ARM: PL011: Fix DMA support
Date: Mon, 11 Dec 2023 19:21:08 +0100
Message-ID: <20231211182009.304317381@linuxfoundation.org>
In-Reply-To: <20231211182008.665944227@linuxfoundation.org>
4.14-stable review patch. If anyone has any objections, please let me know.
------------------
From: Arnd Bergmann <arnd@arndb.de>
commit 58ac1b3799799069d53f5bf95c093f2fe8dd3cc5 upstream.
Since there is no guarantee that the memory returned by
dma_alloc_coherent() is associated with a 'struct page', using the
architecture-specific phys_to_page() is wrong, and using
virt_to_page() would be just as wrong.
Stop using sg lists altogether and just use the *_single() functions
instead. This also simplifies the code a bit since the scatterlists in
this driver always have only one entry anyway.
https://lore.kernel.org/lkml/86db0fe5-930d-4cbb-bd7d-03367da38951@app.fastmail.com/
Use consistent names for dma buffers
gc: Add a commit log from the initial thread:
https://lore.kernel.org/lkml/86db0fe5-930d-4cbb-bd7d-03367da38951@app.fastmail.com/
Use consistent names for dma buffers
Fixes: cb06ff102e2d7 ("ARM: PL011: Add support for Rx DMA buffer polling.")
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Tested-by: Gregory CLEMENT <gregory.clement@bootlin.com>
Signed-off-by: Gregory CLEMENT <gregory.clement@bootlin.com>
Cc: stable <stable@kernel.org>
Link: https://lore.kernel.org/r/20231122171503.235649-1-gregory.clement@bootlin.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
drivers/tty/serial/amba-pl011.c | 112 +++++++++++++++++++---------------------
1 file changed, 54 insertions(+), 58 deletions(-)
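For readers skimming the diff below, here is a minimal, hedged sketch of the
pattern the patch moves to (the names demo_dmabuf and demo_rx_submit are
illustrative only, not taken from the driver): keep the dma_addr_t/length
pair returned by dma_alloc_coherent() and hand it straight to the *_single()
dmaengine helper, instead of wrapping it in a one-entry scatterlist built
with phys_to_page().

	/*
	 * Illustrative sketch, not the driver code: submit an RX DMA
	 * transfer from a coherent buffer without a scatterlist.
	 */
	#include <linux/dma-mapping.h>
	#include <linux/dmaengine.h>

	struct demo_dmabuf {		/* mirrors struct pl011_dmabuf */
		dma_addr_t dma;
		size_t len;
		char *buf;
	};

	static int demo_rx_submit(struct dma_chan *chan, struct demo_dmabuf *db)
	{
		struct dma_async_tx_descriptor *desc;

		db->len = 4096;		/* PL011_DMA_BUFFER_SIZE is PAGE_SIZE in the driver */
		db->buf = dma_alloc_coherent(chan->device->dev, db->len,
					     &db->dma, GFP_KERNEL);
		if (!db->buf)
			return -ENOMEM;

		/* No scatterlist: the DMA handle and length are enough. */
		desc = dmaengine_prep_slave_single(chan, db->dma, db->len,
						   DMA_DEV_TO_MEM,
						   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc) {
			dma_free_coherent(chan->device->dev, db->len,
					  db->buf, db->dma);
			return -EBUSY;
		}

		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
		return 0;
	}

The sketch only covers the RX side; in the patch itself the TX path instead
takes a streaming mapping with dma_map_single()/dma_unmap_single() on its
bounce buffer, as shown in pl011_dma_tx_refill() below.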
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -240,17 +240,18 @@ static struct vendor_data vendor_zte = {
/* Deals with DMA transactions */
-struct pl011_sgbuf {
- struct scatterlist sg;
- char *buf;
+struct pl011_dmabuf {
+ dma_addr_t dma;
+ size_t len;
+ char *buf;
};
struct pl011_dmarx_data {
struct dma_chan *chan;
struct completion complete;
bool use_buf_b;
- struct pl011_sgbuf sgbuf_a;
- struct pl011_sgbuf sgbuf_b;
+ struct pl011_dmabuf dbuf_a;
+ struct pl011_dmabuf dbuf_b;
dma_cookie_t cookie;
bool running;
struct timer_list timer;
@@ -263,7 +264,8 @@ struct pl011_dmarx_data {
struct pl011_dmatx_data {
struct dma_chan *chan;
- struct scatterlist sg;
+ dma_addr_t dma;
+ size_t len;
char *buf;
bool queued;
};
@@ -384,32 +386,24 @@ static int pl011_fifo_to_tty(struct uart
#define PL011_DMA_BUFFER_SIZE PAGE_SIZE
-static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
+static int pl011_dmabuf_init(struct dma_chan *chan, struct pl011_dmabuf *db,
enum dma_data_direction dir)
{
- dma_addr_t dma_addr;
-
- sg->buf = dma_alloc_coherent(chan->device->dev,
- PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
- if (!sg->buf)
+ db->buf = dma_alloc_coherent(chan->device->dev, PL011_DMA_BUFFER_SIZE,
+ &db->dma, GFP_KERNEL);
+ if (!db->buf)
return -ENOMEM;
-
- sg_init_table(&sg->sg, 1);
- sg_set_page(&sg->sg, phys_to_page(dma_addr),
- PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
- sg_dma_address(&sg->sg) = dma_addr;
- sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;
+ db->len = PL011_DMA_BUFFER_SIZE;
return 0;
}
-static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
+static void pl011_dmabuf_free(struct dma_chan *chan, struct pl011_dmabuf *db,
enum dma_data_direction dir)
{
- if (sg->buf) {
+ if (db->buf) {
dma_free_coherent(chan->device->dev,
- PL011_DMA_BUFFER_SIZE, sg->buf,
- sg_dma_address(&sg->sg));
+ PL011_DMA_BUFFER_SIZE, db->buf, db->dma);
}
}
@@ -570,8 +564,8 @@ static void pl011_dma_tx_callback(void *
spin_lock_irqsave(&uap->port.lock, flags);
if (uap->dmatx.queued)
- dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
- DMA_TO_DEVICE);
+ dma_unmap_single(dmatx->chan->device->dev, dmatx->dma,
+ dmatx->len, DMA_TO_DEVICE);
dmacr = uap->dmacr;
uap->dmacr = dmacr & ~UART011_TXDMAE;
@@ -657,18 +651,19 @@ static int pl011_dma_tx_refill(struct ua
memcpy(&dmatx->buf[first], &xmit->buf[0], second);
}
- dmatx->sg.length = count;
-
- if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
+ dmatx->len = count;
+ dmatx->dma = dma_map_single(dma_dev->dev, dmatx->buf, count,
+ DMA_TO_DEVICE);
+ if (dmatx->dma == DMA_MAPPING_ERROR) {
uap->dmatx.queued = false;
dev_dbg(uap->port.dev, "unable to map TX DMA\n");
return -EBUSY;
}
- desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
+ desc = dmaengine_prep_slave_single(chan, dmatx->dma, dmatx->len, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
- dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
+ dma_unmap_single(dma_dev->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE);
uap->dmatx.queued = false;
/*
* If DMA cannot be used right now, we complete this
@@ -832,8 +827,8 @@ __acquires(&uap->port.lock)
dmaengine_terminate_async(uap->dmatx.chan);
if (uap->dmatx.queued) {
- dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
- DMA_TO_DEVICE);
+ dma_unmap_single(uap->dmatx.chan->device->dev, uap->dmatx.dma,
+ uap->dmatx.len, DMA_TO_DEVICE);
uap->dmatx.queued = false;
uap->dmacr &= ~UART011_TXDMAE;
pl011_write(uap->dmacr, uap, REG_DMACR);
@@ -847,15 +842,15 @@ static int pl011_dma_rx_trigger_dma(stru
struct dma_chan *rxchan = uap->dmarx.chan;
struct pl011_dmarx_data *dmarx = &uap->dmarx;
struct dma_async_tx_descriptor *desc;
- struct pl011_sgbuf *sgbuf;
+ struct pl011_dmabuf *dbuf;
if (!rxchan)
return -EIO;
/* Start the RX DMA job */
- sgbuf = uap->dmarx.use_buf_b ?
- &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
- desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
+ dbuf = uap->dmarx.use_buf_b ?
+ &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
+ desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
/*
@@ -895,8 +890,8 @@ static void pl011_dma_rx_chars(struct ua
bool readfifo)
{
struct tty_port *port = &uap->port.state->port;
- struct pl011_sgbuf *sgbuf = use_buf_b ?
- &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+ struct pl011_dmabuf *dbuf = use_buf_b ?
+ &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
int dma_count = 0;
u32 fifotaken = 0; /* only used for vdbg() */
@@ -905,7 +900,7 @@ static void pl011_dma_rx_chars(struct ua
if (uap->dmarx.poll_rate) {
/* The data can be taken by polling */
- dmataken = sgbuf->sg.length - dmarx->last_residue;
+ dmataken = dbuf->len - dmarx->last_residue;
/* Recalculate the pending size */
if (pending >= dmataken)
pending -= dmataken;
@@ -919,7 +914,7 @@ static void pl011_dma_rx_chars(struct ua
* Note that tty_insert_flip_buf() tries to take as many chars
* as it can.
*/
- dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
+ dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
pending);
uap->port.icount.rx += dma_count;
@@ -930,7 +925,7 @@ static void pl011_dma_rx_chars(struct ua
/* Reset the last_residue for Rx DMA poll */
if (uap->dmarx.poll_rate)
- dmarx->last_residue = sgbuf->sg.length;
+ dmarx->last_residue = dbuf->len;
/*
* Only continue with trying to read the FIFO if all DMA chars have
@@ -967,8 +962,8 @@ static void pl011_dma_rx_irq(struct uart
{
struct pl011_dmarx_data *dmarx = &uap->dmarx;
struct dma_chan *rxchan = dmarx->chan;
- struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
- &dmarx->sgbuf_b : &dmarx->sgbuf_a;
+ struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
+ &dmarx->dbuf_b : &dmarx->dbuf_a;
size_t pending;
struct dma_tx_state state;
enum dma_status dmastat;
@@ -990,7 +985,7 @@ static void pl011_dma_rx_irq(struct uart
pl011_write(uap->dmacr, uap, REG_DMACR);
uap->dmarx.running = false;
- pending = sgbuf->sg.length - state.residue;
+ pending = dbuf->len - state.residue;
BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
/* Then we terminate the transfer - we now know our residue */
dmaengine_terminate_all(rxchan);
@@ -1017,8 +1012,8 @@ static void pl011_dma_rx_callback(void *
struct pl011_dmarx_data *dmarx = &uap->dmarx;
struct dma_chan *rxchan = dmarx->chan;
bool lastbuf = dmarx->use_buf_b;
- struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
- &dmarx->sgbuf_b : &dmarx->sgbuf_a;
+ struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
+ &dmarx->dbuf_b : &dmarx->dbuf_a;
size_t pending;
struct dma_tx_state state;
int ret;
@@ -1036,7 +1031,7 @@ static void pl011_dma_rx_callback(void *
* the DMA irq handler. So we check the residue here.
*/
rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
- pending = sgbuf->sg.length - state.residue;
+ pending = dbuf->len - state.residue;
BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
/* Then we terminate the transfer - we now know our residue */
dmaengine_terminate_all(rxchan);
@@ -1088,16 +1083,16 @@ static void pl011_dma_rx_poll(unsigned l
unsigned long flags = 0;
unsigned int dmataken = 0;
unsigned int size = 0;
- struct pl011_sgbuf *sgbuf;
+ struct pl011_dmabuf *dbuf;
int dma_count;
struct dma_tx_state state;
- sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+ dbuf = dmarx->use_buf_b ? &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
if (likely(state.residue < dmarx->last_residue)) {
- dmataken = sgbuf->sg.length - dmarx->last_residue;
+ dmataken = dbuf->len - dmarx->last_residue;
size = dmarx->last_residue - state.residue;
- dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
+ dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
size);
if (dma_count == size)
dmarx->last_residue = state.residue;
@@ -1144,7 +1139,7 @@ static void pl011_dma_startup(struct uar
return;
}
- sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);
+ uap->dmatx.len = PL011_DMA_BUFFER_SIZE;
/* The DMA buffer is now the FIFO the TTY subsystem can use */
uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
@@ -1154,7 +1149,7 @@ static void pl011_dma_startup(struct uar
goto skip_rx;
/* Allocate and map DMA RX buffers */
- ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+ ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_a,
DMA_FROM_DEVICE);
if (ret) {
dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
@@ -1162,12 +1157,12 @@ static void pl011_dma_startup(struct uar
goto skip_rx;
}
- ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
+ ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_b,
DMA_FROM_DEVICE);
if (ret) {
dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
"RX buffer B", ret);
- pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+ pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a,
DMA_FROM_DEVICE);
goto skip_rx;
}
@@ -1223,8 +1218,9 @@ static void pl011_dma_shutdown(struct ua
/* In theory, this should already be done by pl011_dma_flush_buffer */
dmaengine_terminate_all(uap->dmatx.chan);
if (uap->dmatx.queued) {
- dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
- DMA_TO_DEVICE);
+ dma_unmap_single(uap->dmatx.chan->device->dev,
+ uap->dmatx.dma, uap->dmatx.len,
+ DMA_TO_DEVICE);
uap->dmatx.queued = false;
}
@@ -1235,8 +1231,8 @@ static void pl011_dma_shutdown(struct ua
if (uap->using_rx_dma) {
dmaengine_terminate_all(uap->dmarx.chan);
/* Clean up the RX DMA */
- pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
- pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
+ pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, DMA_FROM_DEVICE);
+ pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_b, DMA_FROM_DEVICE);
if (uap->dmarx.poll_rate)
del_timer_sync(&uap->dmarx.timer);
uap->using_rx_dma = false;
Thread overview: 29+ messages
2023-12-11 18:20 [PATCH 4.14 00/25] 4.14.333-rc1 review Greg Kroah-Hartman
2023-12-11 18:20 ` [PATCH 4.14 01/25] tg3: Move the [rt]x_dropped counters to tg3_napi Greg Kroah-Hartman
2023-12-11 18:20 ` [PATCH 4.14 02/25] tg3: Increment tx_dropped in tg3_tso_bug() Greg Kroah-Hartman
2023-12-11 18:20 ` [PATCH 4.14 03/25] drm/amdgpu: correct chunk_ptr to a pointer to chunk Greg Kroah-Hartman
2023-12-11 18:20 ` [PATCH 4.14 04/25] net: hns: fix fake link up on xge port Greg Kroah-Hartman
2023-12-11 18:20 ` [PATCH 4.14 05/25] tcp: do not accept ACK of bytes we never sent Greg Kroah-Hartman
2023-12-11 18:20 ` [PATCH 4.14 06/25] RDMA/bnxt_re: Correct module description string Greg Kroah-Hartman
2023-12-11 18:20 ` [PATCH 4.14 07/25] hwmon: (acpi_power_meter) Fix 4.29 MW bug Greg Kroah-Hartman
2023-12-11 18:20 ` [PATCH 4.14 08/25] tracing: Fix a warning when allocating buffered events fails Greg Kroah-Hartman
2023-12-11 18:21 ` [PATCH 4.14 09/25] scsi: be2iscsi: Fix a memleak in beiscsi_init_wrb_handle() Greg Kroah-Hartman
2023-12-11 18:21 ` [PATCH 4.14 10/25] ALSA: pcm: fix out-of-bounds in snd_pcm_state_names Greg Kroah-Hartman
2023-12-11 18:21 ` [PATCH 4.14 11/25] nilfs2: prevent WARNING in nilfs_sufile_set_segment_usage() Greg Kroah-Hartman
2023-12-11 18:21 ` [PATCH 4.14 12/25] tracing: Always update snapshot buffer size Greg Kroah-Hartman
2023-12-11 18:21 ` [PATCH 4.14 13/25] tracing: Fix incomplete locking when disabling buffered events Greg Kroah-Hartman
2023-12-11 18:21 ` [PATCH 4.14 14/25] tracing: Fix a possible race " Greg Kroah-Hartman
2023-12-11 18:21 ` [PATCH 4.14 15/25] packet: Move reference count in packet_sock to atomic_long_t Greg Kroah-Hartman
2023-12-11 18:21 ` [PATCH 4.14 16/25] parport: Add support for Brainboxes IX/UC/PX parallel cards Greg Kroah-Hartman
2023-12-11 18:21 ` Greg Kroah-Hartman [this message]
2023-12-11 18:21 ` [PATCH 4.14 18/25] serial: sc16is7xx: address RX timeout interrupt errata Greg Kroah-Hartman
2023-12-11 18:21 ` [PATCH 4.14 19/25] serial: 8250_omap: Add earlycon support for the AM654 UART controller Greg Kroah-Hartman
2023-12-11 18:21 ` [PATCH 4.14 20/25] KVM: s390/mm: Properly reset no-dat Greg Kroah-Hartman
2023-12-11 18:21 ` [PATCH 4.14 21/25] nilfs2: fix missing error check for sb_set_blocksize call Greg Kroah-Hartman
2023-12-11 18:21 ` [PATCH 4.14 22/25] netlink: dont call ->netlink_bind with table lock held Greg Kroah-Hartman
2023-12-11 18:21 ` [PATCH 4.14 23/25] genetlink: add CAP_NET_ADMIN test for multicast bind Greg Kroah-Hartman
2023-12-11 18:21 ` [PATCH 4.14 24/25] psample: Require CAP_NET_ADMIN when joining "packets" group Greg Kroah-Hartman
2023-12-11 18:21 ` [PATCH 4.14 25/25] drop_monitor: Require CAP_SYS_ADMIN when joining "events" group Greg Kroah-Hartman
2023-12-11 19:46 ` [PATCH 4.14 00/25] 4.14.333-rc1 review Daniel Díaz
2023-12-12 10:39 ` Greg Kroah-Hartman
2023-12-12 19:10 ` Pavel Machek