From: P J P <ppandit@redhat.com>
To: Jason Wang <jasowang@redhat.com>
Cc: Prasad J Pandit <pjp@fedoraproject.org>,
Stefan Hajnoczi <stefanha@gmail.com>,
Qemu Developers <qemu-devel@nongnu.org>,
Li Qiang <pangpei.lq@antfin.com>,
Sven Schnelle <svens@stackframe.org>,
Ziming Zhang <ezrakiez@gmail.com>
Subject: [PATCH v4 1/3] net: tulip: check frame size and r/w data length
Date: Thu, 19 Mar 2020 15:22:09 +0530
Message-ID: <20200319095211.741445-2-ppandit@redhat.com>
In-Reply-To: <20200319095211.741445-1-ppandit@redhat.com>
From: Prasad J Pandit <pjp@fedoraproject.org>
The Tulip network driver, while copying tx/rx buffers, does not check
the frame size against the r/w data length. This may lead to an OOB
buffer access. Add checks to avoid it.

Limit the number of iterations over the descriptor list to avoid a
potential infinite loop in tulip_xmit_list_update.
Reported-by: Li Qiang <pangpei.lq@antfin.com>
Reported-by: Ziming Zhang <ezrakiez@gmail.com>
Reported-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
---
hw/net/tulip.c | 36 +++++++++++++++++++++++++++---------
1 file changed, 27 insertions(+), 9 deletions(-)
Update v3: return a value from tulip_copy_tx_buffers() and avoid an infinite loop
-> https://lists.gnu.org/archive/html/qemu-devel/2020-02/msg06275.html
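For reviewers, the guard added before each pci_dma_write()/pci_dma_read()
call below boils down to this standalone sketch. It is an illustration
only, not tulip.c code: FRAME_SIZE, frame, frame_len and copy_chunk()
are made-up names standing in for the rx/tx frame buffer and its fill
level.

#include <stdio.h>
#include <string.h>

#define FRAME_SIZE 2048

static unsigned char frame[FRAME_SIZE];
static size_t frame_len;

/* Reject any chunk that would run past the fixed frame buffer. */
static int copy_chunk(const unsigned char *src, size_t len)
{
    if (frame_len + len >= sizeof(frame)) {
        return -1;
    }
    memcpy(frame + frame_len, src, len);
    frame_len += len;
    return 0;
}

int main(void)
{
    unsigned char chunk[4096] = { 0 };

    printf("small copy: %d\n", copy_chunk(chunk, 512));  /* 0, accepted */
    printf("huge copy: %d\n", copy_chunk(chunk, 4096));  /* -1, rejected */
    return 0;
}

As in the patch, the check uses >= rather than >, so a chunk that would
exactly fill the buffer is also refused and the copy is simply skipped.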
diff --git a/hw/net/tulip.c b/hw/net/tulip.c
index cfac2719d3..fbe40095da 100644
--- a/hw/net/tulip.c
+++ b/hw/net/tulip.c
@@ -170,6 +170,10 @@ static void tulip_copy_rx_bytes(TULIPState *s, struct tulip_descriptor *desc)
         } else {
             len = s->rx_frame_len;
         }
+
+        if (s->rx_frame_len + len >= sizeof(s->rx_frame)) {
+            return;
+        }
         pci_dma_write(&s->dev, desc->buf_addr1, s->rx_frame +
             (s->rx_frame_size - s->rx_frame_len), len);
         s->rx_frame_len -= len;
@@ -181,6 +185,10 @@ static void tulip_copy_rx_bytes(TULIPState *s, struct tulip_descriptor *desc)
         } else {
             len = s->rx_frame_len;
         }
+
+        if (s->rx_frame_len + len >= sizeof(s->rx_frame)) {
+            return;
+        }
         pci_dma_write(&s->dev, desc->buf_addr2, s->rx_frame +
             (s->rx_frame_size - s->rx_frame_len), len);
         s->rx_frame_len -= len;
@@ -227,7 +235,8 @@ static ssize_t tulip_receive(TULIPState *s, const uint8_t *buf, size_t size)
 
     trace_tulip_receive(buf, size);
 
-    if (size < 14 || size > 2048 || s->rx_frame_len || tulip_rx_stopped(s)) {
+    if (size < 14 || size > sizeof(s->rx_frame) - 4
+            || s->rx_frame_len || tulip_rx_stopped(s)) {
         return 0;
     }
 
@@ -275,7 +284,6 @@ static ssize_t tulip_receive_nc(NetClientState *nc,
     return tulip_receive(qemu_get_nic_opaque(nc), buf, size);
 }
 
-
 static NetClientInfo net_tulip_info = {
     .type = NET_CLIENT_DRIVER_NIC,
     .size = sizeof(NICState),
@@ -558,7 +566,7 @@ static void tulip_tx(TULIPState *s, struct tulip_descriptor *desc)
         if ((s->csr[6] >> CSR6_OM_SHIFT) & CSR6_OM_MASK) {
             /* Internal or external Loopback */
             tulip_receive(s, s->tx_frame, s->tx_frame_len);
-        } else {
+        } else if (s->tx_frame_len <= sizeof(s->tx_frame)) {
             qemu_send_packet(qemu_get_queue(s->nic),
                 s->tx_frame, s->tx_frame_len);
         }
@@ -570,23 +578,31 @@ static void tulip_tx(TULIPState *s, struct tulip_descriptor *desc)
     }
 }
 
-static void tulip_copy_tx_buffers(TULIPState *s, struct tulip_descriptor *desc)
+static int tulip_copy_tx_buffers(TULIPState *s, struct tulip_descriptor *desc)
 {
     int len1 = (desc->control >> TDES1_BUF1_SIZE_SHIFT) & TDES1_BUF1_SIZE_MASK;
     int len2 = (desc->control >> TDES1_BUF2_SIZE_SHIFT) & TDES1_BUF2_SIZE_MASK;
 
+    if (s->tx_frame_len + len1 >= sizeof(s->tx_frame)) {
+        return -1;
+    }
     if (len1) {
         pci_dma_read(&s->dev, desc->buf_addr1,
             s->tx_frame + s->tx_frame_len, len1);
         s->tx_frame_len += len1;
     }
 
+    if (s->tx_frame_len + len2 >= sizeof(s->tx_frame)) {
+        return -1;
+    }
     if (len2) {
         pci_dma_read(&s->dev, desc->buf_addr2,
             s->tx_frame + s->tx_frame_len, len2);
         s->tx_frame_len += len2;
     }
     desc->status = (len1 + len2) ? 0 : 0x7fffffff;
+
+    return 0;
 }
 
 static void tulip_setup_filter_addr(TULIPState *s, uint8_t *buf, int n)
@@ -651,13 +667,15 @@ static uint32_t tulip_ts(TULIPState *s)
 
 static void tulip_xmit_list_update(TULIPState *s)
 {
+#define TULIP_DESC_MAX 128
+    uint8_t i = 0;
     struct tulip_descriptor desc;
 
     if (tulip_ts(s) != CSR5_TS_SUSPENDED) {
         return;
     }
 
-    for (;;) {
+    for (i = 0; i < TULIP_DESC_MAX; i++) {
         tulip_desc_read(s, s->current_tx_desc, &desc);
         tulip_dump_tx_descriptor(s, &desc);
 
@@ -675,10 +693,10 @@ static void tulip_xmit_list_update(TULIPState *s)
                 s->tx_frame_len = 0;
             }
 
-            tulip_copy_tx_buffers(s, &desc);
-
-            if (desc.control & TDES1_LS) {
-                tulip_tx(s, &desc);
+            if (!tulip_copy_tx_buffers(s, &desc)) {
+                if (desc.control & TDES1_LS) {
+                    tulip_tx(s, &desc);
+                }
             }
         }
         tulip_desc_write(s, s->current_tx_desc, &desc);
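
The loop bound above guards a walk over a guest-controlled descriptor
ring, which a malicious guest can make cyclic. Reduced to a standalone
sketch (DESC_MAX, ring, own and next are made-up stand-ins for the
tulip descriptor fields, not the real layout):

#include <stdbool.h>
#include <stdio.h>

#define DESC_MAX 128

struct desc {
    bool own;   /* descriptor handed to the device by the driver */
    int next;   /* index of the next descriptor */
};

int main(void)
{
    /* A hostile guest can make the ring loop back on itself forever. */
    struct desc ring[2] = { { true, 1 }, { true, 0 } };
    int cur = 0;
    int i;

    /* for (;;) would spin forever on the ring above; the cap cannot. */
    for (i = 0; i < DESC_MAX; i++) {
        if (!ring[cur].own) {
            break;
        }
        cur = ring[cur].next;
    }
    printf("processed at most %d descriptors\n", i);
    return 0;
}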
--
2.25.1