qemu-devel.nongnu.org archive mirror
From: Gleb Natapov <gleb@redhat.com>
To: qemu-devel@nongnu.org
Subject: [Qemu-devel] [PATCH v2 1/3] Stop VM on ENOSPC error.
Date: Thu, 15 Jan 2009 12:12:42 +0200
Message-ID: <20090115101241.13211.64596.stgit@dhcp-1-237.tlv.redhat.com>

When a write fails with -ENOSPC, stop the VM instead of failing the request, and repeat the last IDE command once the VM is resumed.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
---

 hw/ide.c |   54 ++++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 50 insertions(+), 4 deletions(-)
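
A condensed sketch of the mechanism, not part of the patch itself and
simplified from the hunks below (names and signatures follow the ones
used here): on -ENOSPC the device model records what it was doing in
the BMDMA state, stops the VM, and a VM change state handler re-issues
the request once the VM is resumed.

    /* Write path: defer instead of failing the guest request. */
    ret = bdrv_write(s->bs, sector_num, s->io_buffer, n);
    if (ret == -ENOSPC) {
        s->bmdma->ide_if = s;                     /* remember which drive to retry */
        s->bmdma->status |= BM_STATUS_PIO_RETRY;  /* remember what to redo */
        vm_stop(0);                               /* pause until space is available */
        return;
    }

    /* Resume path: registered once per BMDMA channel via
     * qemu_add_vm_change_state_handler(ide_dma_restart_cb, bm).
     * The DMA case is analogous, using BM_STATUS_DMA_RETRY and
     * ide_dma_restart(). */
    static void ide_dma_restart_cb(void *opaque, int running)
    {
        BMDMAState *bm = opaque;

        if (!running)
            return;                       /* only act when the VM starts running */
        if (bm->status & BM_STATUS_PIO_RETRY) {
            bm->status &= ~BM_STATUS_PIO_RETRY;
            ide_sector_write(bm->ide_if); /* redo the interrupted write */
        }
    }

From the management side, once the underlying storage has room again,
resuming the VM (e.g. "cont" in the monitor) fires the handler and the
guest request completes without ever seeing an error.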

diff --git a/hw/ide.c b/hw/ide.c
index 7dd41f7..2d2cead 100644
--- a/hw/ide.c
+++ b/hw/ide.c
@@ -457,6 +457,8 @@ static inline int media_is_cd(IDEState *s)
 #define BM_STATUS_DMAING 0x01
 #define BM_STATUS_ERROR  0x02
 #define BM_STATUS_INT    0x04
+#define BM_STATUS_DMA_RETRY  0x08
+#define BM_STATUS_PIO_RETRY  0x10
 
 #define BM_CMD_START     0x01
 #define BM_CMD_READ      0x08
@@ -488,6 +490,8 @@ typedef struct BMDMAState {
     IDEState *ide_if;
     BlockDriverCompletionFunc *dma_cb;
     BlockDriverAIOCB *aiocb;
+    int64_t sector_num;
+    uint32_t nsector;
 } BMDMAState;
 
 typedef struct PCIIDEState {
@@ -498,6 +502,7 @@ typedef struct PCIIDEState {
 } PCIIDEState;
 
 static void ide_dma_start(IDEState *s, BlockDriverCompletionFunc *dma_cb);
+static void ide_dma_restart(IDEState *s);
 static void ide_atapi_cmd_read_dma_cb(void *opaque, int ret);
 
 static void padstr(char *str, const char *src, int len)
@@ -991,8 +996,13 @@ static void ide_sector_write(IDEState *s)
         n = s->req_nb_sectors;
     ret = bdrv_write(s->bs, sector_num, s->io_buffer, n);
     if (ret != 0) {
-	ide_rw_error(s);
-	return;
+        if (ret == -ENOSPC) {
+            s->bmdma->ide_if = s;
+            s->bmdma->status |= BM_STATUS_PIO_RETRY;
+            vm_stop(0);
+        } else
+            ide_rw_error(s);
+        return;
     }
 
     s->nsector -= n;
@@ -1024,6 +1034,20 @@ static void ide_sector_write(IDEState *s)
     }
 }
 
+static void ide_dma_restart_cb(void *opaque, int running)
+{
+    BMDMAState *bm = opaque;
+    if (!running)
+        return;
+    if (bm->status & BM_STATUS_DMA_RETRY) {
+        bm->status &= ~BM_STATUS_DMA_RETRY;
+        ide_dma_restart(bm->ide_if);
+    } else if (bm->status & BM_STATUS_PIO_RETRY) {
+        bm->status &= ~BM_STATUS_PIO_RETRY;
+        ide_sector_write(bm->ide_if);
+    }
+}
+
 static void ide_write_dma_cb(void *opaque, int ret)
 {
     BMDMAState *bm = opaque;
@@ -1032,8 +1056,12 @@ static void ide_write_dma_cb(void *opaque, int ret)
     int64_t sector_num;
 
     if (ret < 0) {
-	ide_dma_error(s);
-	return;
+        if (ret == -ENOSPC) {
+            bm->status |= BM_STATUS_DMA_RETRY;
+            vm_stop(0);
+        } else
+            ide_dma_error(s);
+        return;
     }
 
     n = s->io_buffer_size >> 9;
@@ -2849,11 +2877,24 @@ static void ide_dma_start(IDEState *s, BlockDriverCompletionFunc *dma_cb)
     bm->cur_prd_last = 0;
     bm->cur_prd_addr = 0;
     bm->cur_prd_len = 0;
+    bm->sector_num = ide_get_sector(s);
+    bm->nsector = s->nsector;
     if (bm->status & BM_STATUS_DMAING) {
         bm->dma_cb(bm, 0);
     }
 }
 
+static void ide_dma_restart(IDEState *s)
+{
+    BMDMAState *bm = s->bmdma;
+    ide_set_sector(s, bm->sector_num);
+    s->io_buffer_index = 0;
+    s->io_buffer_size = 0;
+    s->nsector = bm->nsector;
+    bm->cur_addr = bm->addr;
+    ide_dma_start(s, bm->dma_cb);
+}
+
 static void ide_dma_cancel(BMDMAState *bm)
 {
     if (bm->status & BM_STATUS_DMAING) {
@@ -3043,6 +3084,7 @@ static void bmdma_map(PCIDevice *pci_dev, int region_num,
         d->ide_if[2 * i].bmdma = bm;
         d->ide_if[2 * i + 1].bmdma = bm;
         bm->pci_dev = (PCIIDEState *)pci_dev;
+        qemu_add_vm_change_state_handler(ide_dma_restart_cb, bm);
 
         register_ioport_write(addr, 1, 1, bmdma_cmd_writeb, bm);
 
@@ -3071,6 +3113,8 @@ static void pci_ide_save(QEMUFile* f, void *opaque)
         qemu_put_8s(f, &bm->cmd);
         qemu_put_8s(f, &bm->status);
         qemu_put_be32s(f, &bm->addr);
+        qemu_put_sbe64s(f, &bm->sector_num);
+        qemu_put_be32s(f, &bm->nsector);
         /* XXX: if a transfer is pending, we do not save it yet */
     }
 
@@ -3105,6 +3149,8 @@ static int pci_ide_load(QEMUFile* f, void *opaque, int version_id)
         qemu_get_8s(f, &bm->cmd);
         qemu_get_8s(f, &bm->status);
         qemu_get_be32s(f, &bm->addr);
+        qemu_get_sbe64s(f, &bm->sector_num);
+        qemu_get_be32s(f, &bm->nsector);
         /* XXX: if a transfer is pending, we do not save it yet */
     }
 


Thread overview: 6+ messages
2009-01-15 10:12 Gleb Natapov [this message]
2009-01-15 10:12 ` [Qemu-devel] [PATCH v2 2/3] bdrv_write should not stop on partial write Gleb Natapov
2009-01-15 20:44   ` Anthony Liguori
2009-01-15 10:12 ` [Qemu-devel] [PATCH v2 3/3] Return -errno on write failure Gleb Natapov
2009-01-15 20:44   ` Anthony Liguori
2009-01-15 20:39 ` [Qemu-devel] [PATCH v2 1/3] Stop VM on ENOSPC error Anthony Liguori
