qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: Joerg Platte <lists@naasa.net>
To: qemu-devel@nongnu.org
Subject: Re: [Qemu-devel] SPARC iommu mapping
Date: Sun, 9 Apr 2006 18:31:30 +0200	[thread overview]
Message-ID: <200604091831.31772.lists@naasa.net> (raw)
In-Reply-To: <BAY104-F117C8B7ECF3ED07C6591A9FFC90@phx.gbl>

[-- Attachment #1: Type: text/plain, Size: 703 bytes --]

Am Freitag, 7. April 2006 17:44 schrieb Blue Swirl:
Hi!

> Maybe this patch helps?

The attached patch is an updated version of your patch. With this patch I was 
able to copy files from one directory to another on a disk image. After 
unmounting the image, e2fsck reported no errors on this image. But 
unfortunately, booting from this image was not possible. INIT reported a 
segmentation violation. Maybe there is something else wrong. But now writing 
works much better than before :-)

regards,
Jörg

PS: Is there any reason, why qemu copies disk data byte by byte? Calling 
iommu_translate only once per page and copying a whole page could speed up 
disk access significantly...

[-- Attachment #2: qemu-write.patch --]
[-- Type: text/x-diff, Size: 5321 bytes --]

--- esp.c.orig	2006-04-09 18:20:38.000000000 +0200
+++ esp.c	2006-04-09 18:04:15.000000000 +0200
@@ -63,6 +63,8 @@
     ESPDMAFunc *dma_cb;
     int64_t offset, len;
     int target;
+    int blocksize;
+    int ti_bufstart;
 };
 
 #define STAT_DO 0x00
@@ -229,12 +231,12 @@
                             target_phys_addr_t phys_addr, 
                             int transfer_size1)
 {
+    int len = transfer_size1/s->blocksize;
     DPRINTF("Write callback (offset %lld len %lld size %d trans_size %d)\n",
             s->offset, s->len, s->ti_size, transfer_size1);
-    bdrv_write(s->bd[s->target], s->offset, s->ti_buf, s->len);
-    s->offset = 0;
-    s->len = 0;
-    s->target = 0;
+
+    bdrv_write(s->bd[s->target], s->offset, s->ti_buf+s->ti_bufstart, len);
+    s->offset+=len;
     return 0;
 }
 
@@ -265,6 +267,7 @@
     s->ti_size = 0;
     s->ti_rptr = 0;
     s->ti_wptr = 0;
+    s->ti_bufstart = 0;
 
     if (target >= 4 || !s->bd[target]) { // No such drive
 	s->rregs[4] = STAT_IN;
@@ -293,6 +296,7 @@
 	s->ti_buf[3] = 2;
 	s->ti_buf[4] = 32;
 	s->ti_dir = 1;
+        s->ti_bufstart = 0;
 	s->ti_size = 36;
 	break;
     case 0x1a:
@@ -314,6 +318,7 @@
 	    s->ti_buf[6] = 2; // sector size 512
 	s->ti_buf[7] = 0;
 	s->ti_dir = 1;
+        s->ti_bufstart = 0;
 	s->ti_size = 8;
 	break;
     case 0x28:
@@ -336,6 +341,7 @@
 	    bdrv_read(s->bd[target], offset, s->ti_buf, len);
 	    // XXX error handling
 	    s->ti_dir = 1;
+	    s->ti_bufstart = 0;
 	    break;
 	}
     case 0x2a:
@@ -346,10 +352,12 @@
 		offset = ((buf[3] << 24) | (buf[4] << 16) | (buf[5] << 8) | buf[6]) * 4;
 		len = ((buf[8] << 8) | buf[9]) * 4;
 		s->ti_size = len * 2048;
+		s->blocksize=2048;
 	    } else {
 		offset = (buf[3] << 24) | (buf[4] << 16) | (buf[5] << 8) | buf[6];
 		len = (buf[8] << 8) | buf[9];
 		s->ti_size = len * 512;
+		s->blocksize=512;
 	    }
 	    DPRINTF("Write (10) (offset %lld len %lld)\n", offset, len);
             if (s->ti_size > TI_BUFSZ) {
@@ -359,6 +367,7 @@
             s->offset = offset;
             s->len = len;
             s->target = target;
+            s->ti_bufstart = 0;
 	    // XXX error handling
 	    s->ti_dir = 0;
 	    break;
@@ -400,6 +409,7 @@
                 break;
             }
 	    s->ti_dir = 1;
+	    s->ti_bufstart = 0;
             break;
         }
     default:
@@ -415,10 +425,9 @@
 
 static void dma_write(ESPState *s, const uint8_t *buf, uint32_t len)
 {
-    uint32_t dmaptr, dmalen;
+    uint32_t dmaptr;
 
-    dmalen = s->wregs[0] | (s->wregs[1] << 8);
-    DPRINTF("Transfer status len %d\n", dmalen);
+    DPRINTF("Transfer status len %d\n", len);
     if (s->dma) {
 	dmaptr = iommu_translate(s->espdmaregs[1]);
 	DPRINTF("DMA Direction: %c\n", s->espdmaregs[0] & 0x100? 'w': 'r');
@@ -428,10 +437,10 @@
 	s->rregs[6] = SEQ_CD;
     } else {
 	memcpy(s->ti_buf, buf, len);
-	s->ti_size = dmalen;
+	s->ti_size = len;
 	s->ti_rptr = 0;
 	s->ti_wptr = 0;
-	s->rregs[7] = dmalen;
+	s->rregs[7] = len;
     }
     s->espdmaregs[0] |= DMA_INTR;
     pic_set_irq(s->irq, 1);
@@ -442,34 +451,57 @@
 
 static void handle_ti(ESPState *s)
 {
-    uint32_t dmaptr, dmalen;
+    uint32_t dmaptr, dmalen, minlen;
     unsigned int i;
 
     dmalen = s->wregs[0] | (s->wregs[1] << 8);
-    DPRINTF("Transfer Information len %d\n", dmalen);
+    if (dmalen==0) {
+      dmalen=0x10000;
+    }
+
+    minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
+    DPRINTF("Transfer Information len %d\n", minlen);
     if (s->dma) {
 	dmaptr = iommu_translate(s->espdmaregs[1]);
-	DPRINTF("DMA Direction: %c, addr 0x%8.8x\n", s->espdmaregs[0] & 0x100? 'w': 'r', dmaptr);
-	for (i = 0; i < s->ti_size; i++) {
+	DPRINTF("DMA Direction: %c, addr 0x%8.8x %08x %d %d\n", s->espdmaregs[0] & 0x100? 'w': 'r', dmaptr, s->ti_size, s->ti_bufstart, s->ti_dir);
+	for (i = 0; i < minlen; i++) {
 	    dmaptr = iommu_translate(s->espdmaregs[1] + i);
 	    if (s->ti_dir)
-		cpu_physical_memory_write(dmaptr, &s->ti_buf[i], 1);
+		cpu_physical_memory_write(dmaptr, &s->ti_buf[s->ti_bufstart+i], 1);
 	    else
-		cpu_physical_memory_read(dmaptr, &s->ti_buf[i], 1);
+		cpu_physical_memory_read(dmaptr, &s->ti_buf[s->ti_bufstart+i], 1);
 	}
         if (s->dma_cb) {
-            s->dma_cb(s, s->espdmaregs[1], dmalen);
+            s->dma_cb(s, s->espdmaregs[1], minlen);
+        }
+        if (minlen<s->ti_size) {
+	    s->rregs[4] = STAT_IN | STAT_TC | (s->ti_dir?STAT_DO:STAT_DI);
+	    s->ti_size-=minlen;
+	    s->ti_bufstart+=minlen;
+        } else {
+	    s->rregs[4] = STAT_IN | STAT_TC | STAT_ST;
             s->dma_cb = NULL;
+            s->offset = 0;
+            s->len = 0;
+            s->target = 0;
+            s->ti_bufstart = 0;
         }
-	s->rregs[4] = STAT_IN | STAT_TC | STAT_ST;
-	s->rregs[5] = INTR_BS;
+        s->rregs[5] = INTR_BS;
 	s->rregs[6] = 0;
+	s->rregs[7] = 0;
 	s->espdmaregs[0] |= DMA_INTR;
     } else {
-	s->ti_size = dmalen;
+	s->ti_size = minlen;
 	s->ti_rptr = 0;
 	s->ti_wptr = 0;
-	s->rregs[7] = dmalen;
+	s->rregs[7] = minlen;
     }	
     pic_set_irq(s->irq, 1);
 }
@@ -485,8 +517,10 @@
     s->ti_rptr = 0;
     s->ti_wptr = 0;
     s->ti_dir = 0;
+    s->ti_bufstart = 0;
     s->dma = 0;
     s->dma_cb = NULL;
+    s->blocksize = 0;
 }
 
 static uint32_t esp_mem_readb(void *opaque, target_phys_addr_t addr)

  parent reply	other threads:[~2006-04-09 16:31 UTC|newest]

Thread overview: 16+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2006-04-07 15:44 [Qemu-devel] SPARC iommu mapping Blue Swirl
2006-04-07 21:33 ` Joerg Platte
2006-04-09 16:31 ` Joerg Platte [this message]
2006-04-10 13:18 ` Joerg Platte
2006-04-11 19:21   ` Blue Swirl
2006-04-14 16:07     ` Joerg Platte
2006-04-17  8:49       ` Blue Swirl
2006-05-03 18:37         ` Joerg Platte
2006-05-03 18:52           ` Blue Swirl
  -- strict thread matches above, loose matches on Subject: below --
2006-04-04 19:27 Joerg Platte
2006-04-05 15:23 ` Joerg Platte
2006-04-05 17:36   ` Blue Swirl
2006-04-05 18:14     ` Joerg Platte
2006-04-05 18:25     ` Joerg Platte
2006-04-05 20:03     ` Joerg Platte
2006-04-06 16:53     ` Joerg Platte

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=200604091831.31772.lists@naasa.net \
    --to=lists@naasa.net \
    --cc=jplatte@naasa.net \
    --cc=qemu-devel@nongnu.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).