From: "Blue Swirl" <blauwirbel@gmail.com>
To: qemu-devel@nongnu.org
Subject: Re: [Qemu-devel] sparc32 counter/timer issues
Date: Fri, 21 Sep 2007 23:19:36 +0300
Message-ID: <f43fc5580709211319g7191bbb2h34353ad25d6179e7@mail.gmail.com>
In-Reply-To: <46F3D3A1.1040300@earthlink.net>

On 9/21/07, Robert Reif <reif@earthlink.net> wrote:
> I'm trying to run a real ss10 openboot prom image rather than
> the supplied prom image and found some issues with the way
> counters and timers are implemented.  It appears that the processor
> and system counter/timers are not independent.  The system
> config register actually configures the processor counter/timers,
> and its value is a bit mask selecting which counter/timers to
> configure: 1, 2, 4, and 8 select each processor counter/timer
> individually, and 0xf selects all of them.
> This isn't apparent in the slavio documentation because it is
> for a single cpu only.
>
> Because the system config register configures the processor
> timers, it needs access to all the processor timers (or the
> processor timers need access to the system timer).  This isn't
> how it's currently implemented.

Thanks for testing. This patch changes the config register to behave
as you described, and everything seems to work as before. Do you see
any difference?
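
To be explicit about the decode this implements, here is a minimal
sketch (illustrative only; the helper names are made up, the actual
patch below operates on the per-CPU SLAVIO_TIMERState directly):

/* A write to the system config register treats the value as a per-CPU
 * bit mask: each set bit puts that processor counter/timer into user
 * mode, lowers its pending interrupt and resets its limit; a clear
 * bit puts it back into counter mode. 0xf thus selects timers 0-3.
 */
static void config_write_sketch(uint32_t val)
{
    unsigned int i;

    for (i = 0; i < MAX_CPUS; i++) {
        if (val & (1 << i))
            timer_set_user_mode(i);     /* hypothetical helper */
        else
            timer_set_counter_mode(i);  /* hypothetical helper */
    }
}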

[-- Attachment #2: timer_fix.diff --]

Index: qemu/hw/slavio_timer.c
===================================================================
--- qemu.orig/hw/slavio_timer.c	2007-09-21 19:30:43.000000000 +0000
+++ qemu/hw/slavio_timer.c	2007-09-21 20:08:51.000000000 +0000
@@ -47,6 +47,8 @@
  *
  */
 
+#define MAX_CPUS 16
+
 typedef struct SLAVIO_TIMERState {
     qemu_irq irq;
     ptimer_state *timer;
@@ -54,10 +56,13 @@
     uint64_t limit;
     int stopped;
     int mode; // 0 = processor, 1 = user, 2 = system
+    struct SLAVIO_TIMERState *slave[MAX_CPUS];
+    uint32_t slave_mode;
 } SLAVIO_TIMERState;
 
 #define TIMER_MAXADDR 0x1f
 #define TIMER_SIZE (TIMER_MAXADDR + 1)
+#define CPU_TIMER_SIZE 0x10
 
 // Update count, set irq, update expire_time
 // Convert from ptimer countdown units
@@ -120,7 +125,7 @@
         break;
     case 4:
 	// read user/system mode
-        ret = s->mode & 1;
+        ret = s->slave_mode;
         break;
     default:
         ret = 0;
@@ -167,13 +172,22 @@
 	break;
     case 4:
 	// bit 0: user (1) or system (0) counter mode
-	if (s->mode == 0 || s->mode == 1)
-	    s->mode = val & 1;
-        if (s->mode == 1) {
-            qemu_irq_lower(s->irq);
-            s->limit = -1ULL;
+        {
+            unsigned int i;
+
+            for (i = 0; i < MAX_CPUS; i++) {
+                if (val & (1 << i)) {
+                    qemu_irq_lower(s->slave[i]->irq);
+                    s->slave[i]->limit = -1ULL;
+                    s->slave[i]->mode = 1;
+                } else {
+                    s->slave[i]->mode = 0;
+                }
+                ptimer_set_limit(s->slave[i]->timer, s->slave[i]->limit >> 9,
+                                 1);
+            }
+            s->slave_mode = val & ((1 << MAX_CPUS) - 1);
         }
-        ptimer_set_limit(s->timer, s->limit >> 9, 1);
 	break;
     default:
 	break;
@@ -240,7 +254,8 @@
     qemu_irq_lower(s->irq);
 }
 
-void slavio_timer_init(target_phys_addr_t addr, qemu_irq irq, int mode)
+static SLAVIO_TIMERState *slavio_timer_init(target_phys_addr_t addr,
+                                            qemu_irq irq, int mode)
 {
     int slavio_timer_io_memory;
     SLAVIO_TIMERState *s;
@@ -248,7 +263,7 @@
 
     s = qemu_mallocz(sizeof(SLAVIO_TIMERState));
     if (!s)
-        return;
+        return NULL;
     s->irq = irq;
     s->mode = mode;
     bh = qemu_bh_new(slavio_timer_irq, s);
@@ -257,8 +272,30 @@
 
     slavio_timer_io_memory = cpu_register_io_memory(0, slavio_timer_mem_read,
 						    slavio_timer_mem_write, s);
-    cpu_register_physical_memory(addr, TIMER_SIZE, slavio_timer_io_memory);
+    if (mode < 2)
+        cpu_register_physical_memory(addr, CPU_TIMER_SIZE,
+                                     slavio_timer_io_memory);
+    else
+        cpu_register_physical_memory(addr, TIMER_SIZE,
+                                     slavio_timer_io_memory);
     register_savevm("slavio_timer", addr, 2, slavio_timer_save, slavio_timer_load, s);
     qemu_register_reset(slavio_timer_reset, s);
     slavio_timer_reset(s);
+
+    return s;
+}
+
+void slavio_timer_init_all(target_phys_addr_t base, qemu_irq master_irq,
+                           qemu_irq *cpu_irqs)
+{
+    SLAVIO_TIMERState *master;
+    unsigned int i;
+
+    master = slavio_timer_init(base + 0x10000ULL, master_irq, 2);
+
+    for (i = 0; i < MAX_CPUS; i++) {
+        master->slave[i] = slavio_timer_init(base + (target_phys_addr_t)
+                                             (i * TARGET_PAGE_SIZE),
+                                             cpu_irqs[i], 0);
+    }
 }
Index: qemu/hw/sun4m.c
===================================================================
--- qemu.orig/hw/sun4m.c	2007-09-21 19:31:14.000000000 +0000
+++ qemu/hw/sun4m.c	2007-09-21 19:49:54.000000000 +0000
@@ -379,13 +379,10 @@
 
     nvram = m48t59_init(slavio_irq[0], hwdef->nvram_base, 0,
                         hwdef->nvram_size, 8);
-    for (i = 0; i < MAX_CPUS; i++) {
-        slavio_timer_init(hwdef->counter_base +
-                          (target_phys_addr_t)(i * TARGET_PAGE_SIZE),
-                           slavio_cpu_irq[i], 0);
-    }
-    slavio_timer_init(hwdef->counter_base + 0x10000ULL,
-                      slavio_irq[hwdef->clock1_irq], 2);
+
+    slavio_timer_init_all(hwdef->counter_base, slavio_irq[hwdef->clock1_irq],
+                          slavio_cpu_irq);
+
     slavio_serial_ms_kbd_init(hwdef->ms_kb_base, slavio_irq[hwdef->ms_kb_irq]);
     // Slavio TTYA (base+4, Linux ttyS0) is the first Qemu serial device
     // Slavio TTYB (base+0, Linux ttyS1) is the second Qemu serial device
Index: qemu/vl.h
===================================================================
--- qemu.orig/vl.h	2007-09-21 19:47:54.000000000 +0000
+++ qemu/vl.h	2007-09-21 19:48:26.000000000 +0000
@@ -1275,7 +1275,8 @@
 int load_uboot(const char *filename, target_ulong *ep, int *is_linux);
 
 /* slavio_timer.c */
-void slavio_timer_init(target_phys_addr_t addr, qemu_irq irq, int mode);
+void slavio_timer_init_all(target_phys_addr_t base, qemu_irq master_irq,
+                           qemu_irq *cpu_irqs);
 
 /* slavio_serial.c */
 SerialState *slavio_serial_init(target_phys_addr_t base, qemu_irq irq,
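
For reference, the address map this is intended to set up:

/* counter_base + i * TARGET_PAGE_SIZE : processor timer i (mode 0),
 *     CPU_TIMER_SIZE (0x10) bytes, for i = 0 .. MAX_CPUS - 1
 * counter_base + 0x10000 : system timer (mode 2), TIMER_SIZE (0x20)
 *     bytes; its config register at offset 0x10 drives the per-CPU
 *     timers above
 */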
