From: "Jan Beulich" <JBeulich@novell.com>
To: "xen-devel@lists.xensource.com" <xen-devel@lists.xensource.com>
Subject: [PATCH] eliminate unnecessary NR_CPUS-sized arrays from 't' key handler
Date: Thu, 08 Jul 2010 16:36:05 +0100
Message-ID: <4C360C85020000780000A48D@vpn.id2.novell.com>

Replace the two NR_CPUS-sized arrays, read_clocks_time[] and
read_cycles_time[], with per-CPU data.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
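
The conversion follows the usual Xen per-CPU idiom: DEFINE_PER_CPU(type,
name) declares one instance of the variable for every CPU, and
per_cpu(name, cpu) designates the instance belonging to a particular CPU,
so each read_clocks_time[cpu] access becomes
per_cpu(read_clocks_time, cpu). Below is a small, compilable mock of just
that accessor pattern; it deliberately models the storage with a plain
array, whereas the real macros place the variable in a per-CPU data
section whose backing storage is set up per CPU actually brought up,
which is what removes the NR_CPUS-sized static footprint.

/*
 * Compilable mock of the per-CPU accessor pattern used in this patch.
 * NOT Xen's implementation: the real DEFINE_PER_CPU()/per_cpu() place
 * the variable in a per-CPU data section and apply a per-CPU offset;
 * the array here only makes the syntax concrete.
 */
#include <stdio.h>

#define MAX_CPUS 8
#define DEFINE_PER_CPU(type, name) type __mock_per_cpu_##name[MAX_CPUS]
#define per_cpu(name, cpu)         (__mock_per_cpu_##name[(cpu)])

typedef long long s_time_t;   /* Xen's s_time_t is a signed 64-bit count */

static DEFINE_PER_CPU(s_time_t, read_clocks_time);

int main(void)
{
    unsigned int cpu = 2;

    per_cpu(read_clocks_time, cpu) = 42;   /* was: read_clocks_time[cpu] = ...; */
    printf("cpu%u sample: %lld\n", cpu, per_cpu(read_clocks_time, cpu));
    return 0;
}

Besides shrinking the static footprint, per-CPU placement keeps each
CPU's sample in that CPU's own data area, so the concurrent writers in
the rendezvous below never contend on a shared cache line.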

--- 2010-06-15.orig/xen/common/keyhandler.c	2010-06-11 11:41:35.000000000 +0200
+++ 2010-06-15/xen/common/keyhandler.c	2010-07-06 17:23:10.000000000 +0200
@@ -307,8 +307,8 @@ static struct keyhandler dump_domains_ke
 };
 
 static cpumask_t read_clocks_cpumask = CPU_MASK_NONE;
-static s_time_t read_clocks_time[NR_CPUS];
-static u64 read_cycles_time[NR_CPUS];
+static DEFINE_PER_CPU(s_time_t, read_clocks_time);
+static DEFINE_PER_CPU(u64, read_cycles_time);
 
 static void read_clocks_slave(void *unused)
 {
@@ -316,8 +316,8 @@ static void read_clocks_slave(void *unus
     local_irq_disable();
     while ( !cpu_isset(cpu, read_clocks_cpumask) )
         cpu_relax();
-    read_clocks_time[cpu] = NOW();
-    read_cycles_time[cpu] = get_cycles();
+    per_cpu(read_clocks_time, cpu) = NOW();
+    per_cpu(read_cycles_time, cpu) = get_cycles();
     cpu_clear(cpu, read_clocks_cpumask);
     local_irq_enable();
 }
@@ -339,8 +339,8 @@ static void read_clocks(unsigned char ke
 
     local_irq_disable();
     read_clocks_cpumask = cpu_online_map;
-    read_clocks_time[cpu] = NOW();
-    read_cycles_time[cpu] = get_cycles();
+    per_cpu(read_clocks_time, cpu) = NOW();
+    per_cpu(read_cycles_time, cpu) = get_cycles();
     cpu_clear(cpu, read_clocks_cpumask);
     local_irq_enable();
 
@@ -350,20 +350,24 @@ static void read_clocks(unsigned char ke
     min_stime_cpu = max_stime_cpu = min_cycles_cpu = max_cycles_cpu = cpu;
     for_each_online_cpu ( cpu )
     {
-        if ( read_clocks_time[cpu] < read_clocks_time[min_stime_cpu] )
+        if ( per_cpu(read_clocks_time, cpu) <
+             per_cpu(read_clocks_time, min_stime_cpu) )
             min_stime_cpu = cpu;
-        if ( read_clocks_time[cpu] > read_clocks_time[max_stime_cpu] )
+        if ( per_cpu(read_clocks_time, cpu) >
+             per_cpu(read_clocks_time, max_stime_cpu) )
             max_stime_cpu = cpu;
-        if ( read_cycles_time[cpu] < read_cycles_time[min_cycles_cpu] )
+        if ( per_cpu(read_cycles_time, cpu) <
+             per_cpu(read_cycles_time, min_cycles_cpu) )
             min_cycles_cpu = cpu;
-        if ( read_cycles_time[cpu] > read_cycles_time[max_cycles_cpu] )
+        if ( per_cpu(read_cycles_time, cpu) >
+             per_cpu(read_cycles_time, max_cycles_cpu) )
             max_cycles_cpu = cpu;
     }
 
-    min_stime = read_clocks_time[min_stime_cpu];
-    max_stime = read_clocks_time[max_stime_cpu];
-    min_cycles = read_cycles_time[min_cycles_cpu];
-    max_cycles = read_cycles_time[max_cycles_cpu];
+    min_stime = per_cpu(read_clocks_time, min_stime_cpu);
+    max_stime = per_cpu(read_clocks_time, max_stime_cpu);
+    min_cycles = per_cpu(read_cycles_time, min_cycles_cpu);
+    max_cycles = per_cpu(read_cycles_time, max_cycles_cpu);
 
     spin_unlock(&lock);
 


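For context, the logic around these hunks is a simple rendezvous: the
't' handler publishes cpu_online_map into read_clocks_cpumask, each
other CPU spins in read_clocks_slave() with interrupts off until it
sees its own bit, samples NOW() and get_cycles() into its per-CPU
slots, and clears its bit; once the mask drains, the initiating CPU
scans all online CPUs for the minimum and maximum samples to report
cross-CPU clock skew. The sketch below is a self-contained user-space
analogue of that protocol using pthreads and C11 atomics; every name
in it is illustrative, not Xen API.

/*
 * User-space analogue of the read_clocks rendezvous (illustrative
 * names throughout, not Xen API).  "Slave" threads stand in for the
 * other CPUs, an atomic bitmask for read_clocks_cpumask, and a
 * per-thread slot array for the per-CPU sample variables.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NTHREADS 4

static atomic_ulong go_mask;              /* ~ read_clocks_cpumask */
static uint64_t sample_ns[NTHREADS];      /* ~ per-CPU sample slots */

static uint64_t now_ns(void)              /* ~ NOW() */
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000u + (uint64_t)ts.tv_nsec;
}

static void *slave(void *arg)
{
    unsigned long id = (unsigned long)arg;

    /* Spin until the master sets our bit (~ cpu_isset()). */
    while ( !(atomic_load(&go_mask) & (1UL << id)) )
        ;
    sample_ns[id] = now_ns();
    /* Clear our bit to signal completion (~ cpu_clear()). */
    atomic_fetch_and(&go_mask, ~(1UL << id));
    return NULL;
}

int main(void)
{
    pthread_t tid[NTHREADS];
    unsigned long i;

    for ( i = 1; i < NTHREADS; i++ )
        pthread_create(&tid[i], NULL, slave, (void *)i);

    /* Release all slaves at once, then take our own sample. */
    atomic_store(&go_mask, (1UL << NTHREADS) - 2);   /* bits 1..N-1 */
    sample_ns[0] = now_ns();

    /* Wait for the mask to drain, as read_clocks() does. */
    while ( atomic_load(&go_mask) != 0 )
        ;

    uint64_t min = sample_ns[0], max = sample_ns[0];
    for ( i = 1; i < NTHREADS; i++ )
    {
        if ( sample_ns[i] < min ) min = sample_ns[i];
        if ( sample_ns[i] > max ) max = sample_ns[i];
    }
    printf("sample skew: %llu ns\n", (unsigned long long)(max - min));

    for ( i = 1; i < NTHREADS; i++ )
        pthread_join(tid[i], NULL);
    return 0;
}

Compiled with cc -pthread, it prints the spread between the earliest
and latest sample, i.e. the same min/max reduction read_clocks()
performs over the per-CPU values.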


