xen-devel.lists.xenproject.org archive mirror
 help / color / mirror / Atom feed
From: Jae-Min Ryu <jm77.ryu@samsung.com>
To: Jae-Min Ryu <jm77.ryu@samsung.com>,
	Lars Kurth <lars.kurth@citrix.com>,
	Ian Campbell <Ian.Campbell@citrix.com>,
	Stefano Stabellini <Stefano.Stabellini@eu.citrix.com>,
	"Keir (Xen.org)" <keir@xen.org>,
	Ian Jackson <Ian.Jackson@eu.citrix.com>,
	"xen-arm@lists.xensource.com" <xen-arm@lists.xensource.com>,
	"xen-devel@lists.xensource.com" <xen-devel@lists.xensource.com>
Cc: 서상범 <sbuk.suh@samsung.com>
Subject: [PATCH 04/14] arm: implement xen init code
Date: Mon, 13 Feb 2012 07:56:36 +0000 (GMT)	[thread overview]
Message-ID: <0LZB0052INECEV60@mailout1.samsung.com> (raw)

[-- Attachment #1: Type: text/plain, Size: 6006 bytes --]

arm: implement xen init code

 xen/arch/arm/xen/cpu.c   |   84 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-------
 xen/arch/arm/xen/setup.c |  101 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------
 2 files changed, 167 insertions(+), 18 deletions(-)

Signed-off-by: Jaemin Ryu <jm77.ryu@samsung.com>

diff -r fb0815ba40a1 xen/arch/arm/xen/cpu.c
--- a/xen/arch/arm/xen/cpu.c	Fri Feb 03 16:26:49 2012 +0900
+++ b/xen/arch/arm/xen/cpu.c	Fri Feb 03 17:28:15 2012 +0900
@@ -28,6 +28,11 @@
 #include <xen/sched.h>
 #include <xen/preempt.h>
 #include <xen/percpu.h>
+#include <asm/mmu.h>
+#include <asm/current.h>
+#include <asm/delay.h>
+#include <asm/processor.h>
+#include <asm/delay.h>
 
 cpumask_t cpu_online_map;
 cpumask_t cpu_present_map;
@@ -46,7 +51,12 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t
 
 int __cpu_up(unsigned int cpu)
 {
-	NOT_YET();
+	int ret = 0;
+
+	while(!cpu_online(cpu)) {
+		cpu_relax();
+		process_pending_softirqs();
+	}
 
 	return 0;
 }
@@ -63,35 +73,93 @@ void __cpu_die(unsigned int cpu)
 
 void set_cpu_sibling_map(unsigned int cpu)
 {
-	NOT_YET();
+	unsigned int i;
+
+	for_each_present_cpu(i) {
+		cpumask_set_cpu(i, &per_cpu(cpu_sibling_mask, cpu));
+		cpumask_set_cpu(cpu, &per_cpu(cpu_sibling_mask, i));
+
+		cpumask_set_cpu(i, &per_cpu(cpu_core_mask, cpu));
+		cpumask_set_cpu(cpu, &per_cpu(cpu_core_mask, i));
+	}
 }
 
 void smp_prepare_cpus(unsigned int max_cpus)
 {
-	NOT_YET();
+	set_cpu_sibling_map(0);
 }
 
 void smp_prepare_boot_cpu(void)
 {
-	NOT_YET();
+	int cpu = smp_processor_id();
+
+	cpumask_set_cpu(cpu, &cpu_online_map);
+	cpumask_set_cpu(cpu, &cpu_present_map);
+	cpumask_set_cpu(cpu, &cpu_possible_map);
+
+	cpu_info_init(get_cpu_info());
 }
 
 asmlinkage void start_xen_on_slave_cpu(void)
 {
-	NOT_YET();
+	unsigned int cpu;
+	struct vcpu *v;	
+
+	cpu = smp_processor_id();
+
+        /* idle vcpu is allocated by scheduler_init() */
+        v = idle_vcpu[cpu];
+	set_current(idle_vcpu[cpu]);
+
+	set_cpu_sibling_map(cpu);
+
+	notify_cpu_starting(cpu);
+	wmb();
+
+	cpumask_set_cpu(cpu, &cpu_online_map);
+	wmb();
+
+	local_irq_enable();
+	local_fiq_enable();
+
+	startup_cpu_idle_loop();
 }
 
 void smp_send_event_check_mask(const cpumask_t *mask)
 {
-	NOT_YET();
+	int cpu;
+	unsigned long map = 0;
+
+	for_each_cpu(cpu, mask) {
+		map |= 1 << cpu;
+	}
+
+	/* Trigger remote CPU */
 }
 
 void smp_call_function(void (*f)(void *param), void *param, int wait)
 {
-	NOT_YET();
 }
 
 void smp_send_state_dump(unsigned int cpu)
 {
-	NOT_YET();
 }
+
+void cpu_topology_init(unsigned int cpus)
+{
+	int i;
+
+	if (cpus == 0) {
+		cpus = 1;
+	}
+
+	if (cpus > MAX_PHYS_CPUS) {
+		cpus = MAX_PHYS_CPUS;
+	}
+
+	for (i = 0; i < cpus; i++) {
+		cpumask_set_cpu(i, &cpu_possible_map);
+		cpumask_set_cpu(i, &cpu_present_map);
+	}
+}
+
diff -r fb0815ba40a1 xen/arch/arm/xen/setup.c
--- a/xen/arch/arm/xen/setup.c	Fri Feb 03 16:26:49 2012 +0900
+++ b/xen/arch/arm/xen/setup.c	Fri Feb 03 17:28:15 2012 +0900
@@ -30,35 +30,116 @@
 #include <xen/preempt.h>
 #include <public/version.h>
 #include <public/sched.h>
-
+#include <asm/mmu.h>
 
 struct domain _dom_xen = {
-        .refcnt = ATOMIC_INIT(1),
-        .domain_id = DOMID_XEN,
-        .domain_lock = SPIN_LOCK_UNLOCKED,
+	.refcnt = ATOMIC_INIT(1),
+	.domain_id = DOMID_XEN,
+	.domain_lock = SPIN_LOCK_UNLOCKED,
 };
 
 struct domain _dom_io = {
-        .refcnt = ATOMIC_INIT(1),
-        .domain_id = DOMID_IO,
-        .domain_lock = SPIN_LOCK_UNLOCKED,
+	.refcnt = ATOMIC_INIT(1),
+	.domain_id = DOMID_IO,
+	.domain_lock = SPIN_LOCK_UNLOCKED,
 };
 
 struct domain _dom_cow = {
-        .refcnt = ATOMIC_INIT(1),
-        .domain_id = DOMID_COW,
-        .domain_lock = SPIN_LOCK_UNLOCKED,
+	.refcnt = ATOMIC_INIT(1),
+	.domain_id = DOMID_COW,
+	.domain_lock = SPIN_LOCK_UNLOCKED,
 };
 
 struct domain *dom_xen = &_dom_xen;
 struct domain *dom_io = &_dom_io;
 struct domain *dom_cow = &_dom_cow;
 
+/* maxcpus: maximum number of CPUs to be activated */
+static unsigned int max_cpus = NR_CPUS;
+integer_param("maxcpus", max_cpus);
+
+/* Default domain size = 256MB */
+static unsigned int dom0_size = 256 * 1024 * 1024;
+integer_param("dom0_size", dom0_size);
+
+//static unsigned long dom0_image_start = 0x40B00000UL;
+static unsigned long dom0_image_start = 0x00B00000UL;
+integer_param("image_start", dom0_image_start);
+
+//static unsigned long dom0_image_size = 0xA00000UL;
+static unsigned long dom0_image_size = 0xA00000UL;
+integer_param("image_length", dom0_image_size);
+
 void arch_get_xen_caps(xen_capabilities_info_t *info)
 {
 }
 
+static void idle_domain_init(void)
+{
+	struct vcpu *v;
+
+	scheduler_init();
+
+	/* idle vcpu is allocated by scheduler_init() */
+	v = idle_vcpu[0];
+
+	set_current_vcpu(v);
+}
+
 asmlinkage void start_xen(void)
 {
+	unsigned int i;
+
+	smp_prepare_boot_cpu();
+
+	softirq_init();
+
+	tasklet_subsys_init();
+
+	timer_init();
+
+	idle_domain_init();
+
+	rcu_init();
+
+	local_irq_enable();
+
+	smp_prepare_cpus(max_cpus);
+
+	do_presmp_initcalls();
+
+	timekeeping_init();
+
+	for_each_present_cpu(i) {
+		if (num_online_cpus() < max_cpus && !cpu_online(i)) {
+			int ret = cpu_up(i);
+	
+			if (ret != 0) {
+				printk("Fail to bring up CPU %u (error %d)\n", i, ret);
+			}
+		}
+	}
+
+	printk("Brought up %ld CPUs\n", (long)num_online_cpus());
+
+	do_initcalls();
+
+	dom0 = domain_create(0, 0, 0);
+	if (dom0 == NULL) {
+		panic("Domain creation failed\n");
+	}
+
+
+	if (domain_construct(dom0,
+			     dom0_image_start, 
+			     dom0_image_size, 
+			     dom0_size, 
+			     max_cpus)) {
+		PANIC("Domain construction failed\n");
+	}
+
+	domain_unpause_by_systemcontroller(dom0);
+
+	startup_cpu_idle_loop();
 }
 

[-- Attachment #2: patch04.diff --]
[-- Type: application/octet-stream, Size: 5738 bytes --]

arm: implement xen init code

 xen/arch/arm/xen/cpu.c   |   84 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-------
 xen/arch/arm/xen/setup.c |  101 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------
 2 files changed, 167 insertions(+), 18 deletions(-)

Signed-off-by: Jaemin Ryu <jm77.ryu@samsung.com>

diff -r fb0815ba40a1 xen/arch/arm/xen/cpu.c
--- a/xen/arch/arm/xen/cpu.c	Fri Feb 03 16:26:49 2012 +0900
+++ b/xen/arch/arm/xen/cpu.c	Fri Feb 03 17:28:15 2012 +0900
@@ -28,6 +28,11 @@
 #include <xen/sched.h>
 #include <xen/preempt.h>
 #include <xen/percpu.h>
+#include <asm/mmu.h>
+#include <asm/current.h>
+#include <asm/delay.h>
+#include <asm/processor.h>
+#include <asm/delay.h>
 
 cpumask_t cpu_online_map;
 cpumask_t cpu_present_map;
@@ -46,7 +51,12 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t
 
 int __cpu_up(unsigned int cpu)
 {
-	NOT_YET();
+	int ret = 0;
+
+	while(!cpu_online(cpu)) {
+		cpu_relax();
+		process_pending_softirqs();
+	}
 
 	return 0;
 }
@@ -63,35 +73,93 @@ void __cpu_die(unsigned int cpu)
 
 void set_cpu_sibling_map(unsigned int cpu)
 {
-	NOT_YET();
+	unsigned int i;
+
+	for_each_present_cpu(i) {
+		cpumask_set_cpu(i, &per_cpu(cpu_sibling_mask, cpu));
+		cpumask_set_cpu(cpu, &per_cpu(cpu_sibling_mask, i));
+
+		cpumask_set_cpu(i, &per_cpu(cpu_core_mask, cpu));
+		cpumask_set_cpu(cpu, &per_cpu(cpu_core_mask, i));
+	}
 }
 
 void smp_prepare_cpus(unsigned int max_cpus)
 {
-	NOT_YET();
+	set_cpu_sibling_map(0);
 }
 
 void smp_prepare_boot_cpu(void)
 {
-	NOT_YET();
+	int cpu = smp_processor_id();
+
+	cpumask_set_cpu(cpu, &cpu_online_map);
+	cpumask_set_cpu(cpu, &cpu_present_map);
+	cpumask_set_cpu(cpu, &cpu_possible_map);
+
+	cpu_info_init(get_cpu_info());
 }
 
 asmlinkage void start_xen_on_slave_cpu(void)
 {
-	NOT_YET();
+	unsigned int cpu;
+	struct vcpu *v;	
+
+	cpu = smp_processor_id();
+
+        /* idle vcpu is allocated by scheduler_init() */
+        v = idle_vcpu[cpu];
+	set_current(idle_vcpu[cpu]);
+
+	set_cpu_sibling_map(cpu);
+
+	notify_cpu_starting(cpu);
+	wmb();
+
+	cpumask_set_cpu(cpu, &cpu_online_map);
+	wmb();
+
+	local_irq_enable();
+	local_fiq_enable();
+
+	startup_cpu_idle_loop();
 }
 
 void smp_send_event_check_mask(const cpumask_t *mask)
 {
-	NOT_YET();
+	int cpu;
+	unsigned long map = 0;
+
+	for_each_cpu(cpu, mask) {
+		map |= 1 << cpu;
+	}
+
+	/* Trigger remote CPU */
 }
 
 void smp_call_function(void (*f)(void *param), void *param, int wait)
 {
-	NOT_YET();
 }
 
 void smp_send_state_dump(unsigned int cpu)
 {
-	NOT_YET();
 }
+
+void cpu_topology_init(unsigned int cpus)
+{
+	int i;
+
+	if (cpus == 0) {
+		cpus = 1;
+	}
+
+	if (cpus > MAX_PHYS_CPUS) {
+		cpus = MAX_PHYS_CPUS;
+	}
+
+	for (i = 0; i < cpus; i++) {
+		cpumask_set_cpu(i, &cpu_possible_map);
+		cpumask_set_cpu(i, &cpu_present_map);
+	}
+}
+
diff -r fb0815ba40a1 xen/arch/arm/xen/setup.c
--- a/xen/arch/arm/xen/setup.c	Fri Feb 03 16:26:49 2012 +0900
+++ b/xen/arch/arm/xen/setup.c	Fri Feb 03 17:28:15 2012 +0900
@@ -30,35 +30,116 @@
 #include <xen/preempt.h>
 #include <public/version.h>
 #include <public/sched.h>
-
+#include <asm/mmu.h>
 
 struct domain _dom_xen = {
-        .refcnt = ATOMIC_INIT(1),
-        .domain_id = DOMID_XEN,
-        .domain_lock = SPIN_LOCK_UNLOCKED,
+	.refcnt = ATOMIC_INIT(1),
+	.domain_id = DOMID_XEN,
+	.domain_lock = SPIN_LOCK_UNLOCKED,
 };
 
 struct domain _dom_io = {
-        .refcnt = ATOMIC_INIT(1),
-        .domain_id = DOMID_IO,
-        .domain_lock = SPIN_LOCK_UNLOCKED,
+	.refcnt = ATOMIC_INIT(1),
+	.domain_id = DOMID_IO,
+	.domain_lock = SPIN_LOCK_UNLOCKED,
 };
 
 struct domain _dom_cow = {
-        .refcnt = ATOMIC_INIT(1),
-        .domain_id = DOMID_COW,
-        .domain_lock = SPIN_LOCK_UNLOCKED,
+	.refcnt = ATOMIC_INIT(1),
+	.domain_id = DOMID_COW,
+	.domain_lock = SPIN_LOCK_UNLOCKED,
 };
 
 struct domain *dom_xen = &_dom_xen;
 struct domain *dom_io = &_dom_io;
 struct domain *dom_cow = &_dom_cow;
 
+/* maxcpus: maximum number of CPUs to be activated */
+static unsigned int max_cpus = NR_CPUS;
+integer_param("maxcpus", max_cpus);
+
+/* Default domain size = 256MB */
+static unsigned int dom0_size = 256 * 1024 * 1024;
+integer_param("dom0_size", dom0_size);
+
+//static unsigned long dom0_image_start = 0x40B00000UL;
+static unsigned long dom0_image_start = 0x00B00000UL;
+integer_param("image_start", dom0_image_start);
+
+//static unsigned long dom0_image_size = 0xA00000UL;
+static unsigned long dom0_image_size = 0xA00000UL;
+integer_param("image_length", dom0_image_size);
+
 void arch_get_xen_caps(xen_capabilities_info_t *info)
 {
 }
 
+static void idle_domain_init(void)
+{
+	struct vcpu *v;
+
+	scheduler_init();
+
+	/* idle vcpu is allocated by scheduler_init() */
+	v = idle_vcpu[0];
+
+	set_current_vcpu(v);
+}
+
 asmlinkage void start_xen(void)
 {
+	unsigned int i;
+
+	smp_prepare_boot_cpu();
+
+	softirq_init();
+
+	tasklet_subsys_init();
+
+	timer_init();
+
+	idle_domain_init();
+
+	rcu_init();
+
+	local_irq_enable();
+
+	smp_prepare_cpus(max_cpus);
+
+	do_presmp_initcalls();
+
+	timekeeping_init();
+
+	for_each_present_cpu(i) {
+		if (num_online_cpus() < max_cpus && !cpu_online(i)) {
+			int ret = cpu_up(i);
+	
+			if (ret != 0) {
+				printk("Fail to bring up CPU %u (error %d)\n", i, ret);
+			}
+		}
+	}
+
+	printk("Brought up %ld CPUs\n", (long)num_online_cpus());
+
+	do_initcalls();
+
+	dom0 = domain_create(0, 0, 0);
+	if (dom0 == NULL) {
+		panic("Domain creation failed\n");
+	}
+
+
+	if (domain_construct(dom0,
+			     dom0_image_start, 
+			     dom0_image_size, 
+			     dom0_size, 
+			     max_cpus)) {
+		PANIC("Domain construction failed\n");
+	}
+
+	domain_unpause_by_systemcontroller(dom0);
+
+	startup_cpu_idle_loop();
 }
 

[-- Attachment #3: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel

                 reply	other threads:[~2012-02-13  7:56 UTC|newest]

Thread overview: [no followups] expand[flat|nested]  mbox.gz  Atom feed

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=0LZB0052INECEV60@mailout1.samsung.com \
    --to=jm77.ryu@samsung.com \
    --cc=Ian.Campbell@citrix.com \
    --cc=Ian.Jackson@eu.citrix.com \
    --cc=Stefano.Stabellini@eu.citrix.com \
    --cc=keir@xen.org \
    --cc=lars.kurth@citrix.com \
    --cc=sbuk.suh@samsung.com \
    --cc=xen-arm@lists.xensource.com \
    --cc=xen-devel@lists.xensource.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).