virtualization.lists.linux-foundation.org archive mirror
* [RFC/PATCH PV_OPS X86_64 01/17] paravirt_ops - core changes
       [not found] <20070308054422.820010000@redhat.com>
@ 2007-03-08  6:01 ` Steven Rostedt
  2007-03-08  6:01 ` [RFC/PATCH PV_OPS X86_64 02/17] paravirt_ops - msr Steven Rostedt
                   ` (15 subsequent siblings)
  16 siblings, 0 replies; 19+ messages in thread
From: Steven Rostedt @ 2007-03-08  6:01 UTC (permalink / raw)
  To: virtualization; +Cc: Chris Wright, Glauber de Oliveira Costa

plain text document attachment (xx-paravirt-core.patch)
Paravirt Ops core files.
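
The core idea: every privilege-sensitive operation becomes an indirect
call through one global ops table, which native code pre-fills with the
bare-hardware implementations and which a hypervisor port can overwrite
at boot. A minimal sketch of that shape (illustrative only; the real
struct paravirt_ops in the diff below has dozens of members):

	struct pv_sketch {
		const char *name;
		void (*irq_disable)(void);	/* "cli" natively */
	};

	static void sketch_native_irq_disable(void)
	{
		asm volatile("cli" : : : "memory");
	}

	static struct pv_sketch pv_sketch = {
		.name		= "bare hardware",
		.irq_disable	= sketch_native_irq_disable,
	};

	/* callers never open-code the instruction */
	#define sketch_local_irq_disable()	pv_sketch.irq_disable()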

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
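
For reference, a sketch of the generic loop expected to consume the
.parainstructions records that paravirt_alt()/PARA_PATCH emit. The
record layout is struct paravirt_patch from this patch; the loop and
the __parainstructions section symbols are assumptions, modeled on the
i386 apply_paravirt():

	extern struct paravirt_patch __parainstructions[],
				     __parainstructions_end[];

	static void apply_paravirt_sketch(void)
	{
		struct paravirt_patch *p;

		for (p = __parainstructions; p < __parainstructions_end; p++) {
			unsigned used;

			/* Let the backend shrink the indirect call into
			 * inline native insns (see native_patch below). */
			used = paravirt_ops.patch(p->instrtype, p->clobbers,
						  p->instr, p->len);
			/* Pad the tail so the site keeps its length;
			 * single-byte 0x90 NOPs here for simplicity. */
			memset(p->instr + used, 0x90, p->len - used);
		}
	}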



Index: clean-start/arch/x86_64/kernel/paravirt.c
===================================================================
--- /dev/null
+++ clean-start/arch/x86_64/kernel/paravirt.c
@@ -0,0 +1,504 @@
+/*  Paravirtualization interfaces
+    Copyright (C) 2007 Glauber de Oliveira Costa, Red Hat Inc.
+    Based on i386 work by Rusty Russell.
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+*/
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/efi.h>
+#include <linux/bcd.h>
+#include <linux/start_kernel.h>
+
+#include <asm/bug.h>
+#include <asm/paravirt.h>
+#include <asm/desc.h>
+#include <asm/setup.h>
+#include <asm/irq.h>
+#include <asm/delay.h>
+#include <asm/fixmap.h>
+#include <asm/apic.h>
+#include <asm/tlbflush.h>
+#include <asm/msr.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/proto.h>
+#include <asm/time.h>
+#include <asm/e820.h>
+
+/* nop stub */
+void native_nop(void)
+{
+}
+
+static void __init default_banner(void)
+{
+	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
+	       paravirt_ops.name);
+}
+
+void memory_setup(void)
+{
+	paravirt_ops.memory_setup();
+}
+
+void syscall_init(void)
+{
+	paravirt_ops.syscall_init();
+}
+
+/* Simple instruction patching code. */
+#define DEF_NATIVE(name, code)					\
+	extern const char start_##name[], end_##name[];		\
+	asm("start_" #name ": " code "; end_" #name ":")
+DEF_NATIVE(cli, "cli");
+DEF_NATIVE(sti, "sti");
+/* We push %rdi and pop into %rax.  This is due to the x86_64 calling
+ * convention; recall that we are patching a function call. */
+DEF_NATIVE(popfq, "pushq %rdi; popfq");
+DEF_NATIVE(pushfq, "pushfq; popq %rax");
+DEF_NATIVE(pushfq_cli, "pushfq; popq %rax; cli");
+DEF_NATIVE(iret, "iretq");
+DEF_NATIVE(sysretq, "sysretq");
+DEF_NATIVE(swapgs, "swapgs");
+
+static const struct native_insns
+{
+	const char *start, *end;
+} native_insns[] = {
+	[PARAVIRT_IRQ_DISABLE] = { start_cli, end_cli },
+	[PARAVIRT_IRQ_ENABLE] = { start_sti, end_sti },
+	[PARAVIRT_RESTORE_FLAGS] = { start_popfq, end_popfq },
+	[PARAVIRT_SAVE_FLAGS] = { start_pushfq, end_pushfq },
+	[PARAVIRT_SAVE_FLAGS_IRQ_DISABLE] = { start_pushfq_cli, end_pushfq_cli },
+	[PARAVIRT_INTERRUPT_RETURN] = { start_iret, end_iret },
+	[PARAVIRT_SYSRETQ] = { start_sysretq, end_sysretq },
+	[PARAVIRT_SWAPGS] = { start_swapgs, end_swapgs },
+};
+
+static unsigned native_patch(u8 type, u16 clobbers, void *insns, unsigned len)
+{
+	unsigned int insn_len;
+
+	/* Don't touch it if we don't have a replacement */
+	if (type >= ARRAY_SIZE(native_insns) || !native_insns[type].start)
+		return len;
+
+	insn_len = native_insns[type].end - native_insns[type].start;
+
+	/* Similarly if we can't fit replacement. */
+	if (len < insn_len)
+		return len;
+
+	memcpy(insns, native_insns[type].start, insn_len);
+	return insn_len;
+}
+
+static unsigned long native_get_debugreg(int regno)
+{
+	unsigned long val = 0; 	/* Damn you, gcc! */
+
+	switch (regno) {
+	case 0:
+		asm("movq %%db0, %0" :"=r" (val)); break;
+	case 1:
+		asm("movq %%db1, %0" :"=r" (val)); break;
+	case 2:
+		asm("movq %%db2, %0" :"=r" (val)); break;
+	case 3:
+		asm("movq %%db3, %0" :"=r" (val)); break;
+	case 6:
+		asm("movq %%db6, %0" :"=r" (val)); break;
+	case 7:
+		asm("movq %%db7, %0" :"=r" (val)); break;
+	default:
+		BUG();
+	}
+	return val;
+}
+
+static void native_set_debugreg(int regno, unsigned long value)
+{
+	switch (regno) {
+	case 0:
+		asm("movq %0,%%db0"	: /* no output */ :"r" (value));
+		break;
+	case 1:
+		asm("movq %0,%%db1"	: /* no output */ :"r" (value));
+		break;
+	case 2:
+		asm("movq %0,%%db2"	: /* no output */ :"r" (value));
+		break;
+	case 3:
+		asm("movq %0,%%db3"	: /* no output */ :"r" (value));
+		break;
+	case 6:
+		asm("movq %0,%%db6"	: /* no output */ :"r" (value));
+		break;
+	case 7:
+		asm("movq %0,%%db7"	: /* no output */ :"r" (value));
+		break;
+	default:
+		BUG();
+	}
+}
+
+void init_IRQ(void)
+{
+	paravirt_ops.init_IRQ();
+}
+
+static unsigned long native_save_fl(void)
+{
+	unsigned long f;
+	asm volatile("pushfq ; popq %0":"=g" (f): /* no input */);
+	return f;
+}
+
+static void native_restore_fl(unsigned long f)
+{
+	asm volatile("pushq %0 ; popfq": /* no output */
+			     :"g" (f)
+			     :"memory", "cc");
+}
+
+static void native_irq_disable(void)
+{
+	asm volatile("cli": : :"memory");
+}
+
+static void native_irq_enable(void)
+{
+	asm volatile("sti": : :"memory");
+}
+
+static void native_safe_halt(void)
+{
+	asm volatile("sti; hlt": : :"memory");
+}
+
+static void native_halt(void)
+{
+	asm volatile("hlt": : :"memory");
+}
+
+static u64 native_read_tsc(void)
+{
+	unsigned long a, b;
+	asm volatile("rdtsc" : "=a" (a), "=d" (b));
+	return a | (b << 32);
+}
+
+static u64 native_read_tscp(int *aux)
+{
+	u64 a, b;
+	asm volatile ("rdtscp" : "=a" (a), "=d" (b), "=c" (*aux));
+	return a | (b << 32);
+}
+
+static u64 native_read_pmc(void)
+{
+	unsigned long a, b;
+	asm volatile("rdpmc" : "=a" (a), "=d" (b));
+	return a | (b << 32);
+}
+
+static void native_store_gdt(struct desc_ptr *dtr)
+{
+	asm ("sgdt %w0":"=m" (*dtr));
+}
+
+static void native_store_idt(struct desc_ptr *dtr)
+{
+	asm ("sidt %w0":"=m" (*dtr));
+}
+
+static unsigned long native_store_tr(void)
+{
+	unsigned long tr;
+	asm ("str %w0":"=r" (tr));
+	return tr;
+}
+
+static void native_load_tls(struct thread_struct *t, unsigned int cpu)
+{
+	u64 *gdt = (u64 *)(cpu_gdt(cpu) + GDT_ENTRY_TLS_MIN);
+	gdt[0] = t->tls_array[0];
+	gdt[1] = t->tls_array[1];
+	gdt[2] = t->tls_array[2];
+}
+
+static inline void native_write_dt_entry(void *dt, int entry, u32 entry_low, u32 entry_high)
+{
+	u32 *lp = (u32 *)((char *)dt + entry*8);
+	lp[0] = entry_low;
+	lp[1] = entry_high;
+}
+
+static void native_write_ldt_entry(void *dt, int entrynum, u32 low, u32 high)
+{
+	native_write_dt_entry(dt, entrynum, low, high);
+}
+
+static void native_write_gdt_entry(void *dt, int entrynum, u32 low, u32 high)
+{
+	native_write_dt_entry(dt, entrynum, low, high);
+}
+
+static void native_write_idt_entry(void *dt, int entrynum, u32 low, u32 high)
+{
+	native_write_dt_entry(dt, entrynum, low, high);
+}
+
+static void native_load_rsp0(struct tss_struct *tss,
+				      struct thread_struct *thread)
+{
+	tss->rsp0 = thread->rsp0;
+}
+
+static void native_io_delay(void)
+{
+	asm volatile("outb %al,$0x80");
+}
+
+void native_pagetable_setup_start(pgd_t *base)
+{
+	int i;
+
+	/*
+	 * Init entries of the first-level page table to the
+	 * zero page, if they haven't already been set up.
+	 *
+	 * In a normal native boot, we'll be running on a
+	 * pagetable rooted in swapper_pg_dir, but not in PAE
+	 * mode, so this will end up clobbering the mappings
+	 * for the lower 24Mbytes of the address space,
+	 * without affecting the kernel address space.
+	 */
+	for (i = 0; i < USER_PTRS_PER_PGD; i++)
+		set_pgd(&base[i],
+			__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
+	memset(&base[USER_PTRS_PER_PGD], 0, sizeof(pgd_t));
+}
+
+void native_pagetable_setup_done(pgd_t *base)
+{
+	/*
+	 * Add low memory identity-mappings - SMP needs it when
+	 * starting up on an AP from real-mode. In the non-PAE
+	 * case we already have these mappings through head.S.
+	 * All user-space mappings are explicitly cleared after
+	 * SMP startup.
+	 */
+	set_pgd(&base[0], base[USER_PTRS_PER_PGD]);
+}
+
+
+static void native_flush_tlb(void)
+{
+	__native_flush_tlb();
+}
+
+/*
+ * Global pages have to be flushed a bit differently. Not a real
+ * performance problem because this does not happen often.
+ */
+static void native_flush_tlb_all(void)
+{
+	__native_flush_tlb_all();
+}
+
+static void native_flush_tlb_one(u64 addr)
+{
+	__native_flush_tlb_one(addr);
+}
+
+pte_t native_make_pte(unsigned long pte)
+{
+	return (pte_t){ pte };
+}
+
+pud_t native_make_pud(unsigned long pud)
+{
+	return (pud_t){ pud };
+}
+
+pmd_t native_make_pmd(unsigned long pmd)
+{
+	return (pmd_t){ pmd };
+}
+
+pgd_t native_make_pgd(unsigned long pgd)
+{
+	return (pgd_t){ pgd };
+}
+
+pte_t native_ptep_get_and_clear(struct mm_struct *mm, u64 addr,
+					 pte_t *ptep)
+{
+	return __pte(xchg(&(ptep)->pte, 0));
+}
+
+void native_set_pte_at(struct mm_struct *mm, u64 addr, pte_t *ptep,
+				pte_t pteval)
+{
+	native_set_pte(ptep,pteval);
+}
+
+void native_pte_clear(struct mm_struct *mm, u64 addr, pte_t *ptep)
+{
+	native_set_pte_at(mm,addr,ptep,__pte(0));
+}
+
+void native_pmd_clear(pmd_t *pmd)
+{
+	native_set_pmd(pmd,__pmd(0));
+}
+
+void native_swapgs(unsigned long rip)
+{
+	asm volatile ("swapgs" :: :"memory" );
+}
+
+/* These are in entry.S */
+extern void native_iret(void);
+extern void native_sysret(void);
+
+static int __init print_banner(void)
+{
+	paravirt_ops.banner();
+	return 0;
+}
+core_initcall(print_banner);
+
+/* We simply declare start_kernel to be the paravirt probe of last resort. */
+paravirt_probe_failsafe(start_kernel);
+
+extern unsigned long __vsyscall_0;
+struct paravirt_ops paravirt_ops = {
+	.name = "bare hardware",
+	.mem_type = "BIOS-e820",
+	.paravirt_enabled = 0,
+	.pgd_alignment = sizeof(pgd_t) * PTRS_PER_PGD,
+
+	.vsyscall_page = &__vsyscall_0,
+ 	.patch = native_patch,
+	.banner = default_banner,
+	.arch_setup = native_nop,
+	.memory_setup = setup_memory_region,
+	.syscall_init = x86_64_syscall_init,
+	.get_wallclock = do_get_cmos_time,
+	.set_wallclock = do_set_rtc_mmss,
+	.time_init = time_init_hook,
+	.init_IRQ = native_init_IRQ,
+
+	.cpuid = native_cpuid,
+	.get_debugreg = native_get_debugreg,
+	.set_debugreg = native_set_debugreg,
+	.clts = native_clts,
+	.read_cr0 = native_read_cr0,
+	.write_cr0 = native_write_cr0,
+	.read_cr2 = native_read_cr2,
+	.write_cr2 = native_write_cr2,
+	.read_cr3 = native_read_cr3,
+	.write_cr3 = native_write_cr3,
+	.read_cr4 = native_read_cr4,
+	.write_cr4 = native_write_cr4,
+	.save_fl = native_save_fl,
+	.restore_fl = native_restore_fl,
+	.irq_disable = native_irq_disable,
+	.irq_enable = native_irq_enable,
+	.safe_halt = native_safe_halt,
+	.halt = native_halt,
+	.wbinvd = native_wbinvd,
+	.read_msr = native_read_msr_safe,
+	.write_msr = native_write_msr_safe,
+	.read_tsc = native_read_tsc,
+	.read_tscp = native_read_tscp,
+	.read_pmc = native_read_pmc,
+	.load_tr_desc = native_load_tr_desc,
+	.set_ldt = native_set_ldt,
+	.load_gdt = native_load_gdt,
+	.load_idt = native_load_idt,
+	.store_gdt = native_store_gdt,
+	.store_idt = native_store_idt,
+	.store_tr = native_store_tr,
+	.load_tls = native_load_tls,
+	.write_ldt_entry = native_write_ldt_entry,
+	.write_gdt_entry = native_write_gdt_entry,
+	.write_idt_entry = native_write_idt_entry,
+	.load_rsp0 = native_load_rsp0,
+
+	.io_delay = native_io_delay,
+	.const_udelay = __const_udelay,
+
+#ifdef CONFIG_X86_LOCAL_APIC
+	.apic_write = native_apic_write,
+	.apic_read = native_apic_read,
+#endif
+	.set_lazy_mode = (void *)native_nop,
+	.ebda_info = native_ebda_info,
+
+	.pagetable_setup_start = native_pagetable_setup_start,
+	.pagetable_setup_done = native_pagetable_setup_done,
+
+	.flush_tlb_user = native_flush_tlb,
+	.flush_tlb_kernel = native_flush_tlb_all,
+	.flush_tlb_single = native_flush_tlb_one,
+
+	.alloc_pt = (void *)native_nop,
+	.alloc_pd = (void *)native_nop,
+	.alloc_pd_clone = (void *)native_nop,
+	.release_pt = (void *)native_nop,
+	.release_pd = (void *)native_nop,
+
+	.set_pte = native_set_pte,
+	.set_pte_at = native_set_pte_at,
+	.set_pmd = native_set_pmd,
+	.set_pud = native_set_pud,
+	.set_pgd = native_set_pgd,
+
+	.pte_update = (void *)native_nop,
+	.pte_update_defer = (void *)native_nop,
+
+	.ptep_get_and_clear = native_ptep_get_and_clear,
+
+	.pte_clear = native_pte_clear,
+	.pmd_clear = native_pmd_clear,
+	.pud_clear = native_pud_clear,
+	.pgd_clear = native_pgd_clear,
+
+	.pte_val = native_pte_val,
+	.pud_val = native_pud_val,
+	.pmd_val = native_pmd_val,
+	.pgd_val = native_pgd_val,
+
+	.make_pte = native_make_pte,
+	.make_pmd = native_make_pmd,
+	.make_pud = native_make_pud,
+	.make_pgd = native_make_pgd,
+
+	.swapgs = native_swapgs,
+	.sysret = native_sysret,
+	.iret = native_iret,
+
+	.dup_mmap = (void *)native_nop,
+	.exit_mmap = (void *)native_nop,
+	.activate_mm = (void *)native_nop,
+
+	.startup_ipi_hook = (void *)native_nop,
+};
+
+EXPORT_SYMBOL(paravirt_ops);
Index: clean-start/include/asm-x86_64/paravirt.h
===================================================================
--- /dev/null
+++ clean-start/include/asm-x86_64/paravirt.h
@@ -0,0 +1,678 @@
+#ifndef __ASM_PARAVIRT_H
+#define __ASM_PARAVIRT_H
+/* Various instructions on x86 need to be replaced for
+ * para-virtualization: those hooks are defined here. */
+#include <linux/linkage.h>
+#include <linux/stringify.h>
+#include <asm/page.h>
+#include <asm/types.h>
+#include <asm/pda.h>
+
+#ifdef CONFIG_PARAVIRT
+/* These are the most performance critical ops, so we want to be able to patch
+ * callers */
+#define PARAVIRT_IRQ_DISABLE 0
+#define PARAVIRT_IRQ_ENABLE 1
+#define PARAVIRT_RESTORE_FLAGS 2
+#define PARAVIRT_SAVE_FLAGS 3
+#define PARAVIRT_SAVE_FLAGS_IRQ_DISABLE 4
+#define PARAVIRT_INTERRUPT_RETURN 5
+#define PARAVIRT_SYSRETQ 6
+#define PARAVIRT_SWAPGS	7
+
+/* Bitmask of what can be clobbered: usually at least rax. */
+#define CLBR_NONE 0x0
+#define CLBR_RAX 0x1
+#define CLBR_RCX 0x2
+#define CLBR_RDX 0x4
+#define CLBR_ANY 0xf
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+struct thread_struct;
+struct desc_struct;
+struct desc_ptr;
+struct tss_struct;
+struct mm_struct;
+
+struct paravirt_ops
+{
+	int paravirt_enabled;
+
+	int pgd_alignment;
+
+	const char *name;
+	char *mem_type;
+
+	unsigned long *vsyscall_page;
+
+	/*
+	 * Patch may replace one of the defined code sequences with arbitrary
+	 * code, subject to the same register constraints.  This generally
+	 * means the code is not free to clobber any registers other than RAX.
+	 * The patch function should return the number of bytes of code
+	 * generated, as we nop pad the rest in generic code.
+	 */
+	unsigned (*patch)(u8 type, u16 clobber, void *firstinsn, unsigned len);
+
+	void (*arch_setup)(void);
+	void (*memory_setup)(void);
+	void (*init_IRQ)(void);
+	/* entry point for our hypervisor syscall handler */
+	void (*syscall_init)(void);
+
+	void (*pagetable_setup_start)(pgd_t *pgd_base);
+	void (*pagetable_setup_done)(pgd_t *pgd_base);
+
+	void (*banner)(void);
+
+	unsigned long (*get_wallclock)(void);
+	void (*set_wallclock)(unsigned long);
+	void (*time_init)(void);
+
+	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
+		      unsigned int *ecx, unsigned int *edx);
+
+	unsigned long (*get_debugreg)(int regno);
+	void (*set_debugreg)(int regno, unsigned long value);
+
+	void (*clts)(void);
+
+	unsigned long (*read_cr0)(void);
+	void (*write_cr0)(unsigned long);
+
+	unsigned long (*read_cr2)(void);
+	void (*write_cr2)(unsigned long);
+
+	unsigned long (*read_cr3)(void);
+	void (*write_cr3)(unsigned long);
+
+	unsigned long (*read_cr4)(void);
+	void (*write_cr4)(unsigned long);
+
+	unsigned long (*save_fl)(void);
+	void (*restore_fl)(unsigned long);
+	void (*irq_disable)(void);
+	void (*irq_enable)(void);
+
+	void (*safe_halt)(void);
+	void (*halt)(void);
+	void (*wbinvd)(void);
+
+	/* err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
+	unsigned long (*read_msr)(unsigned int msr, int *err);
+	int (*write_msr)(unsigned int msr, unsigned long val);
+
+	u64 (*read_tsc)(void);
+	u64 (*read_tscp)(int *aux);
+	u64 (*read_pmc)(void);
+
+	void (*load_tr_desc)(void);
+	void (*load_gdt)(const struct desc_ptr *);
+	void (*load_idt)(const struct desc_ptr *);
+	void (*store_gdt)(struct desc_ptr *);
+	void (*store_idt)(struct desc_ptr *);
+	void (*set_ldt)(const void *desc, unsigned entries);
+	unsigned long (*store_tr)(void);
+	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
+	void (*write_ldt_entry)(void *dt, int entrynum,
+					 u32 low, u32 high);
+	void (*write_gdt_entry)(void *dt, int entrynum,
+					 u32 low, u32 high);
+	void (*write_idt_entry)(void *dt, int entrynum,
+					 u32 low, u32 high);
+	void (*load_rsp0)(struct tss_struct *tss,
+				   struct thread_struct *thread);
+
+	void (*io_delay)(void);
+	void (*const_udelay)(unsigned long loops);
+
+	void (*activate_mm)(struct mm_struct *prev,
+				     struct mm_struct *next);
+	void (*dup_mmap)(struct mm_struct *oldmm,
+				  struct mm_struct *mm);
+	void (*exit_mmap)(struct mm_struct *mm);
+
+#ifdef CONFIG_X86_LOCAL_APIC
+	void (*apic_write)(unsigned long reg, unsigned int v);
+	unsigned int (*apic_read)(unsigned long reg);
+#endif
+
+	void (*flush_tlb_user)(void);
+	void (*flush_tlb_kernel)(void);
+	void (*flush_tlb_single)(u64 addr);
+
+	void (*alloc_pt)(u64 pfn);
+	void (*alloc_pd)(u64 pfn);
+	void (*alloc_pd_clone)(u64 pfn, u64 clonepfn, u64 start, u64 count);
+	void (*release_pt)(u64 pfn);
+	void (*release_pd)(u64 pfn);
+
+	void (*set_pte)(pte_t *ptep, pte_t pteval);
+	void (*set_pte_at)(struct mm_struct *mm, u64 addr, pte_t *ptep, pte_t pteval);
+	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
+	void (*set_pud)(pud_t *pudp, pud_t pudval);
+	void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
+
+	void (*pte_update)(struct mm_struct *mm, u64 addr, pte_t *ptep);
+	void (*pte_update_defer)(struct mm_struct *mm, u64 addr, pte_t *ptep);
+
+	pte_t (*ptep_get_and_clear)(struct mm_struct *mm, u64 addr, pte_t *ptep);
+
+	void (*pte_clear)(struct mm_struct *mm, u64 addr, pte_t *ptep);
+	void (*pmd_clear)(pmd_t *pmdp);
+	void (*pud_clear)(pud_t *pudp);
+	void (*pgd_clear)(pgd_t *pgdp);
+
+	unsigned long (*pte_val)(pte_t);
+	unsigned long (*pud_val)(pud_t);
+	unsigned long (*pmd_val)(pmd_t);
+	unsigned long (*pgd_val)(pgd_t);
+
+	pte_t (*make_pte)(unsigned long pte);
+	pud_t (*make_pud)(unsigned long pud);
+	pmd_t (*make_pmd)(unsigned long pmd);
+	pgd_t (*make_pgd)(unsigned long pgd);
+
+	void (*swapgs)(unsigned long rip);
+	void (*ebda_info)(unsigned *addr, unsigned *size);
+	void (*set_lazy_mode)(int mode);
+
+	/* These two are jmp to, not actually called. */
+	void (*sysret)(void);
+	void (*iret)(void);
+
+	void (*startup_ipi_hook)(int phys_apicid, unsigned long start_eip, unsigned long start_esp);
+};
+
+#define MAP_TYPE_STR paravirt_ops.mem_type
+
+/* Mark a paravirt probe function. */
+#define paravirt_probe(fn)						\
+ static asmlinkage void (*__paravirtprobe_##fn)(void) __attribute_used__ \
+		__attribute__((__section__(".paravirtprobe"))) = fn
+
+#define paravirt_probe_failsafe(fn)						\
+ static asmlinkage void (*__paravirtprobe_##fn)(void) __attribute_used__ \
+		__attribute__((__section__(".paravirtprobe_failsafe"))) = fn
+extern struct paravirt_ops paravirt_ops;
+
+void native_pagetable_setup_start(pgd_t *pgd);
+
+pte_t native_make_pte(unsigned long pte);
+pud_t native_make_pud(unsigned long pud);
+pmd_t native_make_pmd(unsigned long pmd);
+pgd_t native_make_pgd(unsigned long pgd);
+
+#define paravirt_enabled() (paravirt_ops.paravirt_enabled)
+
+static inline void load_rsp0(struct tss_struct *tss,
+			     struct thread_struct *thread)
+{
+	paravirt_ops.load_rsp0(tss, thread);
+}
+
+#define ARCH_SETUP			paravirt_ops.arch_setup();
+
+static inline unsigned long get_wallclock(void)
+{
+	return paravirt_ops.get_wallclock();
+}
+
+static inline void set_wallclock(unsigned long nowtime)
+{
+	paravirt_ops.set_wallclock(nowtime);
+}
+
+static inline void do_time_init(void)
+{
+	return paravirt_ops.time_init();
+}
+
+/* The paravirtualized CPUID instruction. */
+static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
+			   unsigned int *ecx, unsigned int *edx)
+{
+	paravirt_ops.cpuid(eax, ebx, ecx, edx);
+}
+
+/*
+ * These special macros can be used to get or set a debugging register
+ */
+#define get_debugreg(var, reg) var = paravirt_ops.get_debugreg(reg)
+#define set_debugreg(val, reg) paravirt_ops.set_debugreg(reg, val)
+
+#define clts() paravirt_ops.clts()
+
+#define read_cr0() paravirt_ops.read_cr0()
+#define write_cr0(x) paravirt_ops.write_cr0(x)
+
+#define read_cr2() paravirt_ops.read_cr2()
+#define write_cr2(x) paravirt_ops.write_cr2(x)
+
+#define read_cr3() paravirt_ops.read_cr3()
+#define write_cr3(x) paravirt_ops.write_cr3(x)
+
+#define read_cr4() paravirt_ops.read_cr4()
+#define write_cr4(x) paravirt_ops.write_cr4(x)
+
+
+#define ptep_get_and_clear(mm,addr,xp)	\
+			(paravirt_ops.ptep_get_and_clear(mm,addr,xp))
+
+static inline void raw_safe_halt(void)
+{
+	paravirt_ops.safe_halt();
+}
+
+static inline void halt(void)
+{
+	paravirt_ops.safe_halt();
+}
+#define wbinvd() paravirt_ops.wbinvd()
+
+
+#define rdmsr(msr,val1,val2) do {				\
+	int _err;						\
+	u64 _l = paravirt_ops.read_msr(msr,&_err);		\
+	val1 = (u32)_l;						\
+	val2 = _l >> 32;					\
+} while(0)
+
+/* rdmsr with exception handling */
+#define rdmsr_safe(msr,a,b) ({					\
+	int _err;						\
+	u64 _l = paravirt_ops.read_msr(msr,&_err);		\
+	(*a) = (u32)_l;						\
+	(*b) = _l >> 32;					\
+	_err; })
+
+#define wrmsr(msr,val1,val2) do {				\
+	u64 _l = ((u64)(val2) << 32) | (val1);			\
+	paravirt_ops.write_msr((msr), _l);			\
+} while(0)
+
+#define rdmsrl(msr,val) do {					\
+	int _err;						\
+	val = paravirt_ops.read_msr((msr),&_err);		\
+} while(0)
+
+#define checking_wrmsrl(msr,val) (paravirt_ops.write_msr((msr),(val)))
+
+#define wrmsrl(msr,val) (paravirt_ops.write_msr((msr),(val)))
+
+#define wrmsr_safe(msr,a,b) ({					\
+	u64 _l = ((u64)(b) << 32) | (a);			\
+	paravirt_ops.write_msr((msr),_l);			\
+})
+
+#define rdtsc(low,high) do {					\
+	u64 _l = paravirt_ops.read_tsc();			\
+	low = (u32)_l;						\
+	high = _l >> 32;					\
+} while(0)
+
+#define rdtscl(low) do {					\
+	u64 _l = paravirt_ops.read_tsc();			\
+	low = (int)_l;						\
+} while(0)
+
+#define rdtscll(val) (val = paravirt_ops.read_tsc())
+
+#define rdtscp(low,high,aux) do {				\
+	u64 _val = paravirt_ops.read_tscp(&aux);		\
+	low = (int)_val;					\
+	high = _val >> 32;					\
+} while (0)
+
+#define rdtscpll(val, aux) (val) = paravirt_ops.read_tscp(&aux)
+
+#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
+
+#define rdpmc(counter,low,high) do {				\
+	u64 _l = paravirt_ops.read_pmc();			\
+	low = (u32)_l;						\
+	high = _l >> 32;					\
+} while(0)
+
+#define load_TR_desc() (paravirt_ops.load_tr_desc())
+#define load_gdt(dtr) (paravirt_ops.load_gdt(dtr))
+#define load_idt(dtr) (paravirt_ops.load_idt(dtr))
+#define set_ldt(addr, entries) (paravirt_ops.set_ldt((addr), (entries)))
+#define store_gdt(dtr) (paravirt_ops.store_gdt(dtr))
+#define store_idt(dtr) (paravirt_ops.store_idt(dtr))
+#define store_tr(tr) ((tr) = paravirt_ops.store_tr())
+#define load_TLS(t,cpu) (paravirt_ops.load_tls((t),(cpu)))
+#define write_ldt_entry(dt, entry, low, high)				\
+	(paravirt_ops.write_ldt_entry((dt), (entry), (low), (high)))
+#define write_gdt_entry(dt, entry, low, high)				\
+	(paravirt_ops.write_gdt_entry((dt), (entry), (low), (high)))
+#define write_idt_entry(dt, entry, low, high)				\
+	(paravirt_ops.write_idt_entry((dt), (entry), (low), (high)))
+
+#define __pte(x)	paravirt_ops.make_pte(x)
+#define pte_val(x)	paravirt_ops.pte_val(x)
+
+#define __pgd(x)	paravirt_ops.make_pgd(x)
+#define pgd_val(x)	paravirt_ops.pgd_val(x)
+
+#define __pud(x)	paravirt_ops.make_pud(x)
+#define pud_val(x)	paravirt_ops.pud_val(x)
+
+#define __pmd(x)	paravirt_ops.make_pmd(x)
+#define pmd_val(x)	paravirt_ops.pmd_val(x)
+
+#define ebda_info(addr,size) paravirt_ops.ebda_info(addr,size)
+
+/* The paravirtualized I/O functions */
+static inline void slow_down_io(void) {
+	paravirt_ops.io_delay();
+#ifdef REALLY_SLOW_IO
+	paravirt_ops.io_delay();
+	paravirt_ops.io_delay();
+	paravirt_ops.io_delay();
+#endif
+}
+
+#ifdef CONFIG_X86_LOCAL_APIC
+/*
+ * Basic functions accessing APICs.
+ */
+static inline void apic_write(unsigned long reg, unsigned long v)
+{
+	paravirt_ops.apic_write(reg,v);
+}
+
+static inline unsigned long apic_read(unsigned long reg)
+{
+	return paravirt_ops.apic_read(reg);
+}
+#endif
+
+static inline void paravirt_pagetable_setup_start(pgd_t *base)
+{
+	if (paravirt_ops.pagetable_setup_start)
+		(*paravirt_ops.pagetable_setup_start)(base);
+}
+
+static inline void paravirt_pagetable_setup_done(pgd_t *base)
+{
+	if (paravirt_ops.pagetable_setup_done)
+		(*paravirt_ops.pagetable_setup_done)(base);
+}
+
+void native_pte_clear(struct mm_struct *mm, u64 addr, pte_t *ptep);
+void native_pmd_clear(pmd_t *pmd);
+void native_nop(void);
+
+static inline void paravirt_activate_mm(struct mm_struct *prev,
+					struct mm_struct *next)
+{
+	paravirt_ops.activate_mm(prev, next);
+}
+
+static inline void paravirt_dup_mmap(struct mm_struct *oldmm,
+				     struct mm_struct *mm)
+{
+	paravirt_ops.dup_mmap(oldmm, mm);
+}
+
+static inline void paravirt_exit_mmap(struct mm_struct *mm)
+{
+	paravirt_ops.exit_mmap(mm);
+}
+
+#define __flush_tlb() paravirt_ops.flush_tlb_user()
+#define __flush_tlb_all() paravirt_ops.flush_tlb_kernel()
+#define __flush_tlb_one(addr) paravirt_ops.flush_tlb_single(addr)
+
+#define paravirt_alloc_pt(pfn) paravirt_ops.alloc_pt(pfn)
+#define paravirt_release_pt(pfn) paravirt_ops.release_pt(pfn)
+
+#define paravirt_alloc_pd(pfn) paravirt_ops.alloc_pd(pfn)
+#define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) \
+	paravirt_ops.alloc_pd_clone(pfn, clonepfn, start, count)
+#define paravirt_release_pd(pfn) paravirt_ops.release_pd(pfn)
+
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+	paravirt_ops.set_pte(ptep, pteval);
+}
+
+static inline void set_pte_at(struct mm_struct *mm, u64 addr, pte_t *ptep, pte_t pteval)
+{
+	paravirt_ops.set_pte_at(mm, addr, ptep, pteval);
+}
+
+static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+	paravirt_ops.set_pmd(pmdp, pmdval);
+}
+
+static inline void pte_update(struct mm_struct *mm, u32 addr, pte_t *ptep)
+{
+	paravirt_ops.pte_update(mm, addr, ptep);
+}
+
+static inline void pte_update_defer(struct mm_struct *mm, u32 addr, pte_t *ptep)
+{
+	paravirt_ops.pte_update_defer(mm, addr, ptep);
+}
+
+
+static inline void set_pgd(pgd_t *pgdp, pgd_t pgdval)
+{
+	paravirt_ops.set_pgd(pgdp, pgdval);
+}
+
+static inline void set_pud(pud_t *pudp, pud_t pudval)
+{
+	paravirt_ops.set_pud(pudp, pudval);
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+	paravirt_ops.pte_clear(mm, addr, ptep);
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+	paravirt_ops.pmd_clear(pmdp);
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+	paravirt_ops.pud_clear(pudp);
+}
+
+static inline void pgd_clear(pgd_t *pgdp)
+{
+	paravirt_ops.pgd_clear(pgdp);
+}
+
+/* Lazy mode for batching updates / context switch */
+#define PARAVIRT_LAZY_NONE 0
+#define PARAVIRT_LAZY_MMU  1
+#define PARAVIRT_LAZY_CPU  2
+
+#define  __HAVE_ARCH_ENTER_LAZY_CPU_MODE
+#define arch_enter_lazy_cpu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_CPU)
+#define arch_leave_lazy_cpu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_NONE)
+
+#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_MMU)
+#define arch_leave_lazy_mmu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_NONE)
+
+/* These all sit in the .parainstructions section to tell us what to patch. */
+struct paravirt_patch {
+	u8 *instr; 		/* original instructions */
+	u8 instrtype;		/* type of this instruction */
+	u8 len;			/* length of original instruction */
+	u16 clobbers;		/* what registers you may clobber */
+} __attribute__((aligned(8)));
+
+#define paravirt_alt(insn_string, typenum, clobber)	\
+	"771:\n\t" insn_string "\n" "772:\n"		\
+	".pushsection .parainstructions,\"a\"\n"	\
+	".align 8\n"					\
+	"  .quad 771b\n"				\
+	"  .byte " __stringify(typenum) "\n"		\
+	"  .byte 772b-771b\n"				\
+	"  .short " __stringify(clobber) "\n"		\
+	".popsection"
+
+/* These functions tend to be very simple.  So, if they touch any register,
+ * the callee-saved ones may already fulfill their needs, and hopefully we
+ * have no need to save any. */
+static inline unsigned long __raw_local_save_flags(void)
+{
+	unsigned long f;
+
+	__asm__ __volatile__(paravirt_alt("call *%1;",
+					  PARAVIRT_SAVE_FLAGS, CLBR_NONE)
+			     : "=a"(f): "m"(paravirt_ops.save_fl)
+			     : "memory", "cc");
+	return f;
+}
+
+static inline void raw_local_irq_restore(unsigned long f)
+{
+	__asm__ __volatile__(paravirt_alt("call *%1;", PARAVIRT_RESTORE_FLAGS,
+					CLBR_NONE)
+			     : : "D" (f) , "m" (paravirt_ops.restore_fl)
+			     : "memory", "rax", "cc");
+}
+
+static inline void raw_local_irq_disable(void)
+{
+	__asm__ __volatile__(paravirt_alt("call *%0;",
+					  PARAVIRT_IRQ_DISABLE, CLBR_NONE)
+			     : : "m" (paravirt_ops.irq_disable)
+			     : "memory", "rax", "cc");
+}
+
+static inline void raw_local_irq_enable(void)
+{
+	__asm__ __volatile__(paravirt_alt("call *%0;",
+					  PARAVIRT_IRQ_ENABLE, CLBR_NONE)
+			     : : "m" (paravirt_ops.irq_enable)
+			     : "memory", "rax", "cc");
+}
+
+static inline unsigned long __raw_local_irq_save(void)
+{
+	unsigned long f;
+
+	__asm__ __volatile__(paravirt_alt( "call *%1;"
+					   "call *%2;",
+					  PARAVIRT_SAVE_FLAGS_IRQ_DISABLE,
+					  CLBR_NONE)
+			     : "=a"(f)
+			     : "m" (paravirt_ops.save_fl),
+			       "m" (paravirt_ops.irq_disable)
+			     : "memory", "cc");
+	return f;
+}
+
+#define CLI_STRING paravirt_alt("call *paravirt_ops+%c[irq_disable];",	\
+		     PARAVIRT_IRQ_DISABLE, CLBR_NONE)
+
+#define STI_STRING paravirt_alt("call *paravirt_ops+%c[irq_enable];",	\
+		     PARAVIRT_IRQ_ENABLE, CLBR_NONE)
+
+#define CLI_STI_CLOBBERS , "%rax"
+#define CLI_STI_INPUT_ARGS \
+	,								\
+	[irq_disable] "i" (offsetof(struct paravirt_ops, irq_disable)),	\
+	[irq_enable] "i" (offsetof(struct paravirt_ops, irq_enable))
+
+#else  /* __ASSEMBLY__ */
+
+#define PARA_PATCH(ptype, clobbers, ops)	\
+771:;						\
+	ops;					\
+772:;						\
+	.pushsection .parainstructions,"a";	\
+	.align 8;				\
+	 .quad 771b;				\
+	 .byte ptype;				\
+	 .byte 772b-771b;			\
+	 .short clobbers;			\
+	.popsection
+
+#define INTERRUPT_RETURN				\
+	PARA_PATCH(PARAVIRT_INTERRUPT_RETURN, CLBR_ANY,	\
+	jmp *%cs:paravirt_ops+PARAVIRT_iret)
+
+#define DISABLE_INTERRUPTS(clobbers)			\
+	PARA_PATCH(PARAVIRT_IRQ_DISABLE, clobbers,	\
+	call *paravirt_ops+PARAVIRT_irq_disable)
+
+#define ENABLE_INTERRUPTS(clobbers)			\
+	PARA_PATCH(PARAVIRT_IRQ_ENABLE, clobbers,	\
+	call *%cs:paravirt_ops+PARAVIRT_irq_enable)
+
+#define SYSRETQ						\
+	PARA_PATCH(PARAVIRT_SYSRETQ, CLBR_ANY,		\
+	jmp *%cs:paravirt_ops+PARAVIRT_sysret)
+
+#define SWAPGS						\
+	PARA_PATCH(PARAVIRT_SWAPGS, CLBR_NONE,		\
+	call *paravirt_ops+PARAVIRT_swapgs)		\
+
+/* this is needed in early_idt_handler */
+#define GET_CR2_INTO_RAX 				\
+	call *paravirt_ops+PARAVIRT_read_cr2
+
+#endif /* __ASSEMBLY__ */
+#else  /* !CONFIG_PARAVIRT */
+
+static inline void paravirt_pagetable_setup_start(pgd_t *base)
+{
+	int i;
+
+	/*
+	 * Init entries of the first-level page table to the
+	 * zero page, if they haven't already been set up.
+	 *
+	 * In a normal native boot, we'll be running on a
+	 * pagetable rooted in swapper_pg_dir, but not in PAE
+	 * mode, so this will end up clobbering the mappings
+	 * for the lower 24Mbytes of the address space,
+	 * without affecting the kernel address space.
+	 */
+	for (i = 0; i < USER_PTRS_PER_PGD; i++)
+		set_pgd(&base[i],
+			__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
+	memset(&base[USER_PTRS_PER_PGD], 0, sizeof(pgd_t));
+}
+
+static inline void paravirt_pagetable_setup_done(pgd_t *base)
+{
+	/*
+	 * Add low memory identity-mappings - SMP needs it when
+	 * starting up on an AP from real-mode. In the non-PAE
+	 * case we already have these mappings through head.S.
+	 * All user-space mappings are explicitly cleared after
+	 * SMP startup.
+	 */
+	set_pgd(&base[0], base[USER_PTRS_PER_PGD]);
+}
+
+static inline void paravirt_activate_mm(struct mm_struct *prev,
+					struct mm_struct *next)
+{
+}
+
+static inline void paravirt_dup_mmap(struct mm_struct *oldmm,
+				     struct mm_struct *mm)
+{
+}
+
+static inline void paravirt_exit_mmap(struct mm_struct *mm)
+{
+}
+
+#endif /* CONFIG_PARAVIRT */
+#endif	/* __ASM_PARAVIRT_H */
Index: clean-start/arch/x86_64/kernel/Makefile
===================================================================
--- clean-start.orig/arch/x86_64/kernel/Makefile
+++ clean-start/arch/x86_64/kernel/Makefile
@@ -41,6 +41,8 @@ obj-$(CONFIG_AUDIT)		+= audit.o
 obj-$(CONFIG_MODULES)		+= module.o
 obj-$(CONFIG_PCI)		+= early-quirks.o
 
+obj-$(CONFIG_PARAVIRT)		+= paravirt.o
+
 obj-y				+= topology.o
 obj-y				+= intel_cacheinfo.o
 

--


* [RFC/PATCH PV_OPS X86_64 02/17] paravirt_ops - msr
       [not found] <20070308054422.820010000@redhat.com>
  2007-03-08  6:01 ` [RFC/PATCH PV_OPS X86_64 01/17] paravirt_ops - core changes Steven Rostedt
@ 2007-03-08  6:01 ` Steven Rostedt
  2007-03-08  6:01 ` [RFC/PATCH PV_OPS X86_64 03/17] paravirt_ops - system routines Steven Rostedt
                   ` (14 subsequent siblings)
  16 siblings, 0 replies; 19+ messages in thread
From: Steven Rostedt @ 2007-03-08  6:01 UTC (permalink / raw)
  To: virtualization; +Cc: Chris Wright, Glauber de Oliveira Costa

plain text document attachment (xx-paravirt-msr-header.patch)
Consolidate the MSR routines for use with paravirt_ops.
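
A usage sketch of the consolidated exception-handling variant (the
probe function is illustrative; 0xc0000080 is MSR_EFER, but any MSR
number would do):

	/* Sketch: reading an MSR that may #GP on some CPUs; the fixup
	 * section in native_read_msr_safe() turns the fault into err. */
	static int read_efer_sketch(unsigned long *val)
	{
		int err;

		*val = native_read_msr_safe(0xc0000080, &err);
		return err;
	}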

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>



Index: clean-start/include/asm-x86_64/msr.h
===================================================================
--- clean-start.orig/include/asm-x86_64/msr.h
+++ clean-start/include/asm-x86_64/msr.h
@@ -2,6 +2,62 @@
 #define X86_64_MSR_H 1
 
 #ifndef __ASSEMBLY__
+#include <asm/errno.h>
+
+static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
+				 unsigned int *ecx, unsigned int *edx)
+{
+	__asm__("cpuid"
+		: "=a" (*eax),
+		  "=b" (*ebx),
+		  "=c" (*ecx),
+		  "=d" (*edx)
+		: "0" (*eax), "2" (*ecx));
+}
+
+/* wrmsr with exception handling */
+static inline int native_write_msr_safe(unsigned int msr, unsigned long val)
+{
+	int err;
+	asm volatile("2: wrmsr ; xorl %0,%0\n"
+		     "1:\n\t"
+		     ".section .fixup,\"ax\"\n\t"
+		     "3:  movl %4,%0 ; jmp 1b\n\t"
+		     ".previous\n\t"
+ 		     ".section __ex_table,\"a\"\n"
+		     "   .align 8\n\t"
+		     "   .quad 2b,3b\n\t"
+		     ".previous"
+		     : "=a" (err)
+		     : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
+		       "i" (-EFAULT));
+	return err;
+}
+
+/* rdmsr with exception handling */
+static inline unsigned long native_read_msr_safe(unsigned int msr, int *err)
+{
+	unsigned long a, b;
+
+	  asm volatile ("1:       rdmsr\n"
+                      "2:\n"
+                      ".section .fixup,\"ax\"\n"
+                      "3:       movl %4,%0\n"
+                      " jmp 2b\n"
+                      ".previous\n"
+                      ".section __ex_table,\"a\"\n"
+                      " .align 8\n"
+                      " .quad 1b,3b\n"
+                      ".previous":"=&bDS" (*err), "=a"(a), "=d"(b)
+                      :"c"(msr), "i"(-EIO), "0"(0));
+	return a | (b << 32);
+}
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+
+#define __cpuid native_cpuid
 /*
  * Access to machine-specific registers (available on 586 and better only)
  * Note: the rd* operations modify the parameters directly (without using
@@ -28,37 +84,18 @@
 
 #define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32) 
 
-/* wrmsr with exception handling */
-#define wrmsr_safe(msr,a,b) ({ int ret__;			\
-	asm volatile("2: wrmsr ; xorl %0,%0\n"			\
-		     "1:\n\t"					\
-		     ".section .fixup,\"ax\"\n\t"		\
-		     "3:  movl %4,%0 ; jmp 1b\n\t"		\
-		     ".previous\n\t"				\
- 		     ".section __ex_table,\"a\"\n"		\
-		     "   .align 8\n\t"				\
-		     "   .quad 	2b,3b\n\t"			\
-		     ".previous"				\
-		     : "=a" (ret__)				\
-		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
-	ret__; })
-
-#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
-
-#define rdmsr_safe(msr,a,b) \
-	({ int ret__;						\
-	  asm volatile ("1:       rdmsr\n"			\
-                      "2:\n"					\
-                      ".section .fixup,\"ax\"\n"		\
-                      "3:       movl %4,%0\n"			\
-                      " jmp 2b\n"				\
-                      ".previous\n"				\
-                      ".section __ex_table,\"a\"\n"		\
-                      " .align 8\n"				\
-                      " .quad 1b,3b\n"				\
-                      ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b))\
-                      :"c"(msr), "i"(-EIO), "0"(0));		\
-	  ret__; })		
+#define wrmsr_safe(msr,val1,val2)					\
+		native_write_msr_safe(msr,(u64)(val1)|(u64)(val2)<<32)
+
+#define rdmsr_safe(msr,val1,val2)				\
+	({							\
+		int __err;					\
+		unsigned long __val;				\
+		__val = native_read_msr_safe(msr, &__err);	\
+		*val1 = (u32)__val;				\
+		*val2 = (u32)(__val>>32);			\
+		__err;						\
+	})
 
 #define rdtsc(low,high) \
      __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
@@ -66,8 +103,6 @@
 #define rdtscl(low) \
      __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
 
-#define rdtscp(low,high,aux) \
-     asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))
 
 #define rdtscll(val) do { \
      unsigned int __a,__d; \
@@ -75,42 +110,43 @@
      (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
 } while(0)
 
+#define rdpmc(counter,low,high) \
+     __asm__ __volatile__("rdpmc" \
+			  : "=a" (low), "=d" (high) \
+			  : "c" (counter))
+
+#define rdtscp(low,high,aux) \
+     asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))
+
 #define rdtscpll(val, aux) do { \
      unsigned long __a, __d; \
      asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
      (val) = (__d << 32) | __a; \
 } while (0)
 
+#define checking_wrmsrl(msr,val) native_write_msr_safe(msr, val)
+
+#endif /* CONFIG_PARAVIRT */
+
 #define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
 
 #define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)
 
-#define rdpmc(counter,low,high) \
-     __asm__ __volatile__("rdpmc" \
-			  : "=a" (low), "=d" (high) \
-			  : "c" (counter))
-
-static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
+static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx,
 			 unsigned int *ecx, unsigned int *edx)
 {
-	__asm__("cpuid"
-		: "=a" (*eax),
-		  "=b" (*ebx),
-		  "=c" (*ecx),
-		  "=d" (*edx)
-		: "0" (op));
+	*eax = op;
+	*ecx = 0;
+	__cpuid(eax, ebx, ecx, edx);
 }
 
 /* Some CPUID calls want 'count' to be placed in ecx */
-static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
-	       	int *edx)
+static inline void cpuid_count(int op, int count,
+			 int *eax, int *ebx, int *ecx, int *edx)
 {
-	__asm__("cpuid"
-		: "=a" (*eax),
-		  "=b" (*ebx),
-		  "=c" (*ecx),
-		  "=d" (*edx)
-		: "0" (op), "c" (count));
+	*eax = op;
+	*ecx = count;
+	__cpuid(eax, ebx, ecx, edx);
 }
 
 /*
@@ -118,42 +154,34 @@ static inline void cpuid_count(int op, i
  */
 static inline unsigned int cpuid_eax(unsigned int op)
 {
-	unsigned int eax;
-
-	__asm__("cpuid"
-		: "=a" (eax)
-		: "0" (op)
-		: "bx", "cx", "dx");
+	unsigned int eax, ebx, ecx, edx;
+	eax = op;
+	ecx = 0;
+	__cpuid(&eax, &ebx, &ecx, &edx);
 	return eax;
 }
 static inline unsigned int cpuid_ebx(unsigned int op)
 {
-	unsigned int eax, ebx;
-
-	__asm__("cpuid"
-		: "=a" (eax), "=b" (ebx)
-		: "0" (op)
-		: "cx", "dx" );
+	unsigned int eax, ebx, ecx, edx;
+	eax = op;
+	ecx = 0;
+	__cpuid(&eax, &ebx, &ecx, &edx);
 	return ebx;
 }
 static inline unsigned int cpuid_ecx(unsigned int op)
 {
-	unsigned int eax, ecx;
-
-	__asm__("cpuid"
-		: "=a" (eax), "=c" (ecx)
-		: "0" (op)
-		: "bx", "dx" );
+	unsigned int eax, ebx, ecx, edx;
+	eax = op;
+	ecx = 0;
+	__cpuid(&eax, &ebx, &ecx, &edx);
 	return ecx;
 }
 static inline unsigned int cpuid_edx(unsigned int op)
 {
-	unsigned int eax, edx;
-
-	__asm__("cpuid"
-		: "=a" (eax), "=d" (edx)
-		: "0" (op)
-		: "bx", "cx");
+	unsigned int eax, ebx, ecx, edx;
+	eax = op;
+	ecx = 0;
+	__cpuid(&eax, &ebx, &ecx, &edx);
 	return edx;
 }
 

--


* [RFC/PATCH PV_OPS X86_64 03/17] paravirt_ops - system routines
       [not found] <20070308054422.820010000@redhat.com>
  2007-03-08  6:01 ` [RFC/PATCH PV_OPS X86_64 01/17] paravirt_ops - core changes Steven Rostedt
  2007-03-08  6:01 ` [RFC/PATCH PV_OPS X86_64 02/17] paravirt_ops - msr Steven Rostedt
@ 2007-03-08  6:01 ` Steven Rostedt
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 04/17] paravirt_ops - apic header updates Steven Rostedt
                   ` (13 subsequent siblings)
  16 siblings, 0 replies; 19+ messages in thread
From: Steven Rostedt @ 2007-03-08  6:01 UTC (permalink / raw)
  To: virtualization; +Cc: Chris Wright, Glauber de Oliveira Costa

plain text document attachment (xx-paravirt-processor-header.patch)
System routine updates for the paravirt_ops interface.
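
The point of routing set_in_cr4()/clear_in_cr4() through read_cr4() and
write_cr4() is that the same C code now works natively and under a
hypervisor. A usage sketch (the wrapper name is illustrative; it assumes
the X86_CR4_PGE bit definition already in processor.h):

	/* Sketch: natively this compiles down to the movq asm below;
	 * with CONFIG_PARAVIRT it indirects through paravirt_ops. */
	static inline void enable_global_pages_sketch(void)
	{
		set_in_cr4(X86_CR4_PGE);
	}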

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>


Index: clean-start/include/asm-x86_64/processor.h
===================================================================
--- clean-start.orig/include/asm-x86_64/processor.h
+++ clean-start/include/asm-x86_64/processor.h
@@ -139,35 +139,6 @@ extern unsigned short num_cache_leaves;
 #define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */
 
 /*
- * Save the cr4 feature set we're using (ie
- * Pentium 4MB enable and PPro Global page
- * enable), so that any CPU's that boot up
- * after us can get the correct flags.
- */
-extern unsigned long mmu_cr4_features;
-
-static inline void set_in_cr4 (unsigned long mask)
-{
-	mmu_cr4_features |= mask;
-	__asm__("movq %%cr4,%%rax\n\t"
-		"orq %0,%%rax\n\t"
-		"movq %%rax,%%cr4\n"
-		: : "irg" (mask)
-		:"ax");
-}
-
-static inline void clear_in_cr4 (unsigned long mask)
-{
-	mmu_cr4_features &= ~mask;
-	__asm__("movq %%cr4,%%rax\n\t"
-		"andq %0,%%rax\n\t"
-		"movq %%rax,%%cr4\n"
-		: : "irg" (~mask)
-		:"ax");
-}
-
-
-/*
  * User space process size. 47bits minus one guard page.
  */
 #define TASK_SIZE64	(0x800000000000UL - 4096)
@@ -299,6 +270,10 @@ struct thread_struct {
 	set_fs(USER_DS);							 \
 } while(0) 
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define paravirt_enabled() 0
 #define get_debugreg(var, register)				\
 		__asm__("movq %%db" #register ", %0"		\
 			:"=r" (var))
@@ -306,6 +281,31 @@ struct thread_struct {
 		__asm__("movq %0,%%db" #register		\
 			: /* no output */			\
 			:"r" (value))
+#define load_rsp0(tss, thread)				\
+	do { (tss)->rsp0 = (thread)->rsp0; } while(0)
+#endif
+
+/*
+ * Save the cr4 feature set we're using (ie
+ * Pentium 4MB enable and PPro Global page
+ * enable), so that any CPU's that boot up
+ * after us can get the correct flags.
+ */
+extern unsigned long mmu_cr4_features;
+
+static inline void set_in_cr4 (unsigned long mask)
+{
+	mmu_cr4_features |= mask;
+	write_cr4(read_cr4() | mask);
+}
+
+static inline void clear_in_cr4 (unsigned long mask)
+{
+	mmu_cr4_features &= ~mask;
+	write_cr4(read_cr4() & ~mask);
+}
+
+
 
 struct task_struct;
 struct mm_struct;
Index: clean-start/include/asm-x86_64/system.h
===================================================================
--- clean-start.orig/include/asm-x86_64/system.h
+++ clean-start/include/asm-x86_64/system.h
@@ -65,46 +65,84 @@ extern void load_gs_index(unsigned); 
 		".previous"			\
 		: :"r" (value), "r" (0))
 
+static inline void native_clts(void)
+{
+	asm volatile ("clts");
+}
+
+static inline unsigned long native_read_cr0(void)
+{
+	unsigned long val;
+	asm volatile("movq %%cr0,%0\n\t" :"=r" (val));
+	return val;
+}
+
+static inline void native_write_cr0(unsigned long val)
+{
+	asm volatile("movq %0,%%cr0": :"r" (val));
+}
+
+static inline unsigned long native_read_cr2(void)
+{
+	unsigned long val;
+	asm volatile("movq %%cr2,%0\n\t" :"=r" (val));
+	return val;
+}
+
+static inline void native_write_cr2(unsigned long val)
+{
+	asm volatile("movq %0,%%cr2": :"r" (val));
+}
+
+static inline unsigned long native_read_cr3(void)
+{
+	unsigned long val;
+	asm volatile("movq %%cr3,%0\n\t" :"=r" (val));
+	return val;
+}
+
+static inline void native_write_cr3(unsigned long val)
+{
+	asm volatile("movq %0,%%cr3": :"r" (val));
+}
+
+static inline unsigned long native_read_cr4(void)
+{
+	unsigned long val;
+	asm volatile("movq %%cr4,%0\n\t" :"=r" (val));
+	return val;
+}
+
+static inline void native_write_cr4(unsigned long val)
+{
+	asm volatile("movq %0,%%cr4": :"r" (val));
+}
+
+static inline void native_wbinvd(void)
+{
+	asm volatile("wbinvd": : :"memory");
+}
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
 /*
  * Clear and set 'TS' bit respectively
  */
-#define clts() __asm__ __volatile__ ("clts")
-
-static inline unsigned long read_cr0(void)
-{ 
-	unsigned long cr0;
-	asm volatile("movq %%cr0,%0" : "=r" (cr0));
-	return cr0;
-} 
-
-static inline void write_cr0(unsigned long val) 
-{ 
-	asm volatile("movq %0,%%cr0" :: "r" (val));
-} 
-
-static inline unsigned long read_cr3(void)
-{ 
-	unsigned long cr3;
-	asm("movq %%cr3,%0" : "=r" (cr3));
-	return cr3;
-} 
-
-static inline unsigned long read_cr4(void)
-{ 
-	unsigned long cr4;
-	asm("movq %%cr4,%0" : "=r" (cr4));
-	return cr4;
-} 
-
-static inline void write_cr4(unsigned long val)
-{ 
-	asm volatile("movq %0,%%cr4" :: "r" (val));
-} 
+#define clts	 native_clts
+#define read_cr0 native_read_cr0
+#define write_cr0 native_write_cr0
+#define read_cr2 native_read_cr2
+#define write_cr2 native_write_cr2
+#define read_cr3 native_read_cr3
+#define write_cr3 native_write_cr3
+#define read_cr4 native_read_cr4
+#define write_cr4 native_write_cr4
+#define wbinvd	native_wbinvd
+#endif /* CONFIG_PARAVIRT */
 
 #define stts() write_cr0(8 | read_cr0())
 
-#define wbinvd() \
-	__asm__ __volatile__ ("wbinvd": : :"memory");
 
 /*
  * On SMP systems, when the scheduler does migration-cost autodetection,

--


* [RFC/PATCH PV_OPS X86_64 04/17] paravirt_ops - apic header updates
       [not found] <20070308054422.820010000@redhat.com>
                   ` (2 preceding siblings ...)
  2007-03-08  6:01 ` [RFC/PATCH PV_OPS X86_64 03/17] paravirt_ops - system routines Steven Rostedt
@ 2007-03-08  6:02 ` Steven Rostedt
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 05/17] paravirt_ops - segments Steven Rostedt
                   ` (12 subsequent siblings)
  16 siblings, 0 replies; 19+ messages in thread
From: Steven Rostedt @ 2007-03-08  6:02 UTC (permalink / raw)
  To: virtualization; +Cc: Chris Wright, Glauber de Oliveira Costa

plain text document attachment (xx-paravirt-apic-header.patch)
This patch updates the APIC header file, switching apic_read and
apic_write over to the paravirt_ops interface.
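
A usage sketch (the caller is illustrative; APIC_EOI is the usual
register offset from apicdef.h):

	/* Sketch: ack an interrupt.  apic_write() is now either the
	 * native MMIO store below or a paravirt_ops hook, chosen at
	 * build time by CONFIG_PARAVIRT. */
	static inline void ack_apic_sketch(void)
	{
		apic_write(APIC_EOI, 0);
	}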

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>

Index: clean-start/include/asm-x86_64/apic.h
===================================================================
--- clean-start.orig/include/asm-x86_64/apic.h
+++ clean-start/include/asm-x86_64/apic.h
@@ -37,16 +37,24 @@ struct pt_regs;
  * Basic functions accessing APICs.
  */
 
-static __inline void apic_write(unsigned long reg, unsigned int v)
+static __inline void native_apic_write(unsigned long reg, unsigned int v)
 {
 	*((volatile unsigned int *)(APIC_BASE+reg)) = v;
 }
 
-static __inline unsigned int apic_read(unsigned long reg)
+static __inline unsigned int native_apic_read(unsigned long reg)
 {
 	return *((volatile unsigned int *)(APIC_BASE+reg));
 }
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define apic_write native_apic_write
+#define apic_read native_apic_read
+
+#endif /* CONFIG_PARAVIRT */
+
 static __inline__ void apic_wait_icr_idle(void)
 {
 	while (apic_read( APIC_ICR ) & APIC_ICR_BUSY)

--


* [RFC/PATCH PV_OPS X86_64 05/17] paravirt_ops - segments
       [not found] <20070308054422.820010000@redhat.com>
                   ` (3 preceding siblings ...)
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 04/17] paravirt_ops - apic header updates Steven Rostedt
@ 2007-03-08  6:02 ` Steven Rostedt
  2007-03-08  6:49   ` Chris Wright
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 06/17] paravirt_ops - miscellaneous updates Steven Rostedt
                   ` (11 subsequent siblings)
  16 siblings, 1 reply; 19+ messages in thread
From: Steven Rostedt @ 2007-03-08  6:02 UTC (permalink / raw)
  To: virtualization; +Cc: Chris Wright, Glauber de Oliveira Costa

plain text document attachment (xx-paravirt-segment-header.patch)
Added two new segments for the hypervisor.
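
The selector values follow the usual (GDT index * 8) encoding with
RPL 0 and the TI bit clear (ring 0, GDT). A compile-time sanity sketch
(illustrative; assumes BUILD_BUG_ON from linux/kernel.h):

	static inline void check_hv_selectors_sketch(void)
	{
		BUILD_BUG_ON(__HV_CS != GDT_ENTRY_HV_CS * 8);	/* 16*8 == 0x80 */
		BUILD_BUG_ON(__HV_DS != GDT_ENTRY_HV_DS * 8);	/* 17*8 == 0x88 */
	}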

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>


Index: clean-start/include/asm-x86_64/segment.h
===================================================================
--- clean-start.orig/include/asm-x86_64/segment.h
+++ clean-start/include/asm-x86_64/segment.h
@@ -37,8 +37,14 @@
 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
 
+#define __HV_CS 0x80  /* 16*8 */
+#define __HV_DS 0x88  /* 17*8 */
+
+#define GDT_ENTRY_HV_CS 16
+#define GDT_ENTRY_HV_DS 17
+
 #define IDT_ENTRIES 256
-#define GDT_ENTRIES 16
+#define GDT_ENTRIES 18
 #define GDT_SIZE (GDT_ENTRIES * 8)
 #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) 
 

--


* [RFC/PATCH PV_OPS X86_64 06/17] paravirt_ops - miscellaneous updates.
       [not found] <20070308054422.820010000@redhat.com>
                   ` (4 preceding siblings ...)
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 05/17] paravirt_ops - segments Steven Rostedt
@ 2007-03-08  6:02 ` Steven Rostedt
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 07/17] paravirt_ops - descriptor changes Steven Rostedt
                   ` (10 subsequent siblings)
  16 siblings, 0 replies; 19+ messages in thread
From: Steven Rostedt @ 2007-03-08  6:02 UTC (permalink / raw)
  To: virtualization; +Cc: Chris Wright, Glauber de Oliveira Costa

plain text document attachment (xx-paravirt-misc-headers.patch)
Miscellaneous header updates.  Sort of a work-in-progress.
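
The proto.h hunk pairs with patch 01: under CONFIG_PARAVIRT the
syscall_init() entry point is the dispatcher defined in paravirt.c, and
the native MSR setup is reached as x86_64_syscall_init. For reference,
the call path being selected (quoted from patch 01, not new code here):

	void syscall_init(void)
	{
		paravirt_ops.syscall_init();	/* = x86_64_syscall_init natively */
	}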

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>


Index: clean-start/include/asm-x86_64/proto.h
===================================================================
--- clean-start.orig/include/asm-x86_64/proto.h
+++ clean-start/include/asm-x86_64/proto.h
@@ -27,7 +27,11 @@ extern void init_memory_mapping(unsigned
 
 extern void system_call(void); 
 extern int kernel_syscall(void);
+#ifdef CONFIG_PARAVIRT
+extern void x86_64_syscall_init(void);
+#else
 extern void syscall_init(void);
+#endif
 
 extern void ia32_syscall(void);
 extern void ia32_cstar_target(void); 
Index: clean-start/include/asm-x86_64/spinlock.h
===================================================================
--- clean-start.orig/include/asm-x86_64/spinlock.h
+++ clean-start/include/asm-x86_64/spinlock.h
@@ -6,6 +6,10 @@
 #include <asm/page.h>
 #include <asm/processor.h>
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#endif
+
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  *

--


* [RFC/PATCH PV_OPS X86_64 07/17] paravirt_ops - descriptor changes.
       [not found] <20070308054422.820010000@redhat.com>
                   ` (5 preceding siblings ...)
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 06/17] paravirt_ops - miscellaneous updates Steven Rostedt
@ 2007-03-08  6:02 ` Steven Rostedt
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 08/17] paravirt_ops - memory management Steven Rostedt
                   ` (9 subsequent siblings)
  16 siblings, 0 replies; 19+ messages in thread
From: Steven Rostedt @ 2007-03-08  6:02 UTC (permalink / raw)
  To: virtualization; +Cc: Chris Wright, Glauber de Oliveira Costa

plain text document attachment (xx-paravirt-desc-header.patch)
Update the descriptor handling to interface with paravirt_ops.
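
One behavioural detail: clear_LDT() becomes set_ldt(NULL, 0), so the
entries == 0 fast path in native_set_ldt() must not dereference addr.
A usage sketch (the caller is illustrative):

	/* Sketch: tearing down a context's LDT now goes through the
	 * same hook as loading one; natively it ends as "lldt $0". */
	static inline void drop_ldt_sketch(mm_context_t *pc)
	{
		pc->size = 0;
		set_ldt(NULL, 0);
	}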

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>



Index: clean-start/include/asm-x86_64/desc.h
===================================================================
--- clean-start.orig/include/asm-x86_64/desc.h
+++ clean-start/include/asm-x86_64/desc.h
@@ -16,9 +16,8 @@
 
 extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
 
-#define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8))
-#define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
-#define clear_LDT()  asm volatile("lldt %w0"::"r" (0))
+/* the cpu gdt accessor */
+#define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address)
 
 /*
  * This is the ldt that every process will get unless we need
@@ -28,8 +27,6 @@ extern struct desc_struct default_ldt[];
 extern struct gate_struct idt_table[]; 
 extern struct desc_ptr cpu_gdt_descr[];
 
-/* the cpu gdt accessor */
-#define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address)
 
 static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist)  
 {
@@ -115,7 +112,35 @@ static inline void set_seg_base(unsigned
 	d->base0 = addr & 0xffff;
 	d->base1 = (addr >> 16) & 0xff;
 	d->base2 = (addr >> 24) & 0xff;
-} 
+}
+
+static inline void native_load_tr_desc(void)
+{
+	asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8));
+}
+
+static inline void native_load_gdt(const struct desc_ptr *dtr)
+{
+	asm volatile("lgdt %w0"::"m" (*dtr));
+}
+
+static inline void native_load_idt(const struct desc_ptr *dtr)
+{
+	asm volatile("lidt %w0"::"m" (*dtr));
+}
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define load_idt native_load_idt
+#define load_gdt native_load_gdt
+#define load_TR_desc native_load_tr_desc
+#define set_ldt native_set_ldt
+#define load_TLS native_load_TLS
+#endif /* CONFIG_PARAVIRT */
+
+#define clear_LDT() set_ldt(NULL,0)
+
 
 #define LDT_entry_a(info) \
 	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
@@ -149,7 +174,7 @@ static inline void set_seg_base(unsigned
 # error update this code.
 #endif
 
-static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
+static inline void native_load_TLS(struct thread_struct *t, unsigned int cpu)
 {
 	u64 *gdt = (u64 *)(cpu_gdt(cpu) + GDT_ENTRY_TLS_MIN);
 	gdt[0] = t->tls_array[0];
@@ -157,27 +182,33 @@ static inline void load_TLS(struct threa
 	gdt[2] = t->tls_array[2];
 } 
 
+static inline void native_set_ldt(const void *addr,
+					unsigned int entries)
+{
+	if (likely(entries == 0))
+		__asm__ __volatile__ ("lldt %w0" :: "r" (0));
+	else {
+		unsigned cpu = smp_processor_id();
+
+		set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_LDT], (unsigned long)addr,
+			      DESC_LDT, entries * 8 - 1);
+		__asm__ __volatile__ ("lldt %w0"::"r" (GDT_ENTRY_LDT*8));
+	}
+}
+
 /*
  * load one particular LDT into the current CPU
  */
-static inline void load_LDT_nolock (mm_context_t *pc, int cpu)
+static inline void load_LDT_nolock (mm_context_t *pc)
 {
-	int count = pc->size;
-
-	if (likely(!count)) {
-		clear_LDT();
-		return;
-	}
-		
-	set_ldt_desc(cpu, pc->ldt, count);
-	load_LDT_desc();
+	set_ldt(pc->ldt, pc->size);
 }
 
 static inline void load_LDT(mm_context_t *pc)
 {
-	int cpu = get_cpu();
-	load_LDT_nolock(pc, cpu);
-	put_cpu();
+	preempt_disable();
+	load_LDT_nolock(pc);
+	preempt_enable();
 }
 
 extern struct desc_ptr idt_descr;
Index: clean-start/include/asm-x86_64/desc_defs.h
===================================================================
--- clean-start.orig/include/asm-x86_64/desc_defs.h
+++ clean-start/include/asm-x86_64/desc_defs.h
@@ -43,6 +43,11 @@ struct gate_struct {
 #define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
 #define PTR_HIGH(x) ((unsigned long)(x) >> 32)
 
+#define DESC_ADDRESS(d) (((unsigned long)(d).base2 << 32) | \
+				((d).base1 << 16) | (d).base0)
+
+#define GATE_ADDRESS(g) (((unsigned long)(g).offset_high << 32) | \
+				((g).offset_middle << 16) | (g).offset_low)
 enum {
 	DESC_TSS = 0x9,
 	DESC_LDT = 0x2,

--


* [RFC/PATCH PV_OPS X86_64 08/17] paravirt_ops - memory management
       [not found] <20070308054422.820010000@redhat.com>
                   ` (6 preceding siblings ...)
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 07/17] paravirt_ops - descriptor changes Steven Rostedt
@ 2007-03-08  6:02 ` Steven Rostedt
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 09/17] paravirt_ops - bios changes Steven Rostedt
                   ` (8 subsequent siblings)
  16 siblings, 0 replies; 19+ messages in thread
From: Steven Rostedt @ 2007-03-08  6:02 UTC (permalink / raw)
  To: virtualization; +Cc: Chris Wright, Glauber de Oliveira Costa

plain text document attachment (xx-paravirt-mm.patch)
Memory management for paravirt_ops.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>


Index: clean-start/arch/x86_64/mm/fault.c
===================================================================
--- clean-start.orig/arch/x86_64/mm/fault.c
+++ clean-start/arch/x86_64/mm/fault.c
@@ -180,7 +180,7 @@ void dump_pagetable(unsigned long addres
 	pmd_t *pmd;
 	pte_t *pte;
 
-	asm("movq %%cr3,%0" : "=r" (pgd));
+	pgd = (pgd_t *)read_cr3();
 
 	pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK); 
 	pgd += pgd_index(address);
@@ -347,7 +347,7 @@ asmlinkage void __kprobes do_page_fault(
 	prefetchw(&mm->mmap_sem);
 
 	/* get the address */
-	__asm__("movq %%cr2,%0":"=r" (address));
+	address = read_cr2();
 
 	info.si_code = SEGV_MAPERR;
 
Index: clean-start/arch/x86_64/mm/init.c
===================================================================
--- clean-start.orig/arch/x86_64/mm/init.c
+++ clean-start/arch/x86_64/mm/init.c
@@ -384,7 +384,7 @@ void __meminit init_memory_mapping(unsig
 	} 
 
 	if (!after_bootmem)
-		asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
+		mmu_cr4_features = read_cr4();
 	__flush_tlb_all();
 }
 
@@ -398,7 +398,7 @@ void __cpuinit zap_low_mappings(int cpu)
 		 * For AP's, zap the low identity mappings by changing the cr3
 		 * to init_level4_pgt and doing local flush tlb all
 		 */
-		asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
+		write_cr3(__pa_symbol(&init_level4_pgt));
 	}
 	__flush_tlb_all();
 }
Index: clean-start/include/asm-x86_64/mmu_context.h
===================================================================
--- clean-start.orig/include/asm-x86_64/mmu_context.h
+++ clean-start/include/asm-x86_64/mmu_context.h
@@ -42,7 +42,7 @@ static inline void switch_mm(struct mm_s
 		load_cr3(next->pgd);
 
 		if (unlikely(next->context.ldt != prev->context.ldt)) 
-			load_LDT_nolock(&next->context, cpu);
+			load_LDT_nolock(&next->context);
 	}
 #ifdef CONFIG_SMP
 	else {
@@ -55,7 +55,7 @@ static inline void switch_mm(struct mm_s
 			 * to make sure to use no freed page tables.
 			 */
 			load_cr3(next->pgd);
-			load_LDT_nolock(&next->context, cpu);
+			load_LDT_nolock(&next->context);
 		}
 	}
 #endif
Index: clean-start/include/asm-x86_64/page.h
===================================================================
--- clean-start.orig/include/asm-x86_64/page.h
+++ clean-start/include/asm-x86_64/page.h
@@ -64,16 +64,44 @@ typedef struct { unsigned long pgd; } pg
 
 typedef struct { unsigned long pgprot; } pgprot_t;
 
+static inline unsigned long native_pte_val(pte_t pte)
+{
+	return pte.pte;
+}
+
+static inline unsigned long native_pud_val(pud_t pud)
+{
+	return pud.pud;
+}
+
+
+static inline unsigned long native_pmd_val(pmd_t pmd)
+{
+	return pmd.pmd;
+}
+
+static inline unsigned long native_pgd_val(pgd_t pgd)
+{
+	return pgd.pgd;
+}
+
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+
 #define pte_val(x)	((x).pte)
 #define pmd_val(x)	((x).pmd)
 #define pud_val(x)	((x).pud)
 #define pgd_val(x)	((x).pgd)
-#define pgprot_val(x)	((x).pgprot)
 
 #define __pte(x) ((pte_t) { (x) } )
 #define __pmd(x) ((pmd_t) { (x) } )
 #define __pud(x) ((pud_t) { (x) } )
 #define __pgd(x) ((pgd_t) { (x) } )
+#endif /* CONFIG_PARAVIRT */
+
+#define pgprot_val(x)	((x).pgprot)
 #define __pgprot(x)	((pgprot_t) { (x) } )
 
 #define __PHYSICAL_START	((unsigned long)CONFIG_PHYSICAL_START)
Index: clean-start/include/asm-x86_64/pgtable.h
===================================================================
--- clean-start.orig/include/asm-x86_64/pgtable.h
+++ clean-start/include/asm-x86_64/pgtable.h
@@ -55,50 +55,62 @@ extern unsigned long empty_zero_page[PAG
  */
 #define PTRS_PER_PTE	512
 
-#define pte_ERROR(e) \
-	printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
-#define pmd_ERROR(e) \
-	printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
-#define pud_ERROR(e) \
-	printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), pud_val(e))
-#define pgd_ERROR(e) \
-	printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
-
-#define pgd_none(x)	(!pgd_val(x))
-#define pud_none(x)	(!pud_val(x))
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define set_pte native_set_pte
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+#define set_pmd native_set_pmd
+#define set_pud native_set_pud
+#define set_pgd native_set_pgd
+#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
+#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
+#define pud_clear native_pud_clear
+#define pgd_clear native_pgd_clear
+#define ptep_get_and_clear(mm,addr,xp)	__pte(xchg(&(xp)->pte, 0))
+#endif
 
-static inline void set_pte(pte_t *dst, pte_t val)
+static inline void native_set_pte(pte_t *dst, pte_t val)
 {
-	pte_val(*dst) = pte_val(val);
+	dst->pte = pte_val(val);
 } 
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
-static inline void set_pmd(pmd_t *dst, pmd_t val)
+
+static inline void native_set_pmd(pmd_t *dst, pmd_t val)
 {
-        pmd_val(*dst) = pmd_val(val); 
+	dst->pmd = pmd_val(val);
 } 
 
-static inline void set_pud(pud_t *dst, pud_t val)
+static inline void native_set_pud(pud_t *dst, pud_t val)
 {
-	pud_val(*dst) = pud_val(val);
+	dst->pud = pud_val(val);
 }
 
-static inline void pud_clear (pud_t *pud)
+static inline void native_set_pgd(pgd_t *dst, pgd_t val)
 {
-	set_pud(pud, __pud(0));
+	dst->pgd = pgd_val(val);
 }
-
-static inline void set_pgd(pgd_t *dst, pgd_t val)
+static inline void native_pud_clear (pud_t *pud)
 {
-	pgd_val(*dst) = pgd_val(val); 
-} 
+	set_pud(pud, __pud(0));
+}
 
-static inline void pgd_clear (pgd_t * pgd)
+static inline void native_pgd_clear (pgd_t * pgd)
 {
 	set_pgd(pgd, __pgd(0));
 }
 
-#define ptep_get_and_clear(mm,addr,xp)	__pte(xchg(&(xp)->pte, 0))
+#define pgd_none(x)	(!pgd_val(x))
+#define pud_none(x)	(!pud_val(x))
+
+#define pte_ERROR(e) \
+	printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
+#define pmd_ERROR(e) \
+	printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
+#define pud_ERROR(e) \
+	printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), pud_val(e))
+#define pgd_ERROR(e) \
+	printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
 
 struct mm_struct;
 
@@ -238,7 +250,6 @@ static inline unsigned long pmd_bad(pmd_
 
 #define pte_none(x)	(!pte_val(x))
 #define pte_present(x)	(pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
-#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
 
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))	/* FIXME: is this
 						   right? */
@@ -247,11 +258,11 @@ static inline unsigned long pmd_bad(pmd_
 
 static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
 {
-	pte_t pte;
-	pte_val(pte) = (page_nr << PAGE_SHIFT);
-	pte_val(pte) |= pgprot_val(pgprot);
-	pte_val(pte) &= __supported_pte_mask;
-	return pte;
+	unsigned long pte;
+	pte = (page_nr << PAGE_SHIFT);
+	pte |= pgprot_val(pgprot);
+	pte &= __supported_pte_mask;
+	return __pte(pte);
 }
 
 /*
@@ -345,7 +356,6 @@ static inline int pmd_large(pmd_t pte) {
 			pmd_index(address))
 #define pmd_none(x)	(!pmd_val(x))
 #define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
-#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
 #define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
 #define pmd_pfn(x)  ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
 
@@ -362,19 +372,20 @@ static inline int pmd_large(pmd_t pte) {
 /* physical address -> PTE */
 static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
 { 
-	pte_t pte;
-	pte_val(pte) = physpage | pgprot_val(pgprot); 
-	pte_val(pte) &= __supported_pte_mask;
-	return pte; 
+	unsigned long pte;
+	pte = physpage | pgprot_val(pgprot);
+	pte &= __supported_pte_mask;
+	return __pte(pte);
 }
  
 /* Change flags of a PTE */
-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+static inline pte_t pte_modify(pte_t pte_old, pgprot_t newprot)
 { 
-	pte_val(pte) &= _PAGE_CHG_MASK;
-	pte_val(pte) |= pgprot_val(newprot);
-	pte_val(pte) &= __supported_pte_mask;
-       return pte; 
+	unsigned long pte = pte_val(pte_old);
+	pte &= _PAGE_CHG_MASK;
+	pte |= pgprot_val(newprot);
+	pte &= __supported_pte_mask;
+	return __pte(pte);
 }
 
 #define pte_index(address) \
Index: clean-start/include/asm-x86_64/tlbflush.h
===================================================================
--- clean-start.orig/include/asm-x86_64/tlbflush.h
+++ clean-start/include/asm-x86_64/tlbflush.h
@@ -4,6 +4,7 @@
 #include <linux/mm.h>
 #include <asm/processor.h>
 
+
 static inline unsigned long get_cr3(void)
 {
 	unsigned long cr3;
@@ -16,7 +17,7 @@ static inline void set_cr3(unsigned long
 	asm volatile("mov %0,%%cr3" :: "r" (cr3) : "memory");
 }
 
-static inline void __flush_tlb(void)
+static inline void __native_flush_tlb(void)
 {
 	set_cr3(get_cr3());
 }
@@ -33,17 +34,24 @@ static inline void set_cr4(unsigned long
 	asm volatile("mov %0,%%cr4" :: "r" (cr4) : "memory");
 }
 
-static inline void __flush_tlb_all(void)
+static inline void __native_flush_tlb_all(void)
 {
 	unsigned long cr4 = get_cr4();
 	set_cr4(cr4 & ~X86_CR4_PGE);	/* clear PGE */
 	set_cr4(cr4);			/* write old PGE again and flush TLBs */
 }
 
-#define __flush_tlb_one(addr) \
+#define __native_flush_tlb_one(addr) \
 	__asm__ __volatile__("invlpg (%0)" :: "r" (addr) : "memory")
 

+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define __flush_tlb __native_flush_tlb
+#define __flush_tlb_one __native_flush_tlb_one
+#define __flush_tlb_all __native_flush_tlb_all
+#endif /* CONFIG_PARAVIRT */
 /*
  * TLB flushing:
  *

--


* [RFC/PATCH PV_OPS X86_64 09/17] paravirt_ops - bios changes
       [not found] <20070308054422.820010000@redhat.com>
                   ` (7 preceding siblings ...)
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 08/17] paravirt_ops - memory management Steven Rostedt
@ 2007-03-08  6:02 ` Steven Rostedt
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 10/17] paravirt_ops - boot changes Steven Rostedt
                   ` (7 subsequent siblings)
  16 siblings, 0 replies; 19+ messages in thread
From: Steven Rostedt @ 2007-03-08  6:02 UTC (permalink / raw)
  To: virtualization; +Cc: Chris Wright, Glauber de Oliveira Costa

plain text document attachment (xx-paravirt-bios.patch)
Add an interface for paravirt ops and e820.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>



Index: clean-start/arch/x86_64/kernel/e820.c
===================================================================
--- clean-start.orig/arch/x86_64/kernel/e820.c
+++ clean-start/arch/x86_64/kernel/e820.c
@@ -567,7 +567,7 @@ void __init setup_memory_region(void)
 	if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0)
 		early_panic("Cannot find a valid memory map");
 	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
-	e820_print_map("BIOS-e820");
+	e820_print_map(MAP_TYPE_STR);
 }
 
 static int __init parse_memopt(char *p)
Index: clean-start/include/asm-x86_64/e820.h
===================================================================
--- clean-start.orig/include/asm-x86_64/e820.h
+++ clean-start/include/asm-x86_64/e820.h
@@ -55,7 +55,22 @@ extern void finish_e820_parsing(void);
 
 extern struct e820map e820;
 
+#define EBDA_ADDR_POINTER 0x40E
+static inline void native_ebda_info(unsigned *addr, unsigned *size)
+{
+	*addr = *(unsigned short *)EBDA_ADDR_POINTER;
+	*addr <<= 4;
+	*size = *(unsigned short *)(unsigned long)*addr;
+}
+
 extern unsigned ebda_addr, ebda_size;
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define ebda_info native_ebda_info
+#define MAP_TYPE_STR "BIOS-e820"
+#endif
+
 #endif/*!__ASSEMBLY__*/
 
 #endif/*__E820_HEADER*/

--


* [RFC/PATCH PV_OPS X86_64 10/17] paravirt_ops - boot changes
       [not found] <20070308054422.820010000@redhat.com>
                   ` (8 preceding siblings ...)
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 09/17] paravirt_ops - bios changes Steven Rostedt
@ 2007-03-08  6:02 ` Steven Rostedt
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 11/17] paravirt_ops - asm-offset updates Steven Rostedt
                   ` (6 subsequent siblings)
  16 siblings, 0 replies; 19+ messages in thread
From: Steven Rostedt @ 2007-03-08  6:02 UTC (permalink / raw)
  To: virtualization; +Cc: Chris Wright, Glauber de Oliveira Costa

plain text document attachment (xx-paravirt-boot.patch)
Boot-up code modifications to get paravirt ops running.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>



Index: clean-start/arch/x86_64/kernel/head.S
===================================================================
--- clean-start.orig/arch/x86_64/kernel/head.S
+++ clean-start/arch/x86_64/kernel/head.S
@@ -16,6 +16,13 @@
 #include <asm/page.h>
 #include <asm/msr.h>
 #include <asm/cache.h>
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/asm-offsets.h>
+#include <asm/paravirt.h>
+#else
+#define GET_CR2_INTO_RAX mov %cr2, %rax
+#endif
 	
 /* we are not able to switch in one step to the final KERNEL ADRESS SPACE
  * because we need identity-mapped pages on setup so define __START_KERNEL to
@@ -106,6 +113,14 @@ startup_64:
 	 * reload the page tables here.
 	 */
 
+#ifdef CONFIG_PARAVIRT
+	/* a CS ending in 0x3 indicates we are running at privilege level
+	 * 3 (userspace); that's where our paravirt guests run. */
+	movq	%cs, %rax
+	testq	$0x3, %rax
+	jnz	startup_paravirt
+#endif
+
 	/* Enable PAE mode and PGE */
 	xorq	%rax, %rax
 	btsq	$5, %rax
@@ -208,10 +223,11 @@ ENTRY(early_idt_handler)
 	cmpl $2,early_recursion_flag(%rip)
 	jz  1f
 	incl early_recursion_flag(%rip)
-	xorl %eax,%eax
 	movq 8(%rsp),%rsi	# get rip
 	movq (%rsp),%rdx
-	movq %cr2,%rcx
+	GET_CR2_INTO_RAX
+	movq %rax,%rcx
+	xorq %rax, %rax
 	leaq early_idt_msg(%rip),%rdi
 	call early_printk
 	cmpl $2,early_recursion_flag(%rip)
@@ -232,6 +248,47 @@ early_idt_msg:
 early_idt_ripmsg:
 	.asciz "RIP %s\n"
 
+#ifdef CONFIG_PARAVIRT
+ENTRY(startup_paravirt)
+	cld
+
+	/* initial stack location */
+ 	movq $(init_thread_union+THREAD_SIZE),%rsp
+
+	/* We take pains to preserve all the regs. */
+	pushq	%r11
+	pushq	%r10
+	pushq	%r9
+	pushq	%r8
+	pushq	%rsi
+	pushq	%rdi
+	pushq	%rdx
+	pushq	%rcx
+	pushq	%rax
+
+	/* paravirt.o is last in the link, and its probe fn never returns */
+	pushq	$__start_paravirtprobe
+1:
+	movq	0(%rsp), %rax
+	pushq	(%rax)
+	movq	8(%rsp), %rdi
+	call	*(%rsp)
+	popq	%rax
+
+	movq	0x10(%rsp), %rax
+	movq	0x18(%rsp), %rcx
+	movq	0x20(%rsp), %rdx
+	movq	0x28(%rsp), %rdi
+	movq	0x30(%rsp), %rsi
+	movq	0x38(%rsp), %r8
+	movq	0x40(%rsp), %r9
+	movq	0x48(%rsp), %r10
+	movq	0x50(%rsp), %r11
+
+	addq	$8, (%rsp)
+	jmp	1b
+#endif
+
 .code32
 ENTRY(no_long_mode)
 	/* This isn't an x86-64 CPU so hang */
@@ -317,7 +374,9 @@ ENTRY(wakeup_level4_pgt)
 #endif
 
 #ifndef CONFIG_HOTPLUG_CPU
+  #ifndef CONFIG_PARAVIRT
 	__INITDATA
+  #endif
 #endif
 	/*
 	 * This default setting generates an ident mapping at address 0x100000
Index: clean-start/arch/x86_64/kernel/head64.c
===================================================================
--- clean-start.orig/arch/x86_64/kernel/head64.c
+++ clean-start/arch/x86_64/kernel/head64.c
@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/percpu.h>
+#include <linux/module.h>
 
 #include <asm/processor.h>
 #include <asm/proto.h>
@@ -20,6 +21,9 @@
 #include <asm/pgtable.h>
 #include <asm/sections.h>
 
+/* Virtualized guests may want to use it */
+EXPORT_SYMBOL(cpu_gdt_descr);
+
 /* Don't add a printk in there. printk relies on the PDA which is not initialized 
    yet. */
 static void __init clear_bss(void)
@@ -62,7 +66,7 @@ void __init x86_64_start_kernel(char * r
 
 	for (i = 0; i < IDT_ENTRIES; i++)
 		set_intr_gate(i, early_idt_handler);
-	asm volatile("lidt %0" :: "m" (idt_descr));
+	load_idt((const struct desc_ptr *)&idt_descr);
 
 	early_printk("Kernel alive\n");
 
@@ -70,7 +74,7 @@ void __init x86_64_start_kernel(char * r
 	 * switch to init_level4_pgt from boot_level4_pgt
 	 */
 	memcpy(init_level4_pgt, boot_level4_pgt, PTRS_PER_PGD*sizeof(pgd_t));
-	asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
+	write_cr3(__pa_symbol(&init_level4_pgt));
 
  	for (i = 0; i < NR_CPUS; i++)
  		cpu_pda(i) = &boot_cpu_pda[i];
Index: clean-start/arch/x86_64/kernel/process.c
===================================================================
--- clean-start.orig/arch/x86_64/kernel/process.c
+++ clean-start/arch/x86_64/kernel/process.c
@@ -42,6 +42,7 @@
 #include <asm/system.h>
 #include <asm/io.h>
 #include <asm/processor.h>
+#include <asm/system.h>
 #include <asm/i387.h>
 #include <asm/mmu_context.h>
 #include <asm/pda.h>
@@ -338,10 +339,10 @@ void __show_regs(struct pt_regs * regs)
 	rdmsrl(MSR_GS_BASE, gs); 
 	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); 
 
-	asm("movq %%cr0, %0": "=r" (cr0));
-	asm("movq %%cr2, %0": "=r" (cr2));
-	asm("movq %%cr3, %0": "=r" (cr3));
-	asm("movq %%cr4, %0": "=r" (cr4));
+	cr0 = read_cr0();
+	cr2 = read_cr2();
+	cr3 = read_cr3();
+	cr4 = read_cr4();
 
 	printk("FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", 
 	       fs,fsindex,gs,gsindex,shadowgs); 
@@ -578,7 +579,7 @@ __switch_to(struct task_struct *prev_p, 
 	/*
 	 * Reload esp0, LDT and the page table pointer:
 	 */
-	tss->rsp0 = next->rsp0;
+	load_rsp0(tss, next);
 
 	/* 
 	 * Switch DS and ES.
Index: clean-start/arch/x86_64/kernel/reboot.c
===================================================================
--- clean-start.orig/arch/x86_64/kernel/reboot.c
+++ clean-start/arch/x86_64/kernel/reboot.c
@@ -15,6 +15,7 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/apic.h>
+#include <asm/desc.h>
 
 /*
  * Power off function, if any
@@ -131,7 +132,7 @@ void machine_emergency_restart(void)
 		}
 
 		case BOOT_TRIPLE: 
-			__asm__ __volatile__("lidt (%0)": :"r" (&no_idt));
+			load_idt((const struct desc_ptr *)&no_idt);
 			__asm__ __volatile__("int3");
 
 			reboot_type = BOOT_KBD;
Index: clean-start/arch/x86_64/kernel/setup.c
===================================================================
--- clean-start.orig/arch/x86_64/kernel/setup.c
+++ clean-start/arch/x86_64/kernel/setup.c
@@ -327,10 +327,7 @@ static void discover_ebda(void)
 	 * there is a real-mode segmented pointer pointing to the 
 	 * 4K EBDA area at 0x40E
 	 */
-	ebda_addr = *(unsigned short *)EBDA_ADDR_POINTER;
-	ebda_addr <<= 4;
-
-	ebda_size = *(unsigned short *)(unsigned long)ebda_addr;
+	ebda_info(&ebda_addr, &ebda_size);
 
 	/* Round EBDA up to pages */
 	if (ebda_size == 0)
@@ -341,6 +338,12 @@ static void discover_ebda(void)
 		ebda_size = 64*1024;
 }
 
+/* Overridden in paravirt.c if CONFIG_PARAVIRT */
+void __attribute__((weak)) memory_setup(void)
+{
+	setup_memory_region();
+}
+
 void __init setup_arch(char **cmdline_p)
 {
 	printk(KERN_INFO "Command line: %s\n", saved_command_line);
@@ -356,7 +359,7 @@ void __init setup_arch(char **cmdline_p)
 	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
 	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
 #endif
-	setup_memory_region();
+	memory_setup();
 	copy_edd();
 
 	if (!MOUNT_ROOT_RDONLY)
@@ -561,7 +564,6 @@ static int __cpuinit get_model_name(stru
 	return 1;
 }
 
-
 static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 {
 	unsigned int n, dummy, eax, ebx, ecx, edx;
Index: clean-start/arch/x86_64/kernel/setup64.c
===================================================================
--- clean-start.orig/arch/x86_64/kernel/setup64.c
+++ clean-start/arch/x86_64/kernel/setup64.c
@@ -123,7 +123,7 @@ void pda_init(int cpu)
 	asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0)); 
 	/* Memory clobbers used to order PDA accessed */
 	mb();
-	wrmsrl(MSR_GS_BASE, pda);
+	wrmsrl(MSR_GS_BASE, (u64)pda);
 	mb();
 
 	pda->cpunumber = cpu; 
@@ -152,7 +152,7 @@ char boot_exception_stacks[(N_EXCEPTION_
 __attribute__((section(".bss.page_aligned")));
 
 /* May not be marked __init: used by software suspend */
-void syscall_init(void)
+void x86_64_syscall_init(void)
 {
 	/* 
 	 * LSTAR and STAR live in a bit strange symbiosis.
@@ -160,7 +160,7 @@ void syscall_init(void)
 	 * but only a 32bit target. LSTAR sets the 64bit rip. 	 
 	 */ 
 	wrmsrl(MSR_STAR,  ((u64)__USER32_CS)<<48  | ((u64)__KERNEL_CS)<<32); 
-	wrmsrl(MSR_LSTAR, system_call); 
+	wrmsrl(MSR_LSTAR, (u64)system_call);
 
 #ifdef CONFIG_IA32_EMULATION   		
 	syscall32_cpu_init ();
@@ -170,6 +170,12 @@ void syscall_init(void)
 	wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000); 
 }
 
+/* Overridden in paravirt.c if CONFIG_PARAVIRT */
+void __attribute__((weak)) syscall_init(void)
+{
+	x86_64_syscall_init();
+}
+
 void __cpuinit check_efer(void)
 {
 	unsigned long efer;
@@ -223,8 +229,8 @@ void __cpuinit cpu_init (void)
  		memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE);
 
 	cpu_gdt_descr[cpu].size = GDT_SIZE;
-	asm volatile("lgdt %0" :: "m" (cpu_gdt_descr[cpu]));
-	asm volatile("lidt %0" :: "m" (idt_descr));
+	load_gdt((const struct desc_ptr *)&cpu_gdt_descr[cpu]);
+	load_idt((const struct desc_ptr *)&idt_descr);
 
 	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
 	syscall_init();
@@ -267,6 +273,7 @@ void __cpuinit cpu_init (void)
 		BUG();
 	enter_lazy_tlb(&init_mm, me);
 
+	load_rsp0(t, &current->thread);
 	set_tss_desc(cpu, t);
 	load_TR_desc();
 	load_LDT(&init_mm.context);
Index: clean-start/arch/x86_64/kernel/smpboot.c
===================================================================
--- clean-start.orig/arch/x86_64/kernel/smpboot.c
+++ clean-start/arch/x86_64/kernel/smpboot.c
@@ -848,7 +848,7 @@ do_rest:
 	start_rip = setup_trampoline();
 
 	init_rsp = c_idle.idle->thread.rsp;
-	per_cpu(init_tss,cpu).rsp0 = init_rsp;
+	load_rsp0(&per_cpu(init_tss,cpu), &c_idle.idle->thread);
 	initial_code = start_secondary;
 	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
 
Index: clean-start/arch/x86_64/kernel/vmlinux.lds.S
===================================================================
--- clean-start.orig/arch/x86_64/kernel/vmlinux.lds.S
+++ clean-start/arch/x86_64/kernel/vmlinux.lds.S
@@ -61,6 +61,13 @@ SECTIONS
 	CONSTRUCTORS
 	} :data
 
+  .paravirtprobe : AT(ADDR(.paravirtprobe) - LOAD_OFFSET) {
+	__start_paravirtprobe = .;
+	*(.paravirtprobe);
+	*(.paravirtprobe_failsafe);
+	__stop_paravirtprobe = .;
+  }
+
   _edata = .;			/* End of data section */
 
   . = ALIGN(PAGE_SIZE);
@@ -180,14 +187,20 @@ SECTIONS
   __con_initcall_end = .;
   SECURITY_INIT
   . = ALIGN(8);
-  __alt_instructions = .;
   .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
+	__alt_instructions = .;
 	*(.altinstructions)
+  	__alt_instructions_end = .;
   }
-  __alt_instructions_end = .; 
   .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
 	*(.altinstr_replacement)
   }
+  . = ALIGN(8);
+  .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
+	__start_parainstructions = .;
+	*(.parainstructions)
+	__stop_parainstructions = .;
+  }
   /* .exit.text is discard at runtime, not link time, to deal with references
      from .altinstructions and .eh_frame */
   .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) }

--


* [RFC/PATCH PV_OPS X86_64 11/17] paravirt_ops - asm-offset updates
       [not found] <20070308054422.820010000@redhat.com>
                   ` (9 preceding siblings ...)
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 10/17] paravirt_ops - boot changes Steven Rostedt
@ 2007-03-08  6:02 ` Steven Rostedt
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 12/17] paravirt_ops - interrupt/exception changes Steven Rostedt
                   ` (5 subsequent siblings)
  16 siblings, 0 replies; 19+ messages in thread
From: Steven Rostedt @ 2007-03-08  6:02 UTC (permalink / raw)
  To: virtualization; +Cc: Chris Wright, Glauber de Oliveira Costa

plain text document attachment (xx-paravirt-asm-offsets.patch)
Add offsets for use of paravirt-ops in assembly.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>


Index: work-nopv/arch/x86_64/kernel/asm-offsets.c
===================================================================
--- work-nopv.orig/arch/x86_64/kernel/asm-offsets.c
+++ work-nopv/arch/x86_64/kernel/asm-offsets.c
@@ -15,6 +15,9 @@
 #include <asm/segment.h>
 #include <asm/thread_info.h>
 #include <asm/ia32.h>
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#endif
 
 #define DEFINE(sym, val) \
         asm volatile("\n->" #sym " %0 " #val : : "i" (val))
@@ -71,5 +74,17 @@ int main(void)
 	DEFINE(TSS_ist, offsetof(struct tss_struct, ist));
 	BLANK();
 	DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
+
+#ifdef CONFIG_PARAVIRT
+#define ENTRY(entry) DEFINE(PARAVIRT_ ## entry, offsetof(struct paravirt_ops, entry))
+	BLANK();
+	ENTRY(paravirt_enabled);
+	ENTRY(irq_disable);
+	ENTRY(irq_enable);
+	ENTRY(sysret);
+	ENTRY(iret);
+	ENTRY(read_cr2);
+	ENTRY(swapgs);
+#endif
 	return 0;
 }

--


* [RFC/PATCH PV_OPS X86_64 12/17] paravirt_ops - interrupt/exception changes
       [not found] <20070308054422.820010000@redhat.com>
                   ` (10 preceding siblings ...)
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 11/17] paravirt_ops - asm-offset updates Steven Rostedt
@ 2007-03-08  6:02 ` Steven Rostedt
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 13/17] paravirt_ops - time updates Steven Rostedt
                   ` (4 subsequent siblings)
  16 siblings, 0 replies; 19+ messages in thread
From: Steven Rostedt @ 2007-03-08  6:02 UTC (permalink / raw)
  To: virtualization; +Cc: Chris Wright, Glauber de Oliveira Costa

plain text document attachment (xx-paravirt-irqs.patch)
Interrupt updates for paravirt ops.


Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>



Index: clean-start/arch/x86_64/ia32/ia32entry.S
===================================================================
--- clean-start.orig/arch/x86_64/ia32/ia32entry.S
+++ clean-start/arch/x86_64/ia32/ia32entry.S
@@ -16,6 +16,13 @@
 #include <asm/irqflags.h>
 #include <linux/linkage.h>
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define ENABLE_INTERRUPTS(CLBR)		sti
+#define DISABLE_INTERRUPTS(CLBR)	cli
+#endif
+
 #define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8)
 
 	.macro IA32_ARG_FIXUP noebp=0
@@ -81,7 +88,7 @@ ENTRY(ia32_sysenter_target)
 	 * No need to follow this irqs on/off section: the syscall
 	 * disabled irqs, here we enable it straight after entry:
 	 */
-	sti	
+	ENABLE_INTERRUPTS(CLBR_NONE)
  	movl	%ebp,%ebp		/* zero extension */
 	pushq	$__USER32_DS
 	CFI_ADJUST_CFA_OFFSET 8
@@ -123,7 +130,7 @@ sysenter_do_call:	
 	call	*ia32_sys_call_table(,%rax,8)
 	movq	%rax,RAX-ARGOFFSET(%rsp)
 	GET_THREAD_INFO(%r10)
-	cli
+	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	testl	$_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
 	jnz	int_ret_from_sys_call
@@ -141,7 +148,7 @@ sysenter_do_call:	
 	CFI_REGISTER rip,rdx
 	TRACE_IRQS_ON
 	swapgs
-	sti		/* sti only takes effect after the next instruction */
+	ENABLE_INTERRUPTS(CLBR_NONE)		/* sti only takes effect after the next instruction */
 	/* sysexit */
 	.byte	0xf, 0x35
 
@@ -199,7 +206,7 @@ ENTRY(ia32_cstar_target)
 	 * No need to follow this irqs on/off section: the syscall
 	 * disabled irqs and here we enable it straight after entry:
 	 */
-	sti
+	ENABLE_INTERRUPTS(CLBR_NONE)
 	SAVE_ARGS 8,1,1
 	movl 	%eax,%eax	/* zero extension */
 	movq	%rax,ORIG_RAX-ARGOFFSET(%rsp)
@@ -232,7 +239,7 @@ cstar_do_call:	
 	call *ia32_sys_call_table(,%rax,8)
 	movq %rax,RAX-ARGOFFSET(%rsp)
 	GET_THREAD_INFO(%r10)
-	cli
+	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
 	jnz  int_ret_from_sys_call
@@ -307,7 +314,7 @@ ENTRY(ia32_syscall)
 	 * No need to follow this irqs on/off section: the syscall
 	 * disabled irqs and here we enable it straight after entry:
 	 */
-	sti
+	ENABLE_INTERRUPTS(CLBR_NONE)
 	movl %eax,%eax
 	pushq %rax
 	CFI_ADJUST_CFA_OFFSET 8
Index: clean-start/arch/x86_64/kernel/entry.S
===================================================================
--- clean-start.orig/arch/x86_64/kernel/entry.S
+++ clean-start/arch/x86_64/kernel/entry.S
@@ -51,6 +51,15 @@
 #include <asm/page.h>
 #include <asm/irqflags.h>
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define ENABLE_INTERRUPTS(x)	sti
+#define DISABLE_INTERRUPTS(x)	cli
+#define INTERRUPT_RETURN	iretq
+#define SYSRETQ			sysretq
+#define SWAPGS			swapgs
+#endif
 	.code64
 
 #ifndef CONFIG_PREEMPT
@@ -179,6 +188,7 @@ rff_trace:
 	CFI_ENDPROC
 END(ret_from_fork)
 
+
 /*
  * System call entry. Upto 6 arguments in registers are supported.
  *
@@ -223,7 +233,7 @@ ENTRY(system_call)
 	 * No need to follow this irqs off/on section - it's straight
 	 * and short:
 	 */
-	sti					
+	ENABLE_INTERRUPTS(CLBR_NONE)
 	SAVE_ARGS 8,1
 	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp) 
 	movq  %rcx,RIP-ARGOFFSET(%rsp)
@@ -270,7 +280,7 @@ sysret_careful:
 	bt $TIF_NEED_RESCHED,%edx
 	jnc sysret_signal
 	TRACE_IRQS_ON
-	sti
+	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushq %rdi
 	CFI_ADJUST_CFA_OFFSET 8
 	call schedule
@@ -281,7 +291,7 @@ sysret_careful:
 	/* Handle a signal */ 
 sysret_signal:
 	TRACE_IRQS_ON
-	sti
+	ENABLE_INTERRUPTS(CLBR_NONE)
 	testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
 	jz    1f
 
@@ -294,7 +304,7 @@ sysret_signal:
 1:	movl $_TIF_NEED_RESCHED,%edi
 	/* Use IRET because user could have changed frame. This
 	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
-	cli
+	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	jmp int_with_check
 	
@@ -326,7 +336,7 @@ tracesys:			 
  */
 	.globl int_ret_from_sys_call
 int_ret_from_sys_call:
-	cli
+	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	testl $3,CS-ARGOFFSET(%rsp)
 	je retint_restore_args
@@ -347,20 +357,20 @@ int_careful:
 	bt $TIF_NEED_RESCHED,%edx
 	jnc  int_very_careful
 	TRACE_IRQS_ON
-	sti
+	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushq %rdi
 	CFI_ADJUST_CFA_OFFSET 8
 	call schedule
 	popq %rdi
 	CFI_ADJUST_CFA_OFFSET -8
-	cli
+	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	jmp int_with_check
 
 	/* handle signals and tracing -- both require a full stack frame */
 int_very_careful:
 	TRACE_IRQS_ON
-	sti
+	ENABLE_INTERRUPTS(CLBR_NONE)
 	SAVE_REST
 	/* Check for syscall exit trace */	
 	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
@@ -383,7 +393,7 @@ int_signal:
 1:	movl $_TIF_NEED_RESCHED,%edi	
 int_restore_rest:
 	RESTORE_REST
-	cli
+	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	jmp int_with_check
 	CFI_ENDPROC
@@ -504,7 +514,7 @@ END(stub_rt_sigreturn)
 	CFI_DEF_CFA_REGISTER	rbp
 	testl $3,CS(%rdi)
 	je 1f
-	swapgs	
+	SWAPGS
 	/* irqcount is used to check if a CPU is already on an interrupt
 	   stack or not. While this is essentially redundant with preempt_count
 	   it is a little cheaper to use a separate counter in the PDA
@@ -525,7 +535,7 @@ ENTRY(common_interrupt)
 	interrupt do_IRQ
 	/* 0(%rsp): oldrsp-ARGOFFSET */
 ret_from_intr:
-	cli	
+	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	decl %gs:pda_irqcount
 	leaveq
@@ -552,13 +562,13 @@ retint_swapgs:	 	
 	/*
 	 * The iretq could re-enable interrupts:
 	 */
-	cli
+	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_IRETQ
-	swapgs 
+	SWAPGS
 	jmp restore_args
 
 retint_restore_args:				
-	cli
+	DISABLE_INTERRUPTS(CLBR_NONE)
 	/*
 	 * The iretq could re-enable interrupts:
 	 */
@@ -566,10 +576,14 @@ retint_restore_args:				
 restore_args:
 	RESTORE_ARGS 0,8,0						
 iret_label:	
-	iretq
+#ifdef CONFIG_PARAVIRT
+	INTERRUPT_RETURN
+ENTRY(native_iret)
+#endif
+1:	iretq
 
 	.section __ex_table,"a"
-	.quad iret_label,bad_iret	
+	.quad 1b, bad_iret
 	.previous
 	.section .fixup,"ax"
 	/* force a signal here? this matches i386 behaviour */
@@ -577,24 +591,27 @@ iret_label:	
 bad_iret:
 	movq $11,%rdi	/* SIGSEGV */
 	TRACE_IRQS_ON
-	sti
-	jmp do_exit			
-	.previous	
-	
+	ENABLE_INTERRUPTS(CLBR_NONE)
+	jmp do_exit
+	.previous
+#ifdef CONFIG_PARAVIRT
+ENDPROC(native_iret)
+#endif
+
 	/* edi: workmask, edx: work */
 retint_careful:
 	CFI_RESTORE_STATE
 	bt    $TIF_NEED_RESCHED,%edx
 	jnc   retint_signal
 	TRACE_IRQS_ON
-	sti
+	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushq %rdi
 	CFI_ADJUST_CFA_OFFSET	8
 	call  schedule
 	popq %rdi		
 	CFI_ADJUST_CFA_OFFSET	-8
 	GET_THREAD_INFO(%rcx)
-	cli
+	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	jmp retint_check
 	
@@ -602,14 +619,14 @@ retint_signal:
 	testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
 	jz    retint_swapgs
 	TRACE_IRQS_ON
-	sti
+	ENABLE_INTERRUPTS(CLBR_NONE)
 	SAVE_REST
 	movq $-1,ORIG_RAX(%rsp) 			
 	xorl %esi,%esi		# oldset
 	movq %rsp,%rdi		# &pt_regs
 	call do_notify_resume
 	RESTORE_REST
-	cli
+	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	movl $_TIF_NEED_RESCHED,%edi
 	GET_THREAD_INFO(%rcx)
@@ -722,7 +739,7 @@ END(spurious_interrupt)
 	rdmsr
 	testl %edx,%edx
 	js    1f
-	swapgs
+	SWAPGS
 	xorl  %ebx,%ebx
 1:
 	.if \ist
@@ -738,7 +755,7 @@ END(spurious_interrupt)
 	.if \ist
 	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
 	.endif
-	cli
+	DISABLE_INTERRUPTS(CLBR_NONE)
 	.if \irqtrace
 	TRACE_IRQS_OFF
 	.endif
@@ -767,10 +784,10 @@ paranoid_swapgs\trace:
 	.if \trace
 	TRACE_IRQS_IRETQ 0
 	.endif
-	swapgs
+	SWAPGS
 paranoid_restore\trace:
 	RESTORE_ALL 8
-	iretq
+	INTERRUPT_RETURN
 paranoid_userspace\trace:
 	GET_THREAD_INFO(%rcx)
 	movl threadinfo_flags(%rcx),%ebx
@@ -785,11 +802,11 @@ paranoid_userspace\trace:
 	.if \trace
 	TRACE_IRQS_ON
 	.endif
-	sti
+	ENABLE_INTERRUPTS(CLBR_NONE)
 	xorl %esi,%esi 			/* arg2: oldset */
 	movq %rsp,%rdi 			/* arg1: &pt_regs */
 	call do_notify_resume
-	cli
+	DISABLE_INTERRUPTS(CLBR_NONE)
 	.if \trace
 	TRACE_IRQS_OFF
 	.endif
@@ -798,9 +815,9 @@ paranoid_schedule\trace:
 	.if \trace
 	TRACE_IRQS_ON
 	.endif
-	sti
+	ENABLE_INTERRUPTS(CLBR_NONE)
 	call schedule
-	cli
+	DISABLE_INTERRUPTS(CLBR_NONE)
 	.if \trace
 	TRACE_IRQS_OFF
 	.endif
@@ -851,7 +868,7 @@ KPROBE_ENTRY(error_entry)
 	testl $3,CS(%rsp)
 	je  error_kernelspace
 error_swapgs:	
-	swapgs
+	SWAPGS
 error_sti:	
 	movq %rdi,RDI(%rsp) 	
 	movq %rsp,%rdi
@@ -862,7 +879,7 @@ error_sti:	
 error_exit:		
 	movl %ebx,%eax		
 	RESTORE_REST
-	cli
+	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	GET_THREAD_INFO(%rcx)	
 	testl %eax,%eax
@@ -875,7 +892,7 @@ error_exit:		
 	 * The iret might restore flags:
 	 */
 	TRACE_IRQS_IRETQ
-	swapgs 
+	SWAPGS
 	RESTORE_ARGS 0,8,0						
 	jmp iret_label
 	CFI_ENDPROC
@@ -904,12 +921,12 @@ ENTRY(load_gs_index)
 	CFI_STARTPROC
 	pushf
 	CFI_ADJUST_CFA_OFFSET 8
-	cli
-        swapgs
+	DISABLE_INTERRUPTS(CLBR_NONE)
+        SWAPGS
 gs_change:     
         movl %edi,%gs   
 2:	mfence		/* workaround */
-	swapgs
+	SWAPGS
         popf
 	CFI_ADJUST_CFA_OFFSET -8
         ret
@@ -923,7 +940,7 @@ ENDPROC(load_gs_index)
         .section .fixup,"ax"
 	/* running with kernelgs */
 bad_gs: 
-	swapgs			/* switch back to user gs */
+	SWAPGS			/* switch back to user gs */
 	xorl %eax,%eax
         movl %eax,%gs
         jmp  2b
@@ -1064,6 +1081,13 @@ KPROBE_ENTRY(int3)
  	CFI_ENDPROC
 KPROBE_END(int3)
 
+#ifdef CONFIG_PARAVIRT
+ENTRY(native_sysret)
+	sysretq
+ENDPROC(native_sysret)
+
+#endif /* CONFIG_PARAVIRT */
+
 ENTRY(overflow)
 	zeroentry do_overflow
 END(overflow)
Index: clean-start/arch/x86_64/kernel/i8259.c
===================================================================
--- clean-start.orig/arch/x86_64/kernel/i8259.c
+++ clean-start/arch/x86_64/kernel/i8259.c
@@ -77,7 +77,7 @@ BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd) BU
 	IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f)
 
 /* for the irq vectors */
-static void (*interrupt[NR_VECTORS - FIRST_EXTERNAL_VECTOR])(void) = {
+void (*interrupt[NR_VECTORS - FIRST_EXTERNAL_VECTOR])(void) = {
 					  IRQLIST_16(0x2), IRQLIST_16(0x3),
 	IRQLIST_16(0x4), IRQLIST_16(0x5), IRQLIST_16(0x6), IRQLIST_16(0x7),
 	IRQLIST_16(0x8), IRQLIST_16(0x9), IRQLIST_16(0xa), IRQLIST_16(0xb),
@@ -501,7 +501,10 @@ static int __init init_timer_sysfs(void)
 
 device_initcall(init_timer_sysfs);
 
-void __init init_IRQ(void)
+/* Overridden in paravirt.c */
+void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
+
+void __init native_init_IRQ(void)
 {
 	int i;
 
Index: clean-start/arch/x86_64/kernel/traps.c
===================================================================
--- clean-start.orig/arch/x86_64/kernel/traps.c
+++ clean-start/arch/x86_64/kernel/traps.c
@@ -1067,6 +1067,7 @@ asmlinkage void math_state_restore(void)
 	task_thread_info(me)->status |= TS_USEDFPU;
 	me->fpu_counter++;
 }
+EXPORT_SYMBOL_GPL(math_state_restore);
 
 void __init trap_init(void)
 {
Index: clean-start/include/asm-x86_64/irq.h
===================================================================
--- clean-start.orig/include/asm-x86_64/irq.h
+++ clean-start/include/asm-x86_64/irq.h
@@ -46,6 +46,9 @@ static __inline__ int irq_canonicalize(i
 extern void fixup_irqs(cpumask_t map);
 #endif
 
+void init_IRQ(void);
+void native_init_IRQ(void);
+
 #define __ARCH_HAS_DO_SOFTIRQ 1
 
 #endif /* _ASM_IRQ_H */
Index: clean-start/include/asm-x86_64/irqflags.h
===================================================================
--- clean-start.orig/include/asm-x86_64/irqflags.h
+++ clean-start/include/asm-x86_64/irqflags.h
@@ -11,6 +11,15 @@
 #define _ASM_IRQFLAGS_H
 
 #ifndef __ASSEMBLY__
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+
+static inline int raw_irqs_disabled_flags(unsigned long flags)
+{
+	return !(flags & (1 << 9));	/* IF is bit 9 of rflags */
+}
+#else
+
 /*
  * Interrupt control:
  */
@@ -30,8 +39,6 @@ static inline unsigned long __raw_local_
 	return flags;
 }
 
-#define raw_local_save_flags(flags) \
-		do { (flags) = __raw_local_save_flags(); } while (0)
 
 static inline void raw_local_irq_restore(unsigned long flags)
 {
@@ -100,8 +107,6 @@ static inline unsigned long __raw_local_
 	return flags;
 }
 
-#define raw_local_irq_save(flags) \
-		do { (flags) = __raw_local_irq_save(); } while (0)
 
 static inline int raw_irqs_disabled(void)
 {
@@ -128,6 +133,7 @@ static inline void halt(void)
 	__asm__ __volatile__("hlt": : :"memory");
 }
 
+#endif /* CONFIG_PARAVIRT */
 #else /* __ASSEMBLY__: */
 # ifdef CONFIG_TRACE_IRQFLAGS
 #  define TRACE_IRQS_ON		call trace_hardirqs_on_thunk
@@ -138,4 +144,9 @@ static inline void halt(void)
 # endif
 #endif
 
+#define raw_local_save_flags(flags) \
+		do { (flags) = __raw_local_save_flags(); } while (0)
+
+#define raw_local_irq_save(flags) \
+		do { (flags) = __raw_local_irq_save(); } while (0)
 #endif

--


* [RFC/PATCH PV_OPS X86_64 13/17] paravirt_ops - time updates
       [not found] <20070308054422.820010000@redhat.com>
                   ` (11 preceding siblings ...)
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 12/17] paravirt_ops - interrupt/exception changes Steven Rostedt
@ 2007-03-08  6:02 ` Steven Rostedt
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 14/17] paravirt_ops - vsyscall Steven Rostedt
                   ` (3 subsequent siblings)
  16 siblings, 0 replies; 19+ messages in thread
From: Steven Rostedt @ 2007-03-08  6:02 UTC (permalink / raw)
  To: virtualization; +Cc: Chris Wright, Glauber de Oliveira Costa

plain text document attachment (xx-paravirt-time.patch)
General time changes for paravirt_ops.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>


Index: clean-start/include/asm-x86_64/time.h
===================================================================
--- /dev/null
+++ clean-start/include/asm-x86_64/time.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_X86_64_TIME_H
+#define _ASM_X86_64_TIME_H
+
+void time_init_hook(void);
+unsigned long do_get_cmos_time(void);
+void do_set_rtc_mmss(unsigned long nowtime);
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else /* !CONFIG_PARAVIRT */
+
+#define get_wallclock() do_get_cmos_time()
+#define set_wallclock(x) do_set_rtc_mmss(x)
+#define do_time_init() time_init_hook()
+
+#endif /* CONFIG_PARAVIRT */
+
+#endif
Index: clean-start/arch/x86_64/kernel/time.c
===================================================================
--- clean-start.orig/arch/x86_64/kernel/time.c
+++ clean-start/arch/x86_64/kernel/time.c
@@ -42,6 +42,7 @@
 #include <linux/cpufreq.h>
 #include <linux/hpet.h>
 #include <asm/apic.h>
+#include <asm/time.h>
 
 #ifdef CONFIG_CPU_FREQ
 static void cpufreq_delayed_get(void);
@@ -204,17 +205,11 @@ EXPORT_SYMBOL(profile_pc);
  * sheet for details.
  */
 
-static void set_rtc_mmss(unsigned long nowtime)
+void do_set_rtc_mmss(unsigned long nowtime)
 {
 	int real_seconds, real_minutes, cmos_minutes;
 	unsigned char control, freq_select;
 
-/*
- * IRQs are disabled when we're called from the timer interrupt,
- * no need for spin_lock_irqsave()
- */
-
-	spin_lock(&rtc_lock);
 
 /*
  * Tell the clock it's being set and stop it.
@@ -263,9 +258,18 @@ static void set_rtc_mmss(unsigned long n
 	CMOS_WRITE(control, RTC_CONTROL);
 	CMOS_WRITE(freq_select, RTC_FREQ_SELECT);
 
-	spin_unlock(&rtc_lock);
 }
 
+static void set_rtc_mmss(unsigned long nowtime)
+{
+/*
+ * IRQs are disabled when we're called from the timer interrupt,
+ * no need for spin_lock_irqsave()
+ */
+	spin_lock(&rtc_lock);
+	set_wallclock(nowtime);
+	spin_unlock(&rtc_lock);
+}
 
 /* monotonic_clock(): returns # of nanoseconds passed since time_init()
  *		Note: This function is required to return accurate
@@ -494,13 +498,11 @@ unsigned long long sched_clock(void)
 	return cycles_2_ns(a);
 }
 
-static unsigned long get_cmos_time(void)
+unsigned long do_get_cmos_time(void)
 {
 	unsigned int year, mon, day, hour, min, sec;
-	unsigned long flags;
 	unsigned extyear = 0;
 
-	spin_lock_irqsave(&rtc_lock, flags);
 
 	do {
 		sec = CMOS_READ(RTC_SECONDS);
@@ -516,7 +518,6 @@ static unsigned long get_cmos_time(void)
 #endif
 	} while (sec != CMOS_READ(RTC_SECONDS));
 
-	spin_unlock_irqrestore(&rtc_lock, flags);
 
 	/*
 	 * We know that x86-64 always uses BCD format, no need to check the
@@ -545,6 +546,15 @@ static unsigned long get_cmos_time(void)
 	return mktime(year, mon, day, hour, min, sec);
 }
 
+static unsigned long get_cmos_time(void)
+{
+	unsigned long retval, flags;
+	/* XXX: lock held longer than necessary. */
+	spin_lock_irqsave(&rtc_lock, flags);
+	retval = get_wallclock();
+	spin_unlock_irqrestore(&rtc_lock, flags);
+	return retval;
+}
 #ifdef CONFIG_CPU_FREQ
 
 /* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
@@ -893,6 +903,11 @@ static struct irqaction irq0 = {
 	timer_interrupt, IRQF_DISABLED, CPU_MASK_NONE, "timer", NULL, NULL
 };
 
+void time_init_hook(void)
+{
+	setup_irq(0, &irq0);
+}
+
 void __init time_init(void)
 {
 	if (nohpet)
@@ -932,7 +947,7 @@ void __init time_init(void)
 	vxtime.tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz;
 	vxtime.last_tsc = get_cycles_sync();
 	set_cyc2ns_scale(cpu_khz);
-	setup_irq(0, &irq0);
+	do_time_init();
 
 #ifndef CONFIG_SMP
 	time_init_gtod();
Index: clean-start/include/asm-x86_64/timex.h
===================================================================
--- clean-start.orig/include/asm-x86_64/timex.h
+++ clean-start/include/asm-x86_64/timex.h
@@ -31,14 +31,29 @@ static __always_inline cycles_t get_cycl
 {
 	unsigned long long ret;
 	unsigned eax;
+	unsigned int (*fn)(unsigned int) = &cpuid_eax;
 	/* Don't do an additional sync on CPUs where we know
 	   RDTSC is already synchronous. */
-	alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
-			  "=a" (eax), "0" (1) : "ebx","ecx","edx","memory");
+	alternative_io("call *%3", ASM_NOP2, X86_FEATURE_SYNC_RDTSC, "=a" (eax),
+		"D" (1), "m" (fn) : "rcx", "rdx", "rsi", "r8", "r9", "r10", "r11", "memory");
 	rdtscll(ret);
 	return ret;
 }
 
+/* Inside a vsyscall, we cannot call paravirt functions (like rdtsc
+ * and cpuid). For the host, use this function instead. */
+static __always_inline cycles_t vget_cycles_sync(void)
+{
+	unsigned lo, hi;
+	unsigned eax;
+	/* Don't do an additional sync on CPUs where we know
+	   RDTSC is already synchronous. */
+	alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
+			  "=a" (eax), "0" (1) : "ebx","ecx","edx","memory");
+	/* "=A" does not name the edx:eax pair on x86-64; read both halves */
+	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
+	return ((cycles_t)hi << 32) | lo;
+}
 extern unsigned int cpu_khz;
 
 extern int read_current_timer(unsigned long *timer_value);

--


* [RFC/PATCH PV_OPS X86_64 14/17] paravirt_ops - vsyscall
       [not found] <20070308054422.820010000@redhat.com>
                   ` (12 preceding siblings ...)
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 13/17] paravirt_ops - time updates Steven Rostedt
@ 2007-03-08  6:02 ` Steven Rostedt
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 15/17] paravirt_ops - kconfig Steven Rostedt
                   ` (2 subsequent siblings)
  16 siblings, 0 replies; 19+ messages in thread
From: Steven Rostedt @ 2007-03-08  6:02 UTC (permalink / raw)
  To: virtualization; +Cc: Chris Wright, Glauber de Oliveira Costa

plain text document attachment (xx-paravirt-vsyscall.patch)
vsyscall interface updates for paravirt ops.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>


Index: clean-start/arch/x86_64/kernel/vsyscall.c
===================================================================
--- clean-start.orig/arch/x86_64/kernel/vsyscall.c
+++ clean-start/arch/x86_64/kernel/vsyscall.c
@@ -73,7 +73,7 @@ static __always_inline void do_vgettimeo
 		usec = __xtime.tv_nsec / 1000;
 
 		if (__vxtime.mode != VXTIME_HPET) {
-			t = get_cycles_sync();
+			t = vget_cycles_sync();
 			if (t < __vxtime.last_tsc)
 				t = __vxtime.last_tsc;
 			usec += ((t - __vxtime.last_tsc) *
@@ -147,8 +147,8 @@ time_t __vsyscall(1) vtime(time_t *t)
 long __vsyscall(2)
 vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
 {
-	unsigned int dummy, p;
-	unsigned long j = 0;
+	unsigned int p;
+	unsigned long dummy, dummy2, j = 0;
 
 	/* Fast cache - only recompute value once per jiffies and avoid
 	   relatively costly rdtscp/cpuid otherwise.
@@ -162,7 +162,8 @@ vgetcpu(unsigned *cpu, unsigned *node, s
 		p = tcache->blob[1];
 	} else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
 		/* Load per CPU data from RDTSCP */
-		rdtscp(dummy, dummy, p);
+		/* rdtscp() cannot be called due to the paravirt indirection */
+		asm("rdtscp" : "=a" (dummy), "=d" (dummy2), "=c" (p));
 	} else {
 		/* Load per CPU data from GDT */
 		asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
@@ -256,7 +257,11 @@ static void __cpuinit vsyscall_set_cpu(i
 	node = cpu_to_node[cpu];
 #endif
 	if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP))
-		write_rdtscp_aux((node << 12) | cpu);
+		/* This is write_rdtscp_aux. It cannot be called directly
+		 * due to the paravirt indirection */
+		asm("wrmsr"  :  /* no output */
+			     :  "d"(0),
+				"a" ((node << 12) | cpu), "c" (0xc0000103));
 
 	/* Store cpu number in limit so that it can be loaded quickly
 	   in user space in vgetcpu.
@@ -285,8 +290,12 @@ cpu_vsyscall_notifier(struct notifier_bl
 
 static void __init map_vsyscall(void)
 {
+#ifndef CONFIG_PARAVIRT
 	extern char __vsyscall_0;
 	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
+#else
+	unsigned long physaddr_page0 = __pa_symbol(paravirt_ops.vsyscall_page);
+#endif
 
 	/* Note that VSYSCALL_MAPPED_PAGES must agree with the code below. */
 	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
@@ -299,7 +308,14 @@ static int __init vsyscall_init(void)
 	BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
 	BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
 	BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
-	map_vsyscall();
+#ifdef CONFIG_PARAVIRT
+	if (paravirt_ops.vsyscall_page)
+#endif
+		map_vsyscall();
+#ifdef CONFIG_PARAVIRT
+	else
+		__sysctl_vsyscall = 0;
+#endif
 #ifdef CONFIG_SYSCTL
 	register_sysctl_table(kernel_root_table2, 0);
 #endif

--


* [RFC/PATCH PV_OPS X86_64 15/17] paravirt_ops - kconfig
       [not found] <20070308054422.820010000@redhat.com>
                   ` (13 preceding siblings ...)
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 14/17] paravirt_ops - vsyscall Steven Rostedt
@ 2007-03-08  6:02 ` Steven Rostedt
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 16/17] paravirt_ops - touch ups Steven Rostedt
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 17/17] paravirt_ops - pda entry Steven Rostedt
  16 siblings, 0 replies; 19+ messages in thread
From: Steven Rostedt @ 2007-03-08  6:02 UTC (permalink / raw)
  To: virtualization; +Cc: Chris Wright, Glauber de Oliveira Costa

plain text document attachment (xx-paravirt-kconfig.patch)
Add the config options for paravirt_ops on x86_64.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>


Index: clean-start/arch/x86_64/Kconfig.debug
===================================================================
--- clean-start.orig/arch/x86_64/Kconfig.debug
+++ clean-start/arch/x86_64/Kconfig.debug
@@ -55,6 +55,17 @@ config DEBUG_STACK_USAGE
 
 	  This option will slow down process creation somewhat.
 
+config DEBUG_PARAVIRT
+       bool "Enable some paravirtualization debugging"
+       default y
+       depends on PARAVIRT && DEBUG_KERNEL
+       help
+         Currently deliberately clobbers regs which are allowed to be
+         clobbered in inlined paravirt hooks, even in native mode.
+         If turning this off solves a problem, then DISABLE_INTERRUPTS() or
+         ENABLE_INTERRUPTS() is lying about what registers can be clobbered.
+
+
 #config X86_REMOTE_DEBUG
 #       bool "kgdb debugging stub"
 
Index: clean-start/arch/x86_64/Kconfig
===================================================================
--- clean-start.orig/arch/x86_64/Kconfig
+++ clean-start/arch/x86_64/Kconfig
@@ -349,6 +349,18 @@ config NODES_SHIFT
 
 # Dummy CONFIG option to select ACPI_NUMA from drivers/acpi/Kconfig.
 
+config PARAVIRT
+       bool "Paravirtualization support (EXPERIMENTAL)"
+       depends on EXPERIMENTAL
+       help
+         Paravirtualization is a way of running multiple instances of
+         Linux on the same machine, under a hypervisor.  This option
+         changes the kernel so it can modify itself when it is run
+         under a hypervisor, improving performance significantly.
+         However, when run without a hypervisor the kernel is
+         theoretically slower.  If in doubt, say N.
+
+
 config X86_64_ACPI_NUMA
        bool "ACPI NUMA detection"
        depends on NUMA

--


* [RFC/PATCH PV_OPS X86_64 16/17] paravirt_ops - touch ups
       [not found] <20070308054422.820010000@redhat.com>
                   ` (14 preceding siblings ...)
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 15/17] paravirt_ops - kconfig Steven Rostedt
@ 2007-03-08  6:02 ` Steven Rostedt
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 17/17] paravirt_ops - pda entry Steven Rostedt
  16 siblings, 0 replies; 19+ messages in thread
From: Steven Rostedt @ 2007-03-08  6:02 UTC (permalink / raw)
  To: virtualization; +Cc: Chris Wright, Glauber de Oliveira Costa

plain text document attachment (xx-paravirt-touchups.patch)
Some miscellaneous cleanups.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>


Index: clean-start/arch/x86_64/ia32/syscall32.c
===================================================================
--- clean-start.orig/arch/x86_64/ia32/syscall32.c
+++ clean-start/arch/x86_64/ia32/syscall32.c
@@ -119,5 +119,5 @@ void syscall32_cpu_init(void)
 	checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
 	checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
 
-	wrmsrl(MSR_CSTAR, ia32_cstar_target);
+	wrmsrl(MSR_CSTAR, (u64)ia32_cstar_target);
 }
Index: clean-start/arch/x86_64/mm/pageattr.c
===================================================================
--- clean-start.orig/arch/x86_64/mm/pageattr.c
+++ clean-start/arch/x86_64/mm/pageattr.c
@@ -81,7 +81,7 @@ static void flush_kernel_map(void *arg)
 		void *adr = page_address(pg);
 		if (cpu_has_clflush)
 			cache_flush_page(adr);
-		__flush_tlb_one(adr);
+		__flush_tlb_one((u64)adr);
 	}
 }
 
Index: clean-start/include/linux/irqflags.h
===================================================================
--- clean-start.orig/include/linux/irqflags.h
+++ clean-start/include/linux/irqflags.h
@@ -74,11 +74,11 @@
 #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
 
 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
-static inline void safe_halt(void)
-{
-	trace_hardirqs_on();
-	raw_safe_halt();
-}
+#define safe_halt()						\
+	do {							\
+		trace_hardirqs_on();				\
+		raw_safe_halt();				\
+	} while (0)
 
 #define local_save_flags(flags)		raw_local_save_flags(flags)
 

--


* [RFC/PATCH PV_OPS X86_64 17/17] paravirt_ops - pda entry
       [not found] <20070308054422.820010000@redhat.com>
                   ` (15 preceding siblings ...)
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 16/17] paravirt_ops - touch ups Steven Rostedt
@ 2007-03-08  6:02 ` Steven Rostedt
  16 siblings, 0 replies; 19+ messages in thread
From: Steven Rostedt @ 2007-03-08  6:02 UTC (permalink / raw)
  To: virtualization; +Cc: Chris Wright, Glauber de Oliveira Costa

plain text document attachment (xx-paravirt-pda.patch)
We don't actually use this yet, but we started to. This patch adds a
vcpu entry to the PDA. It can come in handy, but we are not sure yet
whether we want it, which is why it is in its own little patch.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>


Index: work-nopv/arch/x86_64/kernel/asm-offsets.c
===================================================================
--- work-nopv.orig/arch/x86_64/kernel/asm-offsets.c
+++ work-nopv/arch/x86_64/kernel/asm-offsets.c
@@ -48,6 +48,9 @@ int main(void)
 	ENTRY(cpunumber);
 	ENTRY(irqstackptr);
 	ENTRY(data_offset);
+	/* XXX: this should probably be a paravirt_ops stub, to be filled in
+	 * with the hypervisor code */
+	ENTRY(vcpu);
 	BLANK();
 #undef ENTRY
 #ifdef CONFIG_IA32_EMULATION
Index: clean-start/include/asm-x86_64/pda.h
===================================================================
--- clean-start.orig/include/asm-x86_64/pda.h
+++ clean-start/include/asm-x86_64/pda.h
@@ -29,6 +29,7 @@ struct x8664_pda {
 	short isidle;
 	struct mm_struct *active_mm;
 	unsigned apic_timer_irqs;
+	void *vcpu;
 } ____cacheline_aligned_in_smp;
 
 extern struct x8664_pda *_cpu_pda[];

--


* Re: [RFC/PATCH PV_OPS X86_64 05/17] paravirt_ops - segments
  2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 05/17] paravirt_ops - segments Steven Rostedt
@ 2007-03-08  6:49   ` Chris Wright
  2007-03-08 13:06     ` Steven Rostedt
  0 siblings, 1 reply; 19+ messages in thread
From: Chris Wright @ 2007-03-08  6:49 UTC (permalink / raw)
  To: Steven Rostedt; +Cc: Chris Wright, virtualization, Glauber de Oliveira Costa

* Steven Rostedt (rostedt@goodmis.org) wrote:
> --- clean-start.orig/include/asm-x86_64/segment.h
> +++ clean-start/include/asm-x86_64/segment.h
> @@ -37,8 +37,14 @@
>  #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
>  #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
>  
> +#define __HV_CS 0x80  /* 16*8 */
> +#define __HV_DS 0x88  /* 17*8 */
> +
> +#define GDT_ENTRY_HV_CS 16
> +#define GDT_ENTRY_HV_DS 17
> +

Hmm, this one is not technically needed for pv_ops, looks like lguest
bleeding in ;-)

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [RFC/PATCH PV_OPS X86_64 05/17] paravirt_ops - segments
  2007-03-08  6:49   ` Chris Wright
@ 2007-03-08 13:06     ` Steven Rostedt
  0 siblings, 0 replies; 19+ messages in thread
From: Steven Rostedt @ 2007-03-08 13:06 UTC (permalink / raw)
  To: Chris Wright; +Cc: virtualization, Glauber de Oliveira Costa

On Wed, 2007-03-07 at 22:49 -0800, Chris Wright wrote:
> * Steven Rostedt (rostedt@goodmis.org) wrote:
> > --- clean-start.orig/include/asm-x86_64/segment.h
> > +++ clean-start/include/asm-x86_64/segment.h
> > @@ -37,8 +37,14 @@
> >  #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
> >  #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
> >  
> > +#define __HV_CS 0x80  /* 16*8 */
> > +#define __HV_DS 0x88  /* 17*8 */
> > +
> > +#define GDT_ENTRY_HV_CS 16
> > +#define GDT_ENTRY_HV_DS 17
> > +
> 
> Hmm, this one is not technically needed for pv_ops, looks like lguest
> bleeding in ;-)

I thought this could be HV-agnostic, so I slipped it in.  But that's
questionable, hence its own separate patch.
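
For reference, the values in the hunk follow the standard x86 selector
encoding: a selector is (index << 3) | TI | RPL, so a GDT descriptor at
RPL 0 gets selector index * 8 -- which is where the 16*8 and 17*8
comments come from:

	/* Illustrative only: kernel (TI = 0, RPL = 0) selector for a
	 * given GDT index. */
	#define GDT_SEL(idx)	((idx) << 3)
	/* GDT_SEL(16) == 0x80 == __HV_CS, GDT_SEL(17) == 0x88 == __HV_DS */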

-- Steve

^ permalink raw reply	[flat|nested] 19+ messages in thread

end of thread, other threads:[~2007-03-08 13:06 UTC | newest]

Thread overview: 19+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
     [not found] <20070308054422.820010000@redhat.com>
2007-03-08  6:01 ` [RFC/PATCH PV_OPS X86_64 01/17] paravirt_ops - core changes Steven Rostedt
2007-03-08  6:01 ` [RFC/PATCH PV_OPS X86_64 02/17] paravirt_ops - msr Steven Rostedt
2007-03-08  6:01 ` [RFC/PATCH PV_OPS X86_64 03/17] paravirt_ops - system routines Steven Rostedt
2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 04/17] paravirt_ops - apic header updates Steven Rostedt
2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 05/17] paravirt_ops - segments Steven Rostedt
2007-03-08  6:49   ` Chris Wright
2007-03-08 13:06     ` Steven Rostedt
2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 06/17] paravirt_ops - miscellaneous updates Steven Rostedt
2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 07/17] paravirt_ops - descriptor changes Steven Rostedt
2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 08/17] paravirt_ops - memory management Steven Rostedt
2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 09/17] paravirt_ops - bios changes Steven Rostedt
2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 10/17] paravirt_ops - boot changes Steven Rostedt
2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 11/17] paravirt_ops - asm-offset updates Steven Rostedt
2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 12/17] paravirt_ops - interrupt/exception changes Steven Rostedt
2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 13/17] paravirt_ops - time updates Steven Rostedt
2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 14/17] paravirt_ops - vsyscall Steven Rostedt
2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 15/17] paravirt_ops - kconfig Steven Rostedt
2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 16/17] paravirt_ops - touch ups Steven Rostedt
2007-03-08  6:02 ` [RFC/PATCH PV_OPS X86_64 17/17] paravirt_ops - pda entry Steven Rostedt
