From: David Mosberger <davidm@hpl.hp.com>
To: linux-ia64@vger.kernel.org
Subject: [Linux-ia64] kernel update (relative to v2.4.0-test1)
Date: Thu, 01 Jun 2000 08:54:13 +0000 [thread overview]
Message-ID: <marc-linux-ia64-105590678205111@msgid-missing> (raw)
Here is a quick kernel update. I'm not sure this is distribution
material, but it should work better than last week's patch. Some
of the improvements:
- "branch long" emulation by Stephan Zeisset is finally in
(sorry about the delay...).
- added Stephane Eranian's /proc/palinfo "driver"; very nice
to find out details on your CPU...
- SMP should work again
- ptrace interface should work again (at least strace works...)
- the unwind support now has a real cache and should be
SMP-safe
- SMP-related CPU initialization clean up; the bootstrap
processor now does an identify_cpu() early in the
bootprocess (same as in the UP case); this is necessary
because we now rely on PAL info stored in cpu_data[] to
bootstrap the system
- some more ia-32 signal fixes from Don
- unaligned accesses in big-endian mode now result in SIGBUS (instead
of silently bogus data) (Stephane Eranian)
- updated for 2.4.0-test1
- remove Itanium dependencies from pgtable.h again (there is no desire
or need to make the kernel implementation specific)
- speculative accesses that result in an alternate d-tlb fault are now
always NaTed
- make simscsi driver SMP-safe
The full diff is available at
ftp://ftp.kernel.org/pub/linux/kernel/ports/ia64/linux-2.4.0-test1-ia64-000531.diff*
as usual.
--david
diff -urN linux-davidm/arch/ia64/config.in linux-2.4.0-test1-lia/arch/ia64/config.in
--- linux-davidm/arch/ia64/config.in Thu Jun 1 01:38:40 2000
+++ linux-2.4.0-test1-lia/arch/ia64/config.in Thu Jun 1 00:59:33 2000
@@ -4,7 +4,6 @@
comment 'General setup'
define_bool CONFIG_IA64 y
-define_bool CONFIG_ITANIUM y # easy choice for now... ;-)
define_bool CONFIG_ISA n
define_bool CONFIG_SBUS n
@@ -22,6 +21,8 @@
64KB CONFIG_IA64_PAGE_SIZE_64KB" 16KB
if [ "$CONFIG_IA64_DIG" = "y" ]; then
+ define_bool CONFIG_ITANIUM y
+ define_bool CONFIG_IA64_BRL_EMU y
bool ' Enable Itanium A-step specific code' CONFIG_ITANIUM_ASTEP_SPECIFIC
bool ' Enable Itanium A1-step specific code' CONFIG_ITANIUM_A1_SPECIFIC
bool ' Enable use of global TLB purge instruction (ptc.g)' CONFIG_ITANIUM_PTCG
@@ -44,6 +45,7 @@
bool 'SMP support' CONFIG_SMP
bool 'Performance monitor support' CONFIG_PERFMON
+bool '/proc/palinfo support' CONFIG_IA64_PALINFO
bool 'Networking support' CONFIG_NET
bool 'System V IPC' CONFIG_SYSVIPC
diff -urN linux-davidm/arch/ia64/hp/hpsim_irq.c linux-2.4.0-test1-lia/arch/ia64/hp/hpsim_irq.c
--- linux-davidm/arch/ia64/hp/hpsim_irq.c Fri Mar 10 15:24:02 2000
+++ linux-2.4.0-test1-lia/arch/ia64/hp/hpsim_irq.c Thu Jun 1 01:00:14 2000
@@ -5,7 +5,8 @@
* Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
*/
-#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/irq.h>
static unsigned int
diff -urN linux-davidm/arch/ia64/ia32/ia32_signal.c linux-2.4.0-test1-lia/arch/ia64/ia32/ia32_signal.c
--- linux-davidm/arch/ia64/ia32/ia32_signal.c Thu Jun 1 01:38:40 2000
+++ linux-2.4.0-test1-lia/arch/ia64/ia32/ia32_signal.c Thu Jun 1 01:00:27 2000
@@ -55,7 +55,7 @@
};
static int
-copy_siginfo_to_user32(siginfo_t32 *to, siginfo_t *from)
+copy_siginfo_to_user32 (siginfo_t32 *to, siginfo_t *from)
{
int err;
@@ -326,8 +326,8 @@
? current->exec_domain->signal_invmap[sig]
: sig),
&frame->sig);
- err |= __put_user(&frame->info, &frame->pinfo);
- err |= __put_user(&frame->uc, &frame->puc);
+ err |= __put_user((long)&frame->info, &frame->pinfo);
+ err |= __put_user((long)&frame->uc, &frame->puc);
err |= copy_siginfo_to_user32(&frame->info, info);
/* Create the ucontext. */
diff -urN linux-davidm/arch/ia64/kernel/Makefile linux-2.4.0-test1-lia/arch/ia64/kernel/Makefile
--- linux-davidm/arch/ia64/kernel/Makefile Thu Jun 1 01:38:40 2000
+++ linux-2.4.0-test1-lia/arch/ia64/kernel/Makefile Thu Jun 1 01:00:52 2000
@@ -1,11 +1,6 @@
#
# Makefile for the linux kernel.
#
-# Note! Dependencies are done automagically by 'make dep', which also
-# removes any old dependencies. DON'T put your own dependencies here
-# unless it's something special (ie not a .c file).
-#
-# Note 2! The CFLAGS definitions are now in the main makefile...
.S.s:
$(CPP) $(AFLAGS) -o $*.s $<
@@ -24,6 +19,10 @@
O_OBJS += machvec.o
endif
+ifdef CONFIG_IA64_PALINFO
+O_OBJS += palinfo.o
+endif
+
ifdef CONFIG_PCI
O_OBJS += pci.o
endif
@@ -34,6 +33,10 @@
ifdef CONFIG_IA64_MCA
O_OBJS += mca.o mca_asm.o
+endif
+
+ifdef CONFIG_IA64_BRL_EMU
+O_OBJS += brl_emu.o
endif
clean::
diff -urN linux-davidm/arch/ia64/kernel/brl_emu.c linux-2.4.0-test1-lia/arch/ia64/kernel/brl_emu.c
--- linux-davidm/arch/ia64/kernel/brl_emu.c Wed Dec 31 16:00:00 1969
+++ linux-2.4.0-test1-lia/arch/ia64/kernel/brl_emu.c Thu Jun 1 01:01:03 2000
@@ -0,0 +1,220 @@
+/*
+ * Emulation of the "brl" instruction for IA64 processors that
+ * don't support it in hardware.
+ * Author: Stephan Zeisset, Intel Corp. <Stephan.Zeisset@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h>
+
+extern char ia64_set_b1, ia64_set_b2, ia64_set_b3, ia64_set_b4, ia64_set_b5;
+
+struct illegal_op_return {
+ unsigned long fkt, arg1, arg2, arg3;
+};
+
+/*
+ * The unimplemented bits of a virtual address must be set
+ * to the value of the most significant implemented bit.
+ * unimpl_va_mask includes all unimplemented bits and
+ * the most significant implemented bit, so the result
+ * of an and operation with the mask must be all 0's
+ * or all 1's for the address to be valid.
+ */
+#define unimplemented_virtual_address(va) ( \
+ ((va) & my_cpu_data.unimpl_va_mask) != 0 && \
+ ((va) & my_cpu_data.unimpl_va_mask) != my_cpu_data.unimpl_va_mask \
+)
+
+/*
+ * The unimplemented bits of a physical address must be 0.
+ * unimpl_pa_mask includes all unimplemented bits, so the result
+ * of an and operation with the mask must be all 0's for the
+ * address to be valid.
+ */
+#define unimplemented_physical_address(pa) ( \
+ ((pa) & my_cpu_data.unimpl_pa_mask) != 0 \
+)
+
+/*
+ * Handle an illegal operation fault that was caused by an
+ * unimplemented "brl" instruction.
+ * If we are not successful (e.g because the illegal operation
+ * wasn't caused by a "brl" after all), we return -1.
+ * If we are successful, we return either 0 or the address
+ * of a "fixup" function for manipulating preserved register
+ * state.
+ */
+
+struct illegal_op_return
+ia64_emulate_brl (struct pt_regs *regs, unsigned long ar_ec)
+{
+ unsigned long bundle[2];
+ unsigned long opcode, btype, qp, offset;
+ unsigned long next_ip;
+ struct siginfo siginfo;
+ struct illegal_op_return rv;
+ int tmp_taken, unimplemented_address;
+
+ rv.fkt = (unsigned long) -1;
+
+ /*
+ * Decode the instruction bundle.
+ */
+
+ if (copy_from_user(bundle, (void *) (regs->cr_iip), sizeof(bundle)))
+ return rv;
+
+ next_ip = (unsigned long) regs->cr_iip + 16;
+
+ /* "brl" must be in slot 2. */
+ if (ia64_psr(regs)->ri != 1) return rv;
+
+ /* Must be "mlx" template */
+ if ((bundle[0] & 0x1e) != 0x4) return rv;
+
+ opcode = (bundle[1] >> 60);
+ btype = ((bundle[1] >> 29) & 0x7);
+ qp = ((bundle[1] >> 23) & 0x3f);
+ offset = ((bundle[1] & 0x0800000000000000L) << 4)
+ | ((bundle[1] & 0x00fffff000000000L) >> 32)
+ | ((bundle[1] & 0x00000000007fffffL) << 40)
+ | ((bundle[0] & 0xffff000000000000L) >> 24);
+
+ tmp_taken = regs->pr & (1L << qp);
+
+ switch(opcode) {
+
+ case 0xC:
+ /*
+ * Long Branch.
+ */
+ if (btype != 0) return rv;
+ rv.fkt = 0;
+ if (!(tmp_taken)) {
+ /*
+ * Qualifying predicate is 0.
+ * Skip instruction.
+ */
+ regs->cr_iip = next_ip;
+ ia64_psr(regs)->ri = 0;
+ return rv;
+ }
+ break;
+
+ case 0xD:
+ /*
+ * Long Call.
+ */
+ rv.fkt = 0;
+ if (!(tmp_taken)) {
+ /*
+ * Qualifying predicate is 0.
+ * Skip instruction.
+ */
+ regs->cr_iip = next_ip;
+ ia64_psr(regs)->ri = 0;
+ return rv;
+ }
+
+ /*
+ * BR[btype] = IP+16
+ */
+ switch(btype) {
+ case 0:
+ regs->b0 = next_ip;
+ break;
+ case 1:
+ rv.fkt = (unsigned long) &ia64_set_b1;
+ break;
+ case 2:
+ rv.fkt = (unsigned long) &ia64_set_b2;
+ break;
+ case 3:
+ rv.fkt = (unsigned long) &ia64_set_b3;
+ break;
+ case 4:
+ rv.fkt = (unsigned long) &ia64_set_b4;
+ break;
+ case 5:
+ rv.fkt = (unsigned long) &ia64_set_b5;
+ break;
+ case 6:
+ regs->b6 = next_ip;
+ break;
+ case 7:
+ regs->b7 = next_ip;
+ break;
+ }
+ rv.arg1 = next_ip;
+
+ /*
+ * AR[PFS].pfm = CFM
+ * AR[PFS].pec = AR[EC]
+ * AR[PFS].ppl = PSR.cpl
+ */
+ regs->ar_pfs = ((regs->cr_ifs & 0x3fffffffff)
+ | (ar_ec << 52)
+ | ((unsigned long) ia64_psr(regs)->cpl << 62));
+
+ /*
+ * CFM.sof -= CFM.sol
+ * CFM.sol = 0
+ * CFM.sor = 0
+ * CFM.rrb.gr = 0
+ * CFM.rrb.fr = 0
+ * CFM.rrb.pr = 0
+ */
+ regs->cr_ifs = ((regs->cr_ifs & 0xffffffc00000007f)
+ - ((regs->cr_ifs >> 7) & 0x7f));
+
+ break;
+
+ default:
+ /*
+ * Unknown opcode.
+ */
+ return rv;
+
+ }
+
+ regs->cr_iip += offset;
+ ia64_psr(regs)->ri = 0;
+
+	if (ia64_psr(regs)->it == 0)
+ unimplemented_address = unimplemented_physical_address(regs->cr_iip);
+ else
+ unimplemented_address = unimplemented_virtual_address(regs->cr_iip);
+
+ if (unimplemented_address) {
+ /*
+ * The target address contains unimplemented bits.
+ */
+ printk("Woah! Unimplemented Instruction Address Trap!\n");
+ siginfo.si_signo = SIGILL;
+ siginfo.si_errno = 0;
+ siginfo.si_code = ILL_BADIADDR;
+ force_sig_info(SIGILL, &siginfo, current);
+ } else if (ia64_psr(regs)->tb) {
+ /*
+ * Branch Tracing is enabled.
+ * Force a taken branch signal.
+ */
+ siginfo.si_signo = SIGTRAP;
+ siginfo.si_errno = 0;
+ siginfo.si_code = TRAP_BRANCH;
+ force_sig_info(SIGTRAP, &siginfo, current);
+ } else if (ia64_psr(regs)->ss) {
+ /*
+ * Single Step is enabled.
+ * Force a trace signal.
+ */
+ siginfo.si_signo = SIGTRAP;
+ siginfo.si_errno = 0;
+ siginfo.si_code = TRAP_TRACE;
+ force_sig_info(SIGTRAP, &siginfo, current);
+ }
+ return rv;
+}
diff -urN linux-davidm/arch/ia64/kernel/efi.c linux-2.4.0-test1-lia/arch/ia64/kernel/efi.c
--- linux-davidm/arch/ia64/kernel/efi.c Fri Apr 21 15:21:24 2000
+++ linux-2.4.0-test1-lia/arch/ia64/kernel/efi.c Thu Jun 1 01:01:16 2000
@@ -5,9 +5,9 @@
*
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- * Copyright (C) 1999 Hewlett-Packard Co.
+ * Copyright (C) 1999-2000 Hewlett-Packard Co.
* Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com>
+ * Copyright (C) 1999-2000 Stephane Eranian <eranian@hpl.hp.com>
*
* All EFI Runtime Services are not implemented yet as EFI only
* supports physical mode addressing on SoftSDV. This is to be fixed
@@ -22,6 +22,7 @@
#include <asm/efi.h>
#include <asm/io.h>
+#include <asm/pgtable.h>
#include <asm/processor.h>
#define EFI_DEBUG 0
@@ -207,6 +208,61 @@
}
}
+/*
+ * Look for the PAL_CODE region reported by EFI and maps it using an
+ * ITR to enable safe PAL calls in virtual mode. See IA-64 Processor
+ * Abstraction Layer chapter 11 in ADAG
+ */
+static void
+map_pal_code (void)
+{
+ void *efi_map_start, *efi_map_end, *p;
+ efi_memory_desc_t *md;
+ u64 efi_desc_size;
+ int pal_code_count=0;
+ u64 mask, flags;
+ u64 vaddr;
+
+ efi_map_start = __va(ia64_boot_param.efi_memmap);
+ efi_map_end = efi_map_start + ia64_boot_param.efi_memmap_size;
+ efi_desc_size = ia64_boot_param.efi_memdesc_size;
+
+ for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+ md = p;
+ if (md->type != EFI_PAL_CODE) continue;
+
+ if (++pal_code_count > 1) {
+ printk(KERN_ERR "Too many EFI Pal Code memory ranges, dropped @ %lx\n",
+ md->phys_addr);
+ continue;
+ }
+ mask = ~((1 << _PAGE_SIZE_4M)-1); /* XXX should be dynamic? */
+ vaddr = PAGE_OFFSET + md->phys_addr;
+
+ printk(__FUNCTION__": mapping PAL code [0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
+ md->phys_addr, md->phys_addr + (md->num_pages << 12),
+ vaddr & mask, (vaddr & mask) + 4*1024*1024);
+
+ /*
+ * Cannot write to CRx with PSR.ic=1
+ */
+ ia64_clear_ic(flags);
+
+ /*
+ * ITR0/DTR0: used for kernel code/data
+ * ITR1/DTR1: used by HP simulator
+ * ITR2/DTR2: map PAL code
+ * ITR3/DTR3: used to map PAL calls buffer
+ */
+ ia64_itr(0x1, 2, vaddr & mask,
+ pte_val(mk_pte_phys(md->phys_addr,
+ __pgprot(__DIRTY_BITS|_PAGE_PL_0|_PAGE_AR_RX))),
+ _PAGE_SIZE_4M);
+ local_irq_restore(flags);
+ ia64_srlz_i ();
+ }
+}
+
void __init
efi_init (void)
{
@@ -291,6 +347,8 @@
}
}
#endif
+
+ map_pal_code();
}
void
diff -urN linux-davidm/arch/ia64/kernel/efi_stub.S linux-2.4.0-test1-lia/arch/ia64/kernel/efi_stub.S
--- linux-davidm/arch/ia64/kernel/efi_stub.S Thu Jun 1 01:38:40 2000
+++ linux-2.4.0-test1-lia/arch/ia64/kernel/efi_stub.S Thu Jun 1 01:01:29 2000
@@ -41,52 +41,6 @@
.text
/*
- * Switch execution mode from virtual to physical or vice versa.
- *
- * Inputs:
- * r16 = new psr to establish
- */
-ENTRY(switch_mode)
- {
- alloc r2=ar.pfs,0,0,0,0
- rsm psr.i | psr.ic // disable interrupts and interrupt collection
- mov r15=ip
- }
- ;;
- {
- flushrs // must be first insn in group
- srlz.i
- shr.u r19=r15,61 // r19 <- top 3 bits of current IP
- }
- ;;
- mov cr.ipsr=r16 // set new PSR
-	add r3=1f-switch_mode,r15
- xor r15=0x7,r19 // flip the region bits
-
- mov r17=ar.bsp
- mov r14=rp // get return address into a general register
-
- // switch RSE backing store:
- ;;
- dep r17=r15,r17,61,3 // make ar.bsp physical or virtual
- mov r18=ar.rnat // save ar.rnat
- ;;
- mov ar.bspstore=r17 // this steps on ar.rnat
- dep r3=r15,r3,61,3 // make rfi return address physical or virtual
- ;;
- mov cr.iip=r3
- mov cr.ifs=r0
- dep sp=r15,sp,61,3 // make stack pointer physical or virtual
- ;;
- mov ar.rnat=r18 // restore ar.rnat
- dep r14=r15,r14,61,3 // make function return address physical or virtual
- rfi // must be last insn in group
- ;;
-1: mov rp=r14
- br.ret.sptk.few rp
-END(switch_mode)
-
-/*
* Inputs:
* in0 = address of function descriptor of EFI routine to call
* in1..in7 = arguments to routine
@@ -121,7 +75,7 @@
;;
andcm r16=loc3,r16 // get psr with IT, DT, and RT bits cleared
mov out3=in4
- br.call.sptk.few rp=switch_mode
+ br.call.sptk.few rp=ia64_switch_mode
.ret0:
mov out4=in5
mov out5=in6
@@ -130,7 +84,7 @@
.ret1:
mov ar.rsc=r0 // put RSE in enforced lazy, LE mode
mov r16=loc3
- br.call.sptk.few rp=switch_mode // return to virtual mode
+ br.call.sptk.few rp=ia64_switch_mode // return to virtual mode
.ret2:
mov ar.rsc=loc4 // restore RSE configuration
mov ar.pfs=loc1
diff -urN linux-davidm/arch/ia64/kernel/entry.S linux-2.4.0-test1-lia/arch/ia64/kernel/entry.S
--- linux-davidm/arch/ia64/kernel/entry.S Thu Jun 1 01:38:40 2000
+++ linux-2.4.0-test1-lia/arch/ia64/kernel/entry.S Thu Jun 1 01:02:14 2000
@@ -375,8 +375,6 @@
alloc loc1=ar.pfs,8,3,0,0
;; // WAW on CFM at the br.call
mov loc0=rp
- .fframe IA64_SWITCH_STACK_SIZE
- adds sp=-IA64_SWITCH_STACK_SIZE,sp
br.call.sptk.many rp=save_switch_stack_with_current_frame // must preserve b6!!
.ret2:	mov loc2=b6
br.call.sptk.few rp=syscall_trace
@@ -532,7 +530,7 @@
2:
// check & deliver pending signals:
(p2) br.call.spnt.few rp=handle_signal_delivery
-#if defined(CONFIG_SMP) || defined(CONFIG_IA64_SOFTSDV_HACKS)
+#if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_IA64_SOFTSDV_HACKS)
// Check for lost ticks
rsm psr.i
mov r2 = ar.itc
@@ -747,7 +745,7 @@
#endif /* CONFIG_SMP */
-#if defined(CONFIG_SMP) || defined(CONFIG_IA64_SOFTSDV_HACKS)
+#if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_IA64_SOFTSDV_HACKS)
ENTRY(invoke_ia64_reset_itm)
UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8))
@@ -762,7 +760,7 @@
br.ret.sptk.many rp
END(invoke_ia64_reset_itm)
-#endif /* defined(CONFIG_SMP) || defined(CONFIG_IA64_SOFTSDV_HACKS) */
+#endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC || CONFIG_IA64_SOFTSDV_HACKS */
/*
* Invoke do_softirq() while preserving in0-in7, which may be needed
@@ -847,7 +845,7 @@
setup_switch_stack:
UNW(.prologue)
- mov r16=loc0
+ mov r16=loc1
DO_SAVE_SWITCH_STACK
UNW(.body)
br.cond.sptk.many back_from_setup_switch_stack
diff -urN linux-davidm/arch/ia64/kernel/entry.h linux-2.4.0-test1-lia/arch/ia64/kernel/entry.h
--- linux-davidm/arch/ia64/kernel/entry.h Thu Jun 1 01:38:40 2000
+++ linux-2.4.0-test1-lia/arch/ia64/kernel/entry.h Thu Jun 1 01:02:23 2000
@@ -11,8 +11,8 @@
#define PT_REGS_UNWIND_INFO \
UNW(.prologue); \
- UNW(.unwabi @svr4, 105); \
- UNW(.fframe IA64_PT_REGS_SIZE); \
+ UNW(.unwabi @svr4, 'i'); \
+ UNW(.fframe IA64_PT_REGS_SIZE+16); \
UNW(.spillsp rp, PT(CR_IIP)); \
UNW(.spillsp ar.pfs, PT(CR_IFS)); \
UNW(.spillsp ar.unat, PT(AR_UNAT)); \
diff -urN linux-davidm/arch/ia64/kernel/fw-emu.c linux-2.4.0-test1-lia/arch/ia64/kernel/fw-emu.c
--- linux-davidm/arch/ia64/kernel/fw-emu.c Fri Mar 10 15:24:02 2000
+++ linux-2.4.0-test1-lia/arch/ia64/kernel/fw-emu.c Thu Jun 1 01:02:33 2000
@@ -124,7 +124,18 @@
.proc pal_emulator_static
pal_emulator_static:
mov r8=-1
- cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */
+
+	mov r9=256
+ ;;
+ cmp.gtu p6,p7=r9,r28 /* r28 <= 255? */
+(p6) br.cond.sptk.few static
+ ;;
+	mov r9=512
+ ;;
+ cmp.gtu p6,p7=r9,r28
+(p6) br.cond.sptk.few stacked
+ ;;
+static: cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */
(p7) br.cond.sptk.few 1f
;;
mov r8=0 /* status = 0 */
@@ -157,7 +168,12 @@
;;
mov ar.lc=r9
mov r8=r0
-1: br.cond.sptk.few rp
+1:
+ br.cond.sptk.few rp
+
+stacked:
+ br.ret.sptk.few rp
+
.endp pal_emulator_static\n");
/* Macro to emulate SAL call using legacy IN and OUT calls to CF8, CFC etc.. */
diff -urN linux-davidm/arch/ia64/kernel/head.S linux-2.4.0-test1-lia/arch/ia64/kernel/head.S
--- linux-davidm/arch/ia64/kernel/head.S Thu Jun 1 01:38:40 2000
+++ linux-2.4.0-test1-lia/arch/ia64/kernel/head.S Thu Jun 1 01:02:47 2000
@@ -633,3 +633,73 @@
	mov f127=f0
br.ret.sptk.few rp
END(__ia64_init_fpu)
+
+/*
+ * Switch execution mode from virtual to physical or vice versa.
+ *
+ * Inputs:
+ * r16 = new psr to establish
+ *
+ * Note: RSE must already be in enforced lazy mode
+ */
+GLOBAL_ENTRY(ia64_switch_mode)
+ {
+ alloc r2=ar.pfs,0,0,0,0
+ rsm psr.i | psr.ic // disable interrupts and interrupt collection
+ mov r15=ip
+ }
+ ;;
+ {
+ flushrs // must be first insn in group
+ srlz.i
+ shr.u r19=r15,61 // r19 <- top 3 bits of current IP
+ }
+ ;;
+ mov cr.ipsr=r16 // set new PSR
+	add r3=1f-ia64_switch_mode,r15
+ xor r15=0x7,r19 // flip the region bits
+
+ mov r17=ar.bsp
+ mov r14=rp // get return address into a general register
+
+ // switch RSE backing store:
+ ;;
+ dep r17=r15,r17,61,3 // make ar.bsp physical or virtual
+ mov r18=ar.rnat // save ar.rnat
+ ;;
+ mov ar.bspstore=r17 // this steps on ar.rnat
+ dep r3=r15,r3,61,3 // make rfi return address physical or virtual
+ ;;
+ mov cr.iip=r3
+ mov cr.ifs=r0
+ dep sp=r15,sp,61,3 // make stack pointer physical or virtual
+ ;;
+ mov ar.rnat=r18 // restore ar.rnat
+ dep r14=r15,r14,61,3 // make function return address physical or virtual
+ rfi // must be last insn in group
+ ;;
+1: mov rp=r14
+ br.ret.sptk.few rp
+END(ia64_switch_mode)
+
+#ifdef CONFIG_IA64_BRL_EMU
+
+/*
+ * Assembly routines used by brl_emu.c to set preserved register state.
+ */
+
+#define SET_REG(reg) \
+ GLOBAL_ENTRY(ia64_set_##reg); \
+ alloc r16=ar.pfs,1,0,0,0; \
+ mov reg=r32; \
+ ;; \
+ br.ret.sptk rp; \
+ END(ia64_set_##reg)
+
+SET_REG(b1);
+SET_REG(b2);
+SET_REG(b3);
+SET_REG(b4);
+SET_REG(b5);
+
+#endif /* CONFIG_IA64_BRL_EMU */
diff -urN linux-davidm/arch/ia64/kernel/ivt.S linux-2.4.0-test1-lia/arch/ia64/kernel/ivt.S
--- linux-davidm/arch/ia64/kernel/ivt.S Thu Jun 1 01:38:40 2000
+++ linux-2.4.0-test1-lia/arch/ia64/kernel/ivt.S Thu Jun 1 01:03:38 2000
@@ -316,7 +316,7 @@
movl r17=__DIRTY_BITS|_PAGE_PL_0|_PAGE_AR_RX
;;
shr.u r18=r16,57 // move address bit 61 to bit 4
- dep r16=0,r16,IA64_PHYS_BITS,(64-IA64_PHYS_BITS) // clear ed, resvd, and unimpl. phys bits
+ dep r16=0,r16,IA64_MAX_PHYS_BITS,(64-IA64_MAX_PHYS_BITS) // clear ed & reserved bits
;;
andcm r18=0x10,r18 // bit 4=~address-bit(61)
dep r16=r17,r16,0,12 // insert PTE control bits into r16
@@ -331,18 +331,26 @@
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
mov r16=cr.ifa // get address that caused the TLB miss
movl r17=__DIRTY_BITS|_PAGE_PL_0|_PAGE_AR_RW
+ mov r20=cr.isr
+ mov r21=cr.ipsr
+ mov r19=pr
;;
+ tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
shr.u r18=r16,57 // move address bit 61 to bit 4
- dep r16=0,r16,IA64_PHYS_BITS,(64-IA64_PHYS_BITS) // clear ed, resvd, and unimpl. phys bits
+ dep r16=0,r16,IA64_MAX_PHYS_BITS,(64-IA64_MAX_PHYS_BITS) // clear ed & reserved bits
;;
andcm r18=0x10,r18 // bit 4=~address-bit(61)
dep r16=r17,r16,0,12 // insert PTE control bits into r16
;;
or r16=r16,r18 // set bit 4 (uncached) if the access was to region 6
+(p6) mov cr.ipsr=r21
;;
- itc.d r16 // insert the TLB entry
+(p7) itc.d r16 // insert the TLB entry
+ mov pr=r19,-1
rfi
+ ;;
+
//-----------------------------------------------------------------------------------
// call do_page_fault (predicates are in r31, psr.dt is off, r16 is faulting address)
page_fault:
@@ -647,6 +655,50 @@
// 0x3c00 Entry 15 (size 64 bundles) Reserved
FAULT(15)
+//
+// Squatting in this space ...
+//
+// This special case dispatcher for illegal operation faults
+// allows preserved registers to be modified through a
+// callback function (asm only) that is handed back from
+// the fault handler in r8. Up to three arguments can be
+// passed to the callback function by returning an aggregate
+// with the callback as its first element, followed by the
+// arguments.
+//
+dispatch_illegal_op_fault:
+ SAVE_MIN_WITH_COVER
+ //
+ // The "alloc" can cause a mandatory store which could lead to
+ // an "Alt DTLB" fault which we can handle only if psr.ic is on.
+ //
+ ssm psr.ic | psr.dt
+ ;;
+ srlz.i // guarantee that interrupt collection is enabled
+ ;;
+(p15) ssm psr.i // restore psr.i
+ adds r3=8,r2 // set up second base pointer for SAVE_REST
+ ;;
+ alloc r14=ar.pfs,0,0,1,0 // must be first in insn group
+ mov out0=ar.ec
+ ;;
+ SAVE_REST
+ ;;
+ br.call.sptk.few rp=ia64_illegal_op_fault
+ ;;
+ alloc r14=ar.pfs,0,0,3,0 // must be first in insn group
+ mov out0=r9
+ mov out1=r10
+ mov out2=r11
+ movl r15=ia64_leave_kernel
+ ;;
+ mov rp=r15
+ mov b6=r8
+ ;;
+ cmp.ne p6,p0=0,r8
+(p6)	br.call.dpnt b6=b6		// call returns to ia64_leave_kernel
+ br.sptk ia64_leave_kernel
+
.align 1024
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4000 Entry 16 (size 64 bundles) Reserved
@@ -909,7 +961,16 @@
.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
- FAULT(24)
+ mov r16=cr.isr
+ mov r31=pr
+ rsm psr.dt // avoid nested faults due to TLB misses...
+ ;;
+ srlz.d // ensure everyone knows psr.dt is off...
+ cmp4.eq p6,p0=0,r16
+(p6) br.sptk dispatch_illegal_op_fault
+ ;;
+	mov r19=24		// fault number
+ br.cond.sptk.many dispatch_to_fault_handler
.align 256
/////////////////////////////////////////////////////////////////////////////////////////
diff -urN linux-davidm/arch/ia64/kernel/minstate.h linux-2.4.0-test1-lia/arch/ia64/kernel/minstate.h
--- linux-davidm/arch/ia64/kernel/minstate.h Thu Jun 1 01:38:40 2000
+++ linux-2.4.0-test1-lia/arch/ia64/kernel/minstate.h Thu Jun 1 01:03:59 2000
@@ -69,7 +69,7 @@
(p7) mov rARBSPSTORE=ar.bspstore; /* save ar.bspstore */ \
(p7) dep rKRBS=-1,rKRBS,61,3; /* compute kernel virtual addr of RBS */ \
;; \
-(pKern) addl r1=16-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
+(pKern) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
(p7) mov ar.bspstore=rKRBS; /* switch to kernel RBS */ \
;; \
(p7) mov r18=ar.bsp; \
diff -urN linux-davidm/arch/ia64/kernel/pal.S linux-2.4.0-test1-lia/arch/ia64/kernel/pal.S
--- linux-davidm/arch/ia64/kernel/pal.S Thu Jun 1 01:38:40 2000
+++ linux-2.4.0-test1-lia/arch/ia64/kernel/pal.S Thu Jun 1 01:04:13 2000
@@ -5,9 +5,14 @@
* Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 1999-2000 David Mosberger <davidm@hpl.hp.com>
+ * Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com>
+ *
+ * 05/22/2000 eranian Added support for stacked register calls
+ * 05/24/2000 eranian Added support for physical mode static calls
*/
#include <asm/asmmacro.h>
+#include <asm/processor.h>
.text
.psr abi64
@@ -83,3 +88,108 @@
srlz.d // seralize restoration of psr.l
br.ret.sptk.few b0
END(ia64_pal_call_static)
+
+/*
+ * Make a PAL call using the stacked registers calling convention.
+ *
+ * Inputs:
+ * in0 Index of PAL service
+ * in2 - in3 Remaining PAL arguments
+ */
+GLOBAL_ENTRY(ia64_pal_call_stacked)
+ UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5))
+ alloc loc1 = ar.pfs,5,4,87,0
+ movl loc2 = pal_entry_point
+
+ mov r28 = in0 // Index MUST be copied to r28
+ mov out0 = in0 // AND in0 of PAL function
+ mov loc0 = rp
+ .body
+ ;;
+ ld8 loc2 = [loc2] // loc2 <- entry point
+ mov out1 = in1
+ mov out2 = in2
+ mov out3 = in3
+ mov loc3 = psr
+ ;;
+ rsm psr.i
+ mov b7 = loc2
+ ;;
+	br.call.sptk.many rp=b7		// now make the call
+.ret2:
+ mov psr.l = loc3
+ mov ar.pfs = loc1
+ mov rp = loc0
+ ;;
+ srlz.d // serialize restoration of psr.l
+ br.ret.sptk.few b0
+END(ia64_pal_call_stacked)
+
+/*
+ * Make a physical mode PAL call using the static registers calling convention.
+ *
+ * Inputs:
+ * in0 Index of PAL service
+ * in2 - in3 Remaining PAL arguments
+ *
+ * PSR_DB, PSR_LP, PSR_TB, PSR_ID, PSR_DA are never set by the kernel.
+ * So we don't need to clear them.
+ */
+#define PAL_PSR_BITS_TO_CLEAR \
+ (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT | \
+ IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \
+ IA64_PSR_DFL | IA64_PSR_DFH)
+
+#define PAL_PSR_BITS_TO_SET \
+ (IA64_PSR_BN)
+
+
+GLOBAL_ENTRY(ia64_pal_call_phys_static)
+ UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(6))
+ alloc loc1 = ar.pfs,6,90,0,0
+ movl loc2 = pal_entry_point
+1: {
+ mov r28 = in0 // copy procedure index
+ mov r8 = ip // save ip to compute branch
+ mov loc0 = rp // save rp
+ }
+ .body
+ ;;
+ ld8 loc2 = [loc2] // loc2 <- entry point
+ mov r29 = in1 // first argument
+ mov r30 = in2 // copy arg2
+ mov r31 = in3 // copy arg3
+ ;;
+ mov loc3 = psr // save psr
+ adds r8 = .ret4-1b,r8 // calculate return address for call
+ ;;
+ mov loc4=ar.rsc // save RSE configuration
+ dep.z loc2=loc2,0,61 // convert pal entry point to physical
+ dep.z r8=r8,0,61 // convert rp to physical
+ ;;
+ mov b7 = loc2 // install target to branch reg
+ mov ar.rsc=r0 // put RSE in enforced lazy, LE mode
+ movl r16=PAL_PSR_BITS_TO_CLEAR
+ movl r17=PAL_PSR_BITS_TO_SET
+ ;;
+ or loc3=loc3,r17 // add in psr the bits to set
+ ;;
+ andcm r16=loc3,r16 // removes bits to clear from psr
+ br.call.sptk.few rp=ia64_switch_mode
+.ret3:
+ mov rp = r8 // install return address (physical)
+ br.cond.sptk.few b7
+.ret4:
+ mov ar.rsc=r0 // put RSE in enforced lazy, LE mode
+ mov r16=loc3 // r16= original psr
+ br.call.sptk.few rp=ia64_switch_mode // return to virtual mode
+
+.ret5: mov psr.l = loc3 // restore init PSR
+
+ mov ar.pfs = loc1
+ mov rp = loc0
+ ;;
+ mov ar.rsc=loc4 // restore RSE configuration
+ srlz.d // seralize restoration of psr.l
+ br.ret.sptk.few b0
+END(ia64_pal_call_phys_static)
diff -urN linux-davidm/arch/ia64/kernel/palinfo.c linux-2.4.0-test1-lia/arch/ia64/kernel/palinfo.c
--- linux-davidm/arch/ia64/kernel/palinfo.c Wed Dec 31 16:00:00 1969
+++ linux-2.4.0-test1-lia/arch/ia64/kernel/palinfo.c Thu Jun 1 01:04:23 2000
@@ -0,0 +1,780 @@
+/*
+ * palinfo.c
+ *
+ * Prints processor specific information reported by PAL.
+ * This code is based on specification of PAL as of the
+ * Intel IA-64 Architecture Software Developer's Manual v1.0.
+ *
+ *
+ * Copyright (C) 2000 Hewlett-Packard Co
+ * Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com>
+ *
+ * 05/26/2000 S.Eranian initial release
+ *
+ * ISSUES:
+ * - because of some PAL bugs, some calls return invalid results or
+ * are empty for now.
+ * - remove hack to avoid problem with <= 256M RAM for itr.
+ */
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/mm.h>
+
+#include <asm/pal.h>
+#include <asm/sal.h>
+#include <asm/efi.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+
+/*
+ * Hope to get rid of these in a near future
+*/
+#define IA64_PAL_VERSION_BUG 1
+
+#define PALINFO_VERSION "0.1"
+
+typedef int (*palinfo_func_t)(char*);
+
+typedef struct {
+ const char *name; /* name of the proc entry */
+ palinfo_func_t proc_read; /* function to call for reading */
+ struct proc_dir_entry *entry; /* registered entry (removal) */
+} palinfo_entry_t;
+
+static struct proc_dir_entry *palinfo_dir;
+
+/*
+ * A bunch of string array to get pretty printing
+ */
+
+static char *cache_types[] = {
+ "", /* not used */
+ "Instruction",
+ "Data",
+ "Data/Instruction" /* unified */
+};
+
+static const char *cache_mattrib[]={
+ "WriteThrough",
+ "WriteBack",
+ "", /* reserved */
+ "" /* reserved */
+};
+
+static const char *cache_st_hints[]={
+ "Temporal, level 1",
+ "Reserved",
+ "Reserved",
+ "Non-temporal, all levels",
+ "Reserved",
+ "Reserved",
+ "Reserved",
+ "Reserved"
+};
+
+static const char *cache_ld_hints[]={
+ "Temporal, level 1",
+ "Non-temporal, level 1",
+ "Reserved",
+ "Non-temporal, all levels",
+ "Reserved",
+ "Reserved",
+ "Reserved",
+ "Reserved"
+};
+
+static const char *rse_hints[]={
+ "enforced lazy",
+ "eager stores",
+ "eager loads",
+ "eager loads and stores"
+};
+
+#define RSE_HINTS_COUNT (sizeof(rse_hints)/sizeof(const char *))
+
+/*
+ * The current revision of the Volume 2 of
+ * IA-64 Architecture Software Developer's Manual is wrong.
+ * Table 4-10 has invalid information concerning the ma field:
+ * Correct table is:
+ * bit 0 - 001 - UC
+ * bit 4 - 100 - UC
+ * bit 5 - 101 - UCE
+ * bit 6 - 110 - WC
+ * bit 7 - 111 - NatPage
+ */
+static const char *mem_attrib[]={
+ "Write Back (WB)", /* 000 */
+ "Uncacheable (UC)", /* 001 */
+ "Reserved", /* 010 */
+ "Reserved", /* 011 */
+ "Uncacheable (UC)", /* 100 */
+ "Uncacheable Exported (UCE)", /* 101 */
+ "Write Coalescing (WC)", /* 110 */
+ "NaTPage" /* 111 */
+};
+
+
+
+/*
+ * Allocate a buffer suitable for calling PAL code in Virtual mode
+ *
+ * The documentation (PAL2.6) requires this buffer to have a pinned
+ * translation to avoid any DTLB faults. For this reason we allocate
+ * a page (large enough to hold any possible reply) and use a DTC
+ * to hold the translation during the call. A call the free_palbuffer()
+ * is required to release ALL resources (page + translation).
+ *
+ * The size of the page allocated is based on the PAGE_SIZE defined
+ * at compile time for the kernel, i.e. >= 4Kb.
+ *
+ * Return: a pointer to the newly allocated page (virtual address)
+ */
+static void *
+get_palcall_buffer(void)
+{
+ void *tmp;
+
+ tmp = (void *)__get_free_page(GFP_KERNEL);
+ if (tmp == 0) {
+ printk(KERN_ERR "%s: can't get a buffer page\n", __FUNCTION__);
+ } else if ( ((u64)tmp - PAGE_OFFSET) > (1<<_PAGE_SIZE_256M) ) { /* XXX: temporary hack */
+ unsigned long flags;
+
+ /* PSR.ic must be zero to insert new DTR */
+ ia64_clear_ic(flags);
+
+ /*
+ * we only insert of DTR
+ *
+ * XXX: we need to figure out a way to "allocate" TR(s) to avoid
+ * conflicts. Maybe something in an include file like pgtable.h
+ * page.h or processor.h
+ *
+ * ITR0/DTR0: used for kernel code/data
+ * ITR1/DTR1: used by HP simulator
+ * ITR2/DTR2: used to map PAL code
+ */
+ ia64_itr(0x2, 3, (u64)tmp,
+ pte_val(mk_pte_phys(__pa(tmp), __pgprot(__DIRTY_BITS|_PAGE_PL_0|_PAGE_AR_RW))), PAGE_SHIFT);
+
+ ia64_srlz_d ();
+
+ __restore_flags(flags);
+ }
+
+ return tmp;
+}
+
+/*
+ * Free a palcall buffer allocated with the previous call
+ *
+ * The translation is also purged.
+ */
+static void
+free_palcall_buffer(void *addr)
+{
+ __free_page(addr);
+ ia64_ptr(0x2, (u64)addr, PAGE_SHIFT);
+ ia64_srlz_d ();
+}
+
+/*
+ * Take a 64bit vector and produces a string such that
+ * if bit n is set then 2^n in clear text is generated. The adjustment
+ * to the right unit is also done.
+ *
+ * Input:
+ * - a pointer to a buffer to hold the string
+ * - a 64-bit vector
+ * Output:
+ * - a pointer to the end of the buffer
+ *
+ */
+static char *
+bitvector_process(char *p, u64 vector)
+{
+ int i,j;
+ const char *units[]={ "", "K", "M", "G", "T" };
+
+ for (i=0, j=0; i < 64; i++ , j=i/10) {
+ if (vector & 0x1) {
+ p += sprintf(p, "%d%s ", 1 << (i-j*10), units[j]);
+ }
+ vector >>= 1;
+ }
+ return p;
+}
+
+/*
+ * Take a 64bit vector and produces a string such that
+ * if bit n is set then register n is present. The function
+ * takes into account consecutive registers and prints out ranges.
+ *
+ * Input:
+ * - a pointer to a buffer to hold the string
+ * - a 64-bit vector
+ * Output:
+ * - a pointer to the end of the buffer
+ *
+ */
+static char *
+bitregister_process(char *p, u64 *reg_info, int max)
+{
+ int i, begin, skip = 0;
+ u64 value = reg_info[0];
+
+ value >>= i = begin = ffs(value) - 1;
+
+ for(; i < max; i++ ) {
+
+ if (i != 0 && (i%64) == 0) value = *++reg_info;
+
+ if ((value & 0x1) == 0 && skip == 0) {
+ if (begin <= i - 2)
+ p += sprintf(p, "%d-%d ", begin, i-1);
+ else
+ p += sprintf(p, "%d ", i-1);
+ skip = 1;
+ begin = -1;
+ } else if ((value & 0x1) && skip == 1) {
+ skip = 0;
+ begin = i;
+ }
+ value >>=1;
+ }
+ if (begin > -1) {
+ if (begin < 127)
+ p += sprintf(p, "%d-127", begin);
+ else
+ p += sprintf(p, "127");
+ }
+
+ return p;
+}
+
+static int
+power_info(char *page)
+{
+ s64 status;
+ char *p = page;
+ pal_power_mgmt_info_u_t *halt_info;
+ int i;
+
+ halt_info = get_palcall_buffer();
+ if (halt_info == 0) return 0;
+
+ status = ia64_pal_halt_info(halt_info);
+ if (status != 0) {
+ free_palcall_buffer(halt_info);
+ return 0;
+ }
+
+ for (i=0; i < 8 ; i++ ) {
+ if (halt_info[i].pal_power_mgmt_info_s.im == 1) {
+ p += sprintf(p, "Power level %d:\n" \
+ "\tentry_latency : %d cycles\n" \
+ "\texit_latency : %d cycles\n" \
+ "\tpower consumption : %d mW\n" \
+ "\tCache+TLB coherency : %s\n", i,
+ halt_info[i].pal_power_mgmt_info_s.entry_latency,
+ halt_info[i].pal_power_mgmt_info_s.exit_latency,
+ halt_info[i].pal_power_mgmt_info_s.power_consumption,
+ halt_info[i].pal_power_mgmt_info_s.co ? "Yes" : "No");
+ } else {
+ p += sprintf(p,"Power level %d: not implemented\n",i);
+ }
+ }
+
+ free_palcall_buffer(halt_info);
+
+ return p - page;
+}
+
+static int
+cache_info(char *page)
+{
+ char *p = page;
+ u64 levels, unique_caches;
+ pal_cache_config_info_t cci;
+ int i,j, k;
+ s64 status;
+
+ if ((status=ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
+ printk("ia64_pal_cache_summary=%ld\n", status);
+ return 0;
+ }
+
+ p += sprintf(p, "Cache levels : %ld\n" \
+ "Unique caches : %ld\n\n",
+ levels,
+ unique_caches);
+
+ for (i=0; i < levels; i++) {
+
+ for (j=2; j >0 ; j--) {
+
+ /* even without unification some level may not be present */
+ if ((status=ia64_pal_cache_config_info(i,j, &cci)) != 0) {
+ continue;
+ }
+ p += sprintf(p, "%s Cache level %d:\n" \
+ "\tSize : %ld bytes\n" \
+ "\tAttributes : ",
+ cache_types[j+cci.pcci_unified], i+1,
+ cci.pcci_cache_size);
+
+ if (cci.pcci_unified) p += sprintf(p, "Unified ");
+
+ p += sprintf(p, "%s\n", cache_mattrib[cci.pcci_cache_attr]);
+
+ p += sprintf(p, "\tAssociativity : %d\n" \
+ "\tLine size : %d bytes\n" \
+ "\tStride : %d bytes\n",
+ cci.pcci_assoc,
+ 1<<cci.pcci_line_size,
+ 1<<cci.pcci_stride);
+ if (j == 1)
+ p += sprintf(p, "\tStore latency : N/A\n");
+ else
+ p += sprintf(p, "\tStore latency : %d cycle(s)\n",
+ cci.pcci_st_latency);
+
+ p += sprintf(p, "\tLoad latency : %d cycle(s)\n" \
+ "\tStore hints : ",
+ cci.pcci_ld_latency);
+
+ for(k=0; k < 8; k++ ) {
+ if ( cci.pcci_st_hints & 0x1) p += sprintf(p, "[%s]", cache_st_hints[k]);
+ cci.pcci_st_hints >>=1;
+ }
+ p += sprintf(p, "\n\tLoad hints : ");
+
+ for(k=0; k < 8; k++ ) {
+ if ( cci.pcci_ld_hints & 0x1) p += sprintf(p, "[%s]", cache_ld_hints[k]);
+ cci.pcci_ld_hints >>=1;
+ }
+ p += sprintf(p, "\n\tAlias boundary : %d byte(s)\n" \
+ "\tTag LSB : %d\n" \
+ "\tTag MSB : %d\n",
+ 1<<cci.pcci_alias_boundary,
+ cci.pcci_tag_lsb,
+ cci.pcci_tag_msb);
+
+ /* when unified, data(j=2) is enough */
+ if (cci.pcci_unified) break;
+ }
+ }
+ return p - page;
+}
+
+
+static int
+vm_info(char *page)
+{
+ char *p = page;
+ u64 tr_pages =0, vw_pages=0, tc_pages;
+ u64 attrib;
+ pal_vm_info_1_u_t vm_info_1;
+ pal_vm_info_2_u_t vm_info_2;
+ pal_tc_info_u_t tc_info;
+ ia64_ptce_info_t ptce;
+ int i, j;
+ s64 status;
+
+ if ((status=ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
+ printk("ia64_pal_vm_summary=%ld\n", status);
+ return 0;
+ }
+
+
+ p += sprintf(p, "Physical Address Space : %d bits\n" \
+ "Virtual Address Space : %d bits\n" \
+ "Protection Key Registers(PKR) : %d\n" \
+ "Implemented bits in PKR.key : %d\n" \
+ "Hash Tag ID : 0x%x\n" \
+ "Size of RR.rid : %d\n",
+ vm_info_1.pal_vm_info_1_s.phys_add_size,
+ vm_info_2.pal_vm_info_2_s.impl_va_msb+1,
+ vm_info_1.pal_vm_info_1_s.max_pkr+1,
+ vm_info_1.pal_vm_info_1_s.key_size,
+ vm_info_1.pal_vm_info_1_s.hash_tag_id,
+ vm_info_2.pal_vm_info_2_s.rid_size);
+
+ if (ia64_pal_mem_attrib(&attrib) != 0) return 0;
+
+ p += sprintf(p, "Supported memory attributes : %s\n", mem_attrib[attrib&0x7]);
+
+ if ((status=ia64_pal_vm_page_size(&tr_pages, &vw_pages)) !=0) {
+ printk("ia64_pal_vm_page_size=%ld\n", status);
+ return 0;
+ }
+
+ p += sprintf(p, "\nTLB walker : %s implemented\n" \
+ "Number of DTR : %d\n" \
+ "Number of ITR : %d\n" \
+ "TLB insertable page sizes : ",
+ vm_info_1.pal_vm_info_1_s.vw ? "\b":"not",
+ vm_info_1.pal_vm_info_1_s.max_dtr_entry+1,
+ vm_info_1.pal_vm_info_1_s.max_itr_entry+1);
+
+
+ p = bitvector_process(p, tr_pages);
+
+ p += sprintf(p, "\nTLB purgeable page sizes : ");
+
+ p = bitvector_process(p, vw_pages);
+
+ if ((status=ia64_get_ptce(&ptce)) != 0) {
+ printk("ia64_get_ptce=%ld\n",status);
+ return 0;
+ }
+
+ p += sprintf(p, "\nPurge base address : 0x%016lx\n" \
+ "Purge outer loop count : %d\n" \
+ "Purge inner loop count : %d\n" \
+ "Purge outer loop stride : %d\n" \
+ "Purge inner loop stride : %d\n",
+ ptce.base,
+ ptce.count[0],
+ ptce.count[1],
+ ptce.stride[0],
+ ptce.stride[1]);
+
+ p += sprintf(p, "TC Levels : %d\n" \
+ "Unique TC(s) : %d\n",
+ vm_info_1.pal_vm_info_1_s.num_tc_levels,
+ vm_info_1.pal_vm_info_1_s.max_unique_tcs);
+
+ for(i=0; i < vm_info_1.pal_vm_info_1_s.num_tc_levels; i++) {
+ for (j=2; j>0 ; j--) {
+ tc_pages = 0; /* just in case */
+
+
+ /* even without unification, some levels may not be present */
+ if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0) {
+ continue;
+ }
+
+ p += sprintf(p, "\n%s Translation Cache Level %d:\n" \
+ "\tHash sets : %d\n" \
+ "\tAssociativity : %d\n" \
+ "\tNumber of entries : %d\n" \
+ "\tFlags : ",
+ cache_types[j+tc_info.tc_unified], i+1,
+ tc_info.tc_num_sets,
+ tc_info.tc_associativity,
+ tc_info.tc_num_entries);
+
+ if (tc_info.tc_pf) p += sprintf(p, "PreferredPageSizeOptimized ");
+ if (tc_info.tc_unified) p += sprintf(p, "Unified ");
+ if (tc_info.tc_reduce_tr) p += sprintf(p, "TCReduction");
+
+ p += sprintf(p, "\n\tSupported page sizes: ");
+
+ p = bitvector_process(p, tc_pages);
+
+ /* when unified date (j=2) is enough */
+ if (tc_info.tc_unified) break;
+ }
+ }
+ p += sprintf(p, "\n");
+
+ return p - page;
+}
+
+
+static int
+register_info(char *page)
+{
+ char *p = page;
+ u64 reg_info[2];
+ u64 info;
+ u64 phys_stacked;
+ pal_hints_u_t hints;
+ u64 iregs, dregs;
+ char *info_type[]={
+ "Implemented AR(s)",
+ "AR(s) with read side-effects",
+ "Implemented CR(s)",
+ "CR(s) with read side-effects",
+ };
+
+ for(info=0; info < 4; info++) {
+
+ if (ia64_pal_register_info(info, &reg_info[0], &reg_info[1]) != 0) return 0;
+
+ p += sprintf(p, "%-32s : ", info_type[info]);
+
+ p = bitregister_process(p, reg_info, 128);
+
+ p += sprintf(p, "\n");
+ }
+
+ if (ia64_pal_rse_info(&phys_stacked, &hints) != 0) return 0;
+
+ p += sprintf(p, "RSE stacked physical registers : %ld\n" \
+ "RSE load/store hints : %ld (%s)\n",
+ phys_stacked,
+ hints.ph_data,
+ hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(??)");
+
+ if (ia64_pal_debug_info(&iregs, &dregs)) return 0;
+
+ p += sprintf(p, "Instruction debug register pairs : %ld\n" \
+ "Data debug register pairs : %ld\n",
+ iregs, dregs);
+
+ return p - page;
+}
+
+static const char *proc_features[]={
+ NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
+ NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
+ NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
+ NULL,NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL,
+ NULL,NULL,NULL,NULL,NULL,
+ "XIP,XPSR,XFS implemented",
+ "XR1-XR3 implemented",
+ "Disable dynamic predicate prediction",
+ "Disable processor physical number",
+ "Disable dynamic data cache prefetch",
+ "Disable dynamic inst cache prefetch",
+ "Disable dynamic branch prediction",
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ "Disable BINIT on processor time-out",
+ "Disable dynamic power management (DPM)",
+ "Disable coherency",
+ "Disable cache",
+ "Enable CMCI promotion",
+ "Enable MCA to BINIT promotion",
+ "Enable MCA promotion",
+ "Enable BEER promotion"
+};
+
+
+static int
+processor_info(char *page)
+{
+ char *p = page;
+ const char **v = proc_features;
+ u64 avail=1, status=1, control=1;
+ int i;
+ s64 ret;
+
+ /* must be in physical mode */
+ if ((ret=ia64_pal_proc_get_features(&avail, &status, &control)) != 0) return 0;
+
+ for(i=0; i < 64; i++, v++,avail >>=1, status >>=1, control >>=1) {
+ if ( ! *v ) continue;
+ p += sprintf(p, "%-40s : %s%s %s\n", *v,
+ avail & 0x1 ? "" : "NotImpl",
+ avail & 0x1 ? (status & 0x1 ? "On" : "Off"): "",
+ avail & 0x1 ? (control & 0x1 ? "Ctrl" : "NoCtrl"): "");
+ }
+ return p - page;
+}
+
+/*
+ * physical mode call for PAL_VERSION is working fine.
+ * This function is meant to go away once PAL get fixed.
+ */
+static inline s64
+ia64_pal_version_phys(pal_version_u_t *pal_min_version, pal_version_u_t *pal_cur_version)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_PHYS(iprv, PAL_VERSION, 0, 0, 0);
+ if (pal_min_version)
+ pal_min_version->pal_version_val = iprv.v0;
+ if (pal_cur_version)
+ pal_cur_version->pal_version_val = iprv.v1;
+ return iprv.status;
+}
+
+static int
+version_info(char *page)
+{
+ s64 status;
+ pal_version_u_t min_ver, cur_ver;
+ char *p = page;
+
+#ifdef IA64_PAL_VERSION_BUG
+ /* The virtual mode call is buggy. But the physical mode call seems
+ * to be ok. Until they fix virtual mode, we do physical.
+ */
+ status = ia64_pal_version_phys(&min_ver, &cur_ver);
+#else
+ /* The system crashes if you enable this code with the wrong PAL
+ * code
+ */
+ status = ia64_pal_version(&min_ver, &cur_ver);
+#endif
+ if (status != 0) return 0;
+
+ p += sprintf(p, "PAL_vendor : 0x%x (min=0x%x)\n" \
+ "PAL_A revision : 0x%x (min=0x%x)\n" \
+ "PAL_A model : 0x%x (min=0x%x)\n" \
+ "PAL_B mode : 0x%x (min=0x%x)\n" \
+ "PAL_B revision : 0x%x (min=0x%x)\n",
+ cur_ver.pal_version_s.pv_pal_vendor,
+ min_ver.pal_version_s.pv_pal_vendor,
+ cur_ver.pal_version_s.pv_pal_a_rev,
+ cur_ver.pal_version_s.pv_pal_a_rev,
+ cur_ver.pal_version_s.pv_pal_a_model,
+ min_ver.pal_version_s.pv_pal_a_model,
+ cur_ver.pal_version_s.pv_pal_b_rev,
+ min_ver.pal_version_s.pv_pal_b_rev,
+ cur_ver.pal_version_s.pv_pal_b_model,
+ min_ver.pal_version_s.pv_pal_b_model);
+
+ return p - page;
+}
+
+static int
+perfmon_info(char *page)
+{
+ char *p = page;
+ u64 *pm_buffer;
+ pal_perf_mon_info_u_t pm_info;
+
+ pm_buffer = (u64 *)get_palcall_buffer();
+ if (pm_buffer == 0) return 0;
+
+ if (ia64_pal_perf_mon_info(pm_buffer, &pm_info) != 0) {
+ free_palcall_buffer(pm_buffer);
+ return 0;
+ }
+
+#ifdef IA64_PAL_PERF_MON_INFO_BUG
+ pm_buffer[5]=0x3;
+ pm_info.pal_perf_mon_info_s.cycles = 0x12;
+ pm_info.pal_perf_mon_info_s.retired = 0x08;
+#endif
+
+ p += sprintf(p, "PMC/PMD pairs : %d\n" \
+ "Counter width : %d bits\n" \
+ "Cycle event number : %d\n" \
+ "Retired event number : %d\n" \
+ "Implemented PMC : ",
+ pm_info.pal_perf_mon_info_s.generic,
+ pm_info.pal_perf_mon_info_s.width,
+ pm_info.pal_perf_mon_info_s.cycles,
+ pm_info.pal_perf_mon_info_s.retired);
+
+ p = bitregister_process(p, pm_buffer, 256);
+
+ p += sprintf(p, "\nImplemented PMD : ");
+
+ p = bitregister_process(p, pm_buffer+4, 256);
+
+ p += sprintf(p, "\nCycles count capable : ");
+
+ p = bitregister_process(p, pm_buffer+8, 256);
+
+ p += sprintf(p, "\nRetired bundles count capable : ");
+
+ p = bitregister_process(p, pm_buffer+12, 256);
+
+ p += sprintf(p, "\n");
+
+ free_palcall_buffer(pm_buffer);
+
+ return p - page;
+}
+
+static int
+frequency_info(char *page)
+{
+ char *p = page;
+ struct pal_freq_ratio proc, itc, bus;
+ u64 base;
+
+ if (ia64_pal_freq_base(&base) == -1)
+ p += sprintf(p, "Output clock : not implemented\n");
+ else
+ p += sprintf(p, "Output clock : %ld ticks/s\n", base);
+
+ if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0;
+
+ p += sprintf(p, "Processor/Clock ratio : %ld/%ld\n" \
+ "Bus/Clock ratio : %ld/%ld\n" \
+ "ITC/Clock ratio : %ld/%ld\n",
+ proc.num, proc.den,
+ bus.num, bus.den,
+ itc.num, itc.den);
+
+ return p - page;
+}
+
+
+/*
+ * Entry point routine: all calls go through this function
+ */
+static int
+palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ palinfo_func_t info = (palinfo_func_t)data;
+ int len = info(page);
+
+ if (len <= off+count) *eof = 1;
+
+ *start = page + off;
+ len -= off;
+
+ if (len>count) len = count;
+ if (len<0) len = 0;
+
+ return len;
+}
+
+/*
+ * List names,function pairs for every entry in /proc/palinfo
+ * Must be terminated with the NULL,NULL entry.
+ */
+static palinfo_entry_t palinfo_entries[]={
+ { "version_info", version_info, },
+ { "vm_info", vm_info, },
+ { "cache_info", cache_info, },
+ { "power_info", power_info, },
+ { "register_info", register_info, },
+ { "processor_info", processor_info, },
+ { "perfmon_info", perfmon_info, },
+ { "frequency_info", frequency_info, },
+ { NULL, NULL,}
+};
+
+
+static int __init
+palinfo_init(void)
+{
+ palinfo_entry_t *p;
+
+ printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION);
+
+ palinfo_dir = create_proc_entry("palinfo", S_IFDIR | S_IRUGO | S_IXUGO, NULL);
+
+ for (p = palinfo_entries; p->name ; p++){
+ p->entry = create_proc_read_entry (p->name, 0, palinfo_dir,
+ palinfo_read_entry, p->proc_read);
+ }
+
+ return 0;
+}
+
+static int __exit
+palinfo_exit(void)
+{
+ palinfo_entry_t *p;
+
+ for (p = palinfo_entries; p->name ; p++){
+ remove_proc_entry (p->name, palinfo_dir);
+ }
+ remove_proc_entry ("palinfo", 0);
+
+ return 0;
+}
+
+module_init(palinfo_init);
+module_exit(palinfo_exit);
diff -urN linux-davidm/arch/ia64/kernel/setup.c linux-2.4.0-test1-lia/arch/ia64/kernel/setup.c
--- linux-davidm/arch/ia64/kernel/setup.c Thu Jun 1 01:38:40 2000
+++ linux-2.4.0-test1-lia/arch/ia64/kernel/setup.c Thu Jun 1 01:04:49 2000
@@ -108,6 +108,8 @@
{
unsigned long max_pfn, bootmap_start, bootmap_size;
+ unw_init();
+
/*
* The secondary bootstrap loader passes us the boot
* parameters at the beginning of the ZERO_PAGE, so let's
@@ -155,10 +157,8 @@
#ifdef CONFIG_SMP
bootstrap_processor = hard_smp_processor_id();
current->processor = bootstrap_processor;
-#else
- cpu_init();
- identify_cpu(&cpu_data[0]);
#endif
+ cpu_init(); /* initialize the bootstrap CPU */
if (efi.acpi) {
/* Parse the ACPI tables */
@@ -270,11 +270,14 @@
u64 features;
} field;
} cpuid;
+ pal_vm_info_1_u_t vm1;
+ pal_vm_info_2_u_t vm2;
+ pal_status_t status;
+ unsigned long impl_va_msb = 50, phys_addr_size = 44; /* Itanium defaults */
int i;
- for (i = 0; i < 5; ++i) {
+ for (i = 0; i < 5; ++i)
cpuid.bits[i] = ia64_get_cpuid(i);
- }
memset(c, 0, sizeof(struct cpuinfo_ia64));
@@ -287,6 +290,24 @@
c->archrev = cpuid.field.archrev;
c->features = cpuid.field.features;
+ status = ia64_pal_vm_summary(&vm1, &vm2);
+ if (status == PAL_STATUS_SUCCESS) {
+#if 1
+ /*
+ * XXX the current PAL code returns IMPL_VA_MSB=60, which is dead-wrong.
+ * --davidm 00/05/26
+ */
+ impl_va_msb = 50;
+#else
+ impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
+#endif
+ phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
+ }
+ printk("processor implements %lu virtual and %lu physical address bits\n",
+ impl_va_msb + 1, phys_addr_size);
+ c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
+ c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
+
#ifdef CONFIG_IA64_SOFTSDV_HACKS
/* BUG: SoftSDV doesn't support the cpuid registers. */
 	if (c->vendor[0] == '\0')
@@ -301,6 +322,11 @@
void
cpu_init (void)
{
+ extern void __init ia64_rid_init (void);
+ extern void __init ia64_tlb_init (void);
+
+ identify_cpu(&my_cpu_data);
+
/* Clear the stack memory reserved for pt_regs: */
memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));
@@ -314,6 +340,14 @@
*/
ia64_set_dcr(IA64_DCR_DR | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_PP);
ia64_set_fpu_owner(0); /* initialize ar.k5 */
+
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
+
+ ia64_rid_init();
+ ia64_tlb_init();
+
+#ifdef CONFIG_SMP
+ normal_xtp();
+#endif
}
diff -urN linux-davidm/arch/ia64/kernel/signal.c linux-2.4.0-test1-lia/arch/ia64/kernel/signal.c
--- linux-davidm/arch/ia64/kernel/signal.c Thu Jun 1 01:38:40 2000
+++ linux-2.4.0-test1-lia/arch/ia64/kernel/signal.c Thu Jun 1 01:05:04 2000
@@ -71,8 +71,15 @@
* pre-set the correct error code here to ensure that the right values
* get saved in sigcontext by ia64_do_signal.
*/
- pt->r8 = EINTR;
- pt->r10 = -1;
+#ifdef CONFIG_IA32_SUPPORT
+ if (IS_IA32_PROCESS(pt)) {
+ pt->r8 = -EINTR;
+ } else
+#endif
+ {
+ pt->r8 = EINTR;
+ pt->r10 = -1;
+ }
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
schedule();
@@ -138,7 +145,8 @@
return err;
}
-int copy_siginfo_to_user(siginfo_t *to, siginfo_t *from)
+int
+copy_siginfo_to_user (siginfo_t *to, siginfo_t *from)
{
if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
return -EFAULT;
@@ -147,29 +155,32 @@
else {
int err;
- /* If you change siginfo_t structure, please be sure
- this code is fixed accordingly.
- It should never copy any pad contained in the structure
- to avoid security leaks, but must copy the generic
- 3 ints plus the relevant union member. */
+ /*
+ * If you change siginfo_t structure, please be sure
+ * this code is fixed accordingly. It should never
+ * copy any pad contained in the structure to avoid
+ * security leaks, but must copy the generic 3 ints
+ * plus the relevant union member.
+ */
err = __put_user(from->si_signo, &to->si_signo);
err |= __put_user(from->si_errno, &to->si_errno);
err |= __put_user((short)from->si_code, &to->si_code);
switch (from->si_code >> 16) {
- case __SI_FAULT >> 16:
- case __SI_POLL >> 16:
+ case __SI_FAULT >> 16:
+ err |= __put_user(from->si_isr, &to->si_isr);
+ case __SI_POLL >> 16:
err |= __put_user(from->si_addr, &to->si_addr);
err |= __put_user(from->si_imm, &to->si_imm);
break;
- case __SI_CHLD >> 16:
+ case __SI_CHLD >> 16:
err |= __put_user(from->si_utime, &to->si_utime);
err |= __put_user(from->si_stime, &to->si_stime);
err |= __put_user(from->si_status, &to->si_status);
- default:
+ default:
err |= __put_user(from->si_uid, &to->si_uid);
err |= __put_user(from->si_pid, &to->si_pid);
break;
- /* case __SI_RT: This is not generated by the kernel as of now. */
+ /* case __SI_RT: This is not generated by the kernel as of now. */
}
return err;
}
@@ -563,6 +574,11 @@
case ERESTARTSYS:
 		if ((ka->sa.sa_flags & SA_RESTART) == 0) {
case ERESTARTNOHAND:
+#ifdef CONFIG_IA32_SUPPORT
+ if (IS_IA32_PROCESS(pt))
+ pt->r8 = -EINTR;
+ else
+#endif
pt->r8 = EINTR;
/* note: pt->r10 is already -1 */
break;
diff -urN linux-davidm/arch/ia64/kernel/smp.c linux-2.4.0-test1-lia/arch/ia64/kernel/smp.c
--- linux-davidm/arch/ia64/kernel/smp.c Thu Jun 1 01:38:40 2000
+++ linux-2.4.0-test1-lia/arch/ia64/kernel/smp.c Thu Jun 1 01:06:05 2000
@@ -81,6 +81,7 @@
#ifndef CONFIG_ITANIUM_PTCG
# define IPI_FLUSH_TLB 3
#endif /*!CONFIG_ITANIUM_PTCG */
+#define IPI_KDB_INTERRUPT 4
/*
* Setup routine for controlling SMP activation
@@ -245,6 +246,9 @@
start += (1UL << nbits);
} while (start < end);
+ ia64_insn_group_barrier();
+ ia64_srlz_i(); /* srlz.i implies srlz.d */
+
if (saved_rid != flush_rid) {
ia64_set_rr(flush_start, saved_rid);
ia64_srlz_d();
@@ -440,19 +444,6 @@
}
}
-
-/*
- * Called by both boot and secondaries to move global data into
- * per-processor storage.
- */
-static inline void __init
-smp_store_cpu_info(int cpuid)
-{
- struct cpuinfo_ia64 *c = &cpu_data[cpuid];
-
- identify_cpu(c);
-}
-
static inline void __init
smp_calibrate_delay(int cpuid)
{
@@ -521,16 +512,8 @@
extern void ia64_init_itm(void);
extern void ia64_cpu_local_tick(void);
- ia64_set_dcr(IA64_DCR_DR | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_PP);
- ia64_set_fpu_owner(0);
- ia64_rid_init(); /* initialize region ids */
-
cpu_init();
- __flush_tlb_all();
-
- normal_xtp();
- smp_store_cpu_info(smp_processor_id());
smp_setup_percpu_timer(smp_processor_id());
/* setup the CPU local timer tick */
@@ -658,16 +641,7 @@
/* Setup BSP mappings */
__cpu_number_map[bootstrap_processor] = 0;
__cpu_logical_map[0] = bootstrap_processor;
- current->processor = bootstrap_processor;
- /* Mark BSP booted and get active_mm context */
- cpu_init();
-
- /* reset XTP for interrupt routing */
- normal_xtp();
-
- /* And generate an entry in cpu_data */
- smp_store_cpu_info(bootstrap_processor);
smp_calibrate_delay(smp_processor_id());
#if 0
smp_tune_scheduling();
@@ -785,4 +759,3 @@
}
}
-
diff -urN linux-davidm/arch/ia64/kernel/traps.c linux-2.4.0-test1-lia/arch/ia64/kernel/traps.c
--- linux-davidm/arch/ia64/kernel/traps.c Thu Jun 1 01:38:40 2000
+++ linux-2.4.0-test1-lia/arch/ia64/kernel/traps.c Thu Jun 1 01:07:02 2000
@@ -36,6 +36,10 @@
#include <linux/init.h>
#include <linux/sched.h>
+#ifdef CONFIG_KDB
+#include <linux/kdb.h>
+#endif
+
#include <asm/ia32.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
@@ -89,6 +93,13 @@
printk("%s[%d]: %s %ld\n", current->comm, current->pid, str, err);
+#ifdef CONFIG_KDB
+ while (1) {
+ kdb(KDB_REASON_PANIC, 0, regs);
+ printk("Cant go anywhere from Panic!\n");
+ }
+#endif
+
show_regs(regs);
if (current->thread.flags & IA64_KERNEL_DEATH) {
@@ -361,6 +372,42 @@
}
}
return 0;
+}
+
+struct illegal_op_return {
+ unsigned long fkt, arg1, arg2, arg3;
+};
+
+struct illegal_op_return
+ia64_illegal_op_fault (unsigned long ec, unsigned long arg1, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4, unsigned long arg5,
+ unsigned long arg6, unsigned long arg7, unsigned long stack)
+{
+ struct pt_regs *regs = (struct pt_regs *) &stack;
+ struct illegal_op_return rv;
+ struct siginfo si;
+ char buf[128];
+
+#ifdef CONFIG_IA64_BRL_EMU
+ {
+ extern struct illegal_op_return ia64_emulate_brl (struct pt_regs *, unsigned long);
+
+ rv = ia64_emulate_brl(regs, ec);
+ if (rv.fkt != (unsigned long) -1)
+ return rv;
+ }
+#endif
+
+ sprintf(buf, "IA-64 Illegal operation fault");
+ die_if_kernel(buf, regs, 0);
+
+ memset(&si, 0, sizeof(si));
+ si.si_signo = SIGILL;
+ si.si_code = ILL_ILLOPC;
+ si.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
+ force_sig_info(SIGILL, &si, current);
+ rv.fkt = 0;
+ return rv;
}
void
diff -urN linux-davidm/arch/ia64/kernel/unaligned.c linux-2.4.0-test1-lia/arch/ia64/kernel/unaligned.c
--- linux-davidm/arch/ia64/kernel/unaligned.c Fri Apr 21 15:21:24 2000
+++ linux-2.4.0-test1-lia/arch/ia64/kernel/unaligned.c Thu Jun 1 01:07:40 2000
@@ -1,8 +1,8 @@
/*
* Architecture-specific unaligned trap handling.
*
- * Copyright (C) 1999 Hewlett-Packard Co
- * Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com>
+ * Copyright (C) 1999-2000 Hewlett-Packard Co
+ * Copyright (C) 1999-2000 Stephane Eranian <eranian@hpl.hp.com>
*/
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -1410,6 +1410,25 @@
die_if_kernel("Unaligned reference while in kernel\n", regs, 30);
/* NOT_REACHED */
}
+ /*
+ * For now, we don't support user processes running big-endian
+ * which do unaligned accesses
+ */
+ if (ia64_psr(regs)->be) {
+ struct siginfo si;
+
+ printk(KERN_ERR "%s(%d): big-endian unaligned access %016lx (ip=%016lx) not "
+ "yet supported\n",
+ current->comm, current->pid, ifa, regs->cr_iip + ipsr->ri);
+
+ si.si_signo = SIGBUS;
+ si.si_errno = 0;
+ si.si_code = BUS_ADRALN;
+ si.si_addr = (void *) ifa;
+ send_sig_info(SIGBUS, &si, current);
+ return;
+ }
+
if (current->thread.flags & IA64_THREAD_UAC_SIGBUS) {
struct siginfo si;
@@ -1417,7 +1436,7 @@
si.si_errno = 0;
si.si_code = BUS_ADRALN;
si.si_addr = (void *) ifa;
- send_sig_info (SIGBUS, &si, current);
+ send_sig_info(SIGBUS, &si, current);
return;
}
diff -urN linux-davidm/arch/ia64/kernel/unwind.c linux-2.4.0-test1-lia/arch/ia64/kernel/unwind.c
--- linux-davidm/arch/ia64/kernel/unwind.c Thu Jun 1 01:38:40 2000
+++ linux-2.4.0-test1-lia/arch/ia64/kernel/unwind.c Thu Jun 1 01:07:55 2000
@@ -2,6 +2,16 @@
* Copyright (C) 1999-2000 Hewlett-Packard Co
* Copyright (C) 1999-2000 David Mosberger-Tang <davidm@hpl.hp.com>
*/
+/*
+ * SMP conventions:
+ * o updates to the global unwind data (in structure "unw") are serialized
+ * by the unw.lock spinlock
+ * o each unwind script has its own read-write lock; a thread must acquire
+ * a read lock before executing a script and must acquire a write lock
+ * before modifying a script
+ * o if both the unw.lock spinlock and a script's read-write lock must be
+ * acquired, then the read-write lock must be acquired first.
+ */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -11,6 +21,7 @@
#ifdef CONFIG_IA64_NEW_UNWIND
+#include <asm/delay.h>
#include <asm/ptrace.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
@@ -32,202 +43,517 @@
*/
#define UNWIND_TABLE_SORT_BUG
-#define UNW_DEBUG 1
+#define UNW_LOG_CACHE_SIZE 7 /* each unw_script is ~256 bytes in size */
+#define UNW_CACHE_SIZE (1 << UNW_LOG_CACHE_SIZE)
+
+#define UNW_LOG_HASH_SIZE (UNW_LOG_CACHE_SIZE + 1)
+#define UNW_HASH_SIZE (1 << UNW_LOG_HASH_SIZE)
+
+#define UNW_DEBUG 0
+#define UNW_STATS 0 /* WARNING: this disabled interrupts for long time-spans!! */
#if UNW_DEBUG
# define dprintk(format...) printk(format)
+# define inline
#else
# define dprintk(format...)
#endif
+#if UNW_STATS
+# define STAT(x...) x
+#else
+# define STAT(x...)
+#endif
+
#define alloc_reg_state() kmalloc(sizeof(struct unw_state_record), GFP_ATOMIC)
#define free_reg_state(usr) kfree(usr)
typedef unsigned long unw_word;
-
-static const enum unw_register_index save_order[] = {
- UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
- UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
-};
+typedef unsigned char unw_hash_index_t;
#define struct_offset(str,fld) ((char *)&((str *)NULL)->fld - (char *) 0)
-static unsigned short preg_index[UNW_NUM_REGS] = {
- struct_offset(struct unw_frame_info, pri_unat)/8, /* PRI_UNAT_GR */
- struct_offset(struct unw_frame_info, pri_unat)/8, /* PRI_UNAT_MEM */
- struct_offset(struct unw_frame_info, pbsp)/8,
- struct_offset(struct unw_frame_info, bspstore)/8,
- struct_offset(struct unw_frame_info, pfs)/8,
- struct_offset(struct unw_frame_info, rnat)/8,
- struct_offset(struct unw_frame_info, psp)/8,
- struct_offset(struct unw_frame_info, rp)/8,
- struct_offset(struct unw_frame_info, r4)/8,
- struct_offset(struct unw_frame_info, r5)/8,
- struct_offset(struct unw_frame_info, r6)/8,
- struct_offset(struct unw_frame_info, r7)/8,
- struct_offset(struct unw_frame_info, unat)/8,
- struct_offset(struct unw_frame_info, pr)/8,
- struct_offset(struct unw_frame_info, lc)/8,
- struct_offset(struct unw_frame_info, fpsr)/8,
- struct_offset(struct unw_frame_info, b1)/8,
- struct_offset(struct unw_frame_info, b2)/8,
- struct_offset(struct unw_frame_info, b3)/8,
- struct_offset(struct unw_frame_info, b4)/8,
- struct_offset(struct unw_frame_info, b5)/8,
- struct_offset(struct unw_frame_info, f2)/8,
- struct_offset(struct unw_frame_info, f3)/8,
- struct_offset(struct unw_frame_info, f4)/8,
- struct_offset(struct unw_frame_info, f5)/8,
- struct_offset(struct unw_frame_info, fr[16])/8,
- struct_offset(struct unw_frame_info, fr[17])/8,
- struct_offset(struct unw_frame_info, fr[18])/8,
- struct_offset(struct unw_frame_info, fr[19])/8,
- struct_offset(struct unw_frame_info, fr[20])/8,
- struct_offset(struct unw_frame_info, fr[21])/8,
- struct_offset(struct unw_frame_info, fr[22])/8,
- struct_offset(struct unw_frame_info, fr[23])/8,
- struct_offset(struct unw_frame_info, fr[24])/8,
- struct_offset(struct unw_frame_info, fr[25])/8,
- struct_offset(struct unw_frame_info, fr[26])/8,
- struct_offset(struct unw_frame_info, fr[27])/8,
- struct_offset(struct unw_frame_info, fr[28])/8,
- struct_offset(struct unw_frame_info, fr[29])/8,
- struct_offset(struct unw_frame_info, fr[30])/8,
- struct_offset(struct unw_frame_info, fr[31])/8,
-};
+static struct {
+ struct unw_table *tables;
+ const unsigned char save_order[8];
+ /* Maps a preserved register index (preg_index) to corresponding switch_stack offset: */
+ unsigned short sw_off[sizeof(struct unw_frame_info) / 8];
+
+ unsigned short lru_head;
+ unsigned short lru_tail;
+ unsigned short preg_index[UNW_NUM_REGS];
+ struct unw_table kernel_table;
+
+ spinlock_t lock;
+ unw_hash_index_t hash[UNW_HASH_SIZE];
+ struct unw_script cache[UNW_CACHE_SIZE];
+
+# if UNW_DEBUG
+ const char *preg_name[UNW_NUM_REGS];
+# endif
+# if UNW_STATS
+ struct {
+ struct {
+ int lookups;
+ int hinted_hits;
+ int normal_hits;
+ int collision_chain_traversals;
+ } cache;
+ struct {
+ unsigned long build_time;
+ unsigned long run_time;
+ unsigned long parse_time;
+ int builds;
+ int news;
+ int collisions;
+ int runs;
+ } script;
+ struct {
+ unsigned long init_time;
+ unsigned long unwind_time;
+ int inits;
+ int unwinds;
+ } api;
+ } stat;
+# endif
+} unw = {
+ tables: &unw.kernel_table,
+ lock: SPIN_LOCK_UNLOCKED,
+ save_order: {
+ UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
+ UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
+ },
+ preg_index: {
+ struct_offset(struct unw_frame_info, pri_unat)/8, /* PRI_UNAT_GR */
+ struct_offset(struct unw_frame_info, pri_unat)/8, /* PRI_UNAT_MEM */
+ struct_offset(struct unw_frame_info, pbsp)/8,
+ struct_offset(struct unw_frame_info, bspstore)/8,
+ struct_offset(struct unw_frame_info, pfs)/8,
+ struct_offset(struct unw_frame_info, rnat)/8,
+ struct_offset(struct unw_frame_info, psp)/8,
+ struct_offset(struct unw_frame_info, rp)/8,
+ struct_offset(struct unw_frame_info, r4)/8,
+ struct_offset(struct unw_frame_info, r5)/8,
+ struct_offset(struct unw_frame_info, r6)/8,
+ struct_offset(struct unw_frame_info, r7)/8,
+ struct_offset(struct unw_frame_info, unat)/8,
+ struct_offset(struct unw_frame_info, pr)/8,
+ struct_offset(struct unw_frame_info, lc)/8,
+ struct_offset(struct unw_frame_info, fpsr)/8,
+ struct_offset(struct unw_frame_info, b1)/8,
+ struct_offset(struct unw_frame_info, b2)/8,
+ struct_offset(struct unw_frame_info, b3)/8,
+ struct_offset(struct unw_frame_info, b4)/8,
+ struct_offset(struct unw_frame_info, b5)/8,
+ struct_offset(struct unw_frame_info, f2)/8,
+ struct_offset(struct unw_frame_info, f3)/8,
+ struct_offset(struct unw_frame_info, f4)/8,
+ struct_offset(struct unw_frame_info, f5)/8,
+ struct_offset(struct unw_frame_info, fr[16 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[17 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[18 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[19 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[20 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[21 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[22 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[23 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[24 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[25 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[26 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[27 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[28 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[29 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[30 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[31 - 16])/8,
+ },
+ hash : { [0 ... UNW_HASH_SIZE - 1] = -1 },
#if UNW_DEBUG
-
-static const char *preg_name[UNW_NUM_REGS] = {
- "pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
- "r4", "r5", "r6", "r7",
- "ar.unat", "pr", "ar.lc", "ar.fpsr",
- "b1", "b2", "b3", "b4", "b5",
- "f2", "f3", "f4", "f5",
- "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
- "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
+ preg_name: {
+ "pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
+ "r4", "r5", "r6", "r7",
+ "ar.unat", "pr", "ar.lc", "ar.fpsr",
+ "b1", "b2", "b3", "b4", "b5",
+ "f2", "f3", "f4", "f5",
+ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
+ "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
+ }
+#endif
};
-#endif /* UNW_DEBUG */
-
-/* Maps a preserved register index (preg_index) into the corresponding switch_stack offset: */
-static unsigned short sw_offset[sizeof (struct unw_frame_info) / 8];
+\f
+/* Unwind accessors. */
-static struct unw_table *unw_tables;
-
-static void
-push (struct unw_state_record *sr)
+int
+unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write)
{
- struct unw_reg_state *rs;
+ unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat;
+ struct unw_ireg *ireg;
+ struct pt_regs *pt;
- rs = alloc_reg_state();
- memcpy(rs, &sr->curr, sizeof(*rs));
- rs->next = sr->stack;
- sr->stack = rs;
-}
+ if ((unsigned) regnum - 1 >= 127)
+ return -1;
-static void
-pop (struct unw_state_record *sr)
-{
- struct unw_reg_state *rs;
+ if (regnum < 32) {
+ if (regnum >= 4 && regnum <= 7) {
+ /* access a preserved register */
+ ireg = &info->r4 + (regnum - 4);
+ addr = ireg->loc;
+ if (addr) {
+ nat_addr = addr + ireg->nat.off;
+ switch (ireg->nat.type) {
+ case UNW_NAT_VAL:
+ /* simulate getf.sig/setf.sig */
+ if (write) {
+ if (*nat) {
+ /* write NaTVal and be done with it */
+ addr[0] = 0;
+ addr[1] = 0x1fffe;
+ return 0;
+ }
+ addr[1] = 0x1003e;
+ } else {
+ if (addr[0] == 0 && addr[1] == 0x1fffe) {
+ /* return NaT and be done with it */
+ *val = 0;
+ *nat = 1;
+ return 0;
+ }
+ }
+ /* fall through */
+ case UNW_NAT_NONE:
+ nat_addr = &dummy_nat;
+ break;
- if (!sr->stack) {
- printk ("unwind: stack underflow!\n");
- return;
+ case UNW_NAT_SCRATCH:
+ if (info->unat)
+ nat_addr = info->unat;
+ else
+ nat_addr = &info->sw->caller_unat;
+ case UNW_NAT_PRI_UNAT:
+ nat_mask = (1UL << ((long) addr & 0x1f8)/8);
+ break;
+
+ case UNW_NAT_STACKED:
+ nat_addr = ia64_rse_rnat_addr(addr);
+ if ((unsigned long) addr < info->regstk.limit
+ || (unsigned long) addr >= info->regstk.top)
+ return -1;
+ if ((unsigned long) nat_addr >= info->regstk.top)
+ nat_addr = &info->sw->ar_rnat;
+ nat_mask = (1UL << ia64_rse_slot_num(addr));
+ break;
+ }
+ } else {
+ addr = &info->sw->r4 + (regnum - 4);
+ nat_addr = &info->sw->ar_unat;
+ nat_mask = (1UL << ((long) addr & 0x1f8)/8);
+ }
+ } else {
+ /* access a scratch register */
+ pt = (struct pt_regs *) info->sp - 1;
+ if (regnum <= 3)
+ addr = &pt->r1 + (regnum - 1);
+ else if (regnum <= 11)
+ addr = &pt->r8 + (regnum - 8);
+ else if (regnum <= 15)
+ addr = &pt->r12 + (regnum - 12);
+ else
+ addr = &pt->r16 + (regnum - 16);
+ if (info->unat)
+ nat_addr = info->unat;
+ else
+ nat_addr = &info->sw->caller_unat;
+ nat_mask = (1UL << ((long) addr & 0x1f8)/8);
+ }
+ } else {
+ /* access a stacked register */
+ addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum);
+ nat_addr = ia64_rse_rnat_addr(addr);
+ if ((unsigned long) addr < info->regstk.limit
+ || (unsigned long) addr >= info->regstk.top)
+ {
+ dprintk("unwind: ignoring attempt to access register outside of rbs\n");
+ return -1;
+ }
+ if ((unsigned long) nat_addr >= info->regstk.top)
+ nat_addr = &info->sw->ar_rnat;
+ nat_mask = (1UL << ia64_rse_slot_num(addr));
}
- rs = sr->stack;
- sr->stack = rs->next;
- free_reg_state(rs);
-}
-static enum unw_register_index __attribute__((const))
-decode_abreg (unsigned char abreg, int memory)
-{
- switch (abreg) {
- case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04);
- case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22);
- case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30);
- case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41);
- case 0x60: return UNW_REG_PR;
- case 0x61: return UNW_REG_PSP;
- case 0x62: return memory ? UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR;
- case 0x63: return UNW_REG_RP;
- case 0x64: return UNW_REG_BSP;
- case 0x65: return UNW_REG_BSPSTORE;
- case 0x66: return UNW_REG_RNAT;
- case 0x67: return UNW_REG_UNAT;
- case 0x68: return UNW_REG_FPSR;
- case 0x69: return UNW_REG_PFS;
- case 0x6a: return UNW_REG_LC;
- default:
- break;
+ if (write) {
+ *addr = *val;
+ *nat_addr = (*nat_addr & ~nat_mask) | nat_mask;
+ } else {
+ *val = *addr;
+ *nat = (*nat_addr & nat_mask) != 0;
}
- dprintk("unwind: bad abreg=0x%x\n", abreg);
- return UNW_REG_LC;
+ return 0;
}
-static void
-set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val)
+int
+unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
{
- reg->val = val;
- reg->where = where;
- if (reg->when == UNW_WHEN_NEVER)
- reg->when = when;
-}
+ unsigned long *addr;
+ struct pt_regs *pt;
-static void
-alloc_spill_area (unsigned long *offp, unsigned long regsize,
- struct unw_reg_info *lo, struct unw_reg_info *hi)
-{
- struct unw_reg_info *reg;
+ pt = (struct pt_regs *) info->sp - 1;
+ switch (regnum) {
+ /* scratch: */
+ case 0: addr = &pt->b0; break;
+ case 6: addr = &pt->b6; break;
+ case 7: addr = &pt->b7; break;
- for (reg = hi; reg >= lo; --reg) {
- if (reg->where == UNW_WHERE_SPILL_HOME) {
- reg->where = UNW_WHERE_PSPREL;
- reg->val = *offp;
- *offp += regsize;
- }
+ /* preserved: */
+ case 1: case 2: case 3: case 4: case 5:
+ addr = *(&info->b1 + (regnum - 1));
+ break;
+
+ default:
+ return -1;
}
+ if (write)
+ *addr = *val;
+ else
+ *val = *addr;
+ return 0;
}
-static void
-spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t)
+int
+unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write)
{
- struct unw_reg_info *reg;
+ struct ia64_fpreg *addr = 0;
+ struct pt_regs *pt;
- for (reg = *regp; reg <= lim; ++reg) {
- if (reg->where == UNW_WHERE_SPILL_HOME) {
- reg->when = t;
- *regp = reg + 1;
- return;
- }
+ if ((unsigned) (regnum - 2) >= 30)
+ return -1;
+
+ pt = (struct pt_regs *) info->sp - 1;
+
+ if (regnum <= 5) {
+ addr = *(&info->f2 + (regnum - 2));
+ if (!addr)
+ addr = &info->sw->f2 + (regnum - 2);
+ } else if (regnum <= 15) {
+ if (regnum <= 9)
+ addr = &pt->f6 + (regnum - 6);
+ else
+ addr = &info->sw->f10 + (regnum - 10);
+ } else if (regnum <= 31) {
+ addr = *(&info->fr[regnum - 16]);
+ if (!addr)
+ addr = &info->sw->f16 + (regnum - 16);
}
- dprintk("unwind: excess spill!\n");
+
+ if (write)
+ *addr = *val;
+ else
+ *val = *addr;
+ return 0;
}
-static void
-finish_prologue (struct unw_state_record *sr)
+int
+unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
{
- struct unw_reg_info *reg;
- unsigned long off;
- int i;
+ unsigned long *addr;
+ struct pt_regs *pt;
- /*
- * First, resolve implicit register save locations
- * (see Section "11.4.2.3 Rules for Using Unwind
- * Descriptors", rule 3):
- */
- for (i = 0; i < (int) sizeof(save_order)/sizeof(save_order[0]); ++i) {
- reg = sr->curr.reg + save_order[i];
- if (reg->where == UNW_WHERE_GR_SAVE) {
- reg->where = UNW_WHERE_GR;
- reg->val = sr->gr_save_loc++;
- }
- }
+ pt = (struct pt_regs *) info->sp - 1;
- /*
- * Next, compute when the fp, general, and branch registers get
- * saved. This must come before alloc_spill_area() because
+ switch (regnum) {
+ case UNW_AR_BSP:
+ addr = info->pbsp;
+ if (!addr)
+ addr = &info->sw->ar_bspstore;
+ break;
+
+ case UNW_AR_BSPSTORE:
+ addr = info->bspstore;
+ if (!addr)
+ addr = &info->sw->ar_bspstore;
+ break;
+
+ case UNW_AR_PFS:
+ addr = info->pfs;
+ if (!addr)
+ addr = &info->sw->ar_pfs;
+ break;
+
+ case UNW_AR_RNAT:
+ addr = info->rnat;
+ if (!addr)
+ addr = &info->sw->ar_rnat;
+ break;
+
+ case UNW_AR_UNAT:
+ addr = info->unat;
+ if (!addr)
+ addr = &info->sw->ar_unat;
+ break;
+
+ case UNW_AR_LC:
+ addr = info->lc;
+ if (!addr)
+ addr = &info->sw->ar_lc;
+ break;
+
+ case UNW_AR_FPSR:
+ addr = info->fpsr;
+ if (!addr)
+ addr = &info->sw->ar_fpsr;
+ break;
+
+ case UNW_AR_RSC:
+ addr = &pt->ar_rsc;
+ break;
+
+ case UNW_AR_CCV:
+ addr = &pt->ar_ccv;
+ break;
+
+ default:
+ return -1;
+ }
+
+ if (write)
+ *addr = *val;
+ else
+ *val = *addr;
+ return 0;
+}
+
+inline int
+unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
+{
+ unsigned long *addr;
+
+ addr = info->pr;
+ if (!addr)
+ addr = &info->sw->pr;
+
+ if (write)
+ *addr = *val;
+ else
+ *val = *addr;
+ return 0;
+}
+
+\f
+/* Unwind decoder routines */
+
+static inline void
+push (struct unw_state_record *sr)
+{
+ struct unw_reg_state *rs;
+
+ rs = alloc_reg_state();
+ memcpy(rs, &sr->curr, sizeof(*rs));
+ rs->next = sr->stack;
+ sr->stack = rs;
+}
+
+static void
+pop (struct unw_state_record *sr)
+{
+ struct unw_reg_state *rs;
+
+ if (!sr->stack) {
+ printk ("unwind: stack underflow!\n");
+ return;
+ }
+ rs = sr->stack;
+ sr->stack = rs->next;
+ free_reg_state(rs);
+}
+
+static enum unw_register_index __attribute__((const))
+decode_abreg (unsigned char abreg, int memory)
+{
+ switch (abreg) {
+ case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04);
+ case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22);
+ case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30);
+ case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41);
+ case 0x60: return UNW_REG_PR;
+ case 0x61: return UNW_REG_PSP;
+ case 0x62: return memory ? UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR;
+ case 0x63: return UNW_REG_RP;
+ case 0x64: return UNW_REG_BSP;
+ case 0x65: return UNW_REG_BSPSTORE;
+ case 0x66: return UNW_REG_RNAT;
+ case 0x67: return UNW_REG_UNAT;
+ case 0x68: return UNW_REG_FPSR;
+ case 0x69: return UNW_REG_PFS;
+ case 0x6a: return UNW_REG_LC;
+ default:
+ break;
+ }
+ dprintk("unwind: bad abreg=0x%x\n", abreg);
+ return UNW_REG_LC;
+}
+
+static void
+set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val)
+{
+ reg->val = val;
+ reg->where = where;
+ if (reg->when == UNW_WHEN_NEVER)
+ reg->when = when;
+}
+
+static void
+alloc_spill_area (unsigned long *offp, unsigned long regsize,
+ struct unw_reg_info *lo, struct unw_reg_info *hi)
+{
+ struct unw_reg_info *reg;
+
+ for (reg = hi; reg >= lo; --reg) {
+ if (reg->where == UNW_WHERE_SPILL_HOME) {
+ reg->where = UNW_WHERE_PSPREL;
+ reg->val = *offp;
+ *offp += regsize;
+ }
+ }
+}
+
+static inline void
+spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t)
+{
+ struct unw_reg_info *reg;
+
+ for (reg = *regp; reg <= lim; ++reg) {
+ if (reg->where == UNW_WHERE_SPILL_HOME) {
+ reg->when = t;
+ *regp = reg + 1;
+ return;
+ }
+ }
+ dprintk("unwind: excess spill!\n");
+}
+
+static inline void
+finish_prologue (struct unw_state_record *sr)
+{
+ struct unw_reg_info *reg;
+ unsigned long off;
+ int i;
+
+ /*
+ * First, resolve implicit register save locations
+ * (see Section "11.4.2.3 Rules for Using Unwind
+ * Descriptors", rule 3):
+ */
+ for (i = 0; i < (int) sizeof(unw.save_order)/sizeof(unw.save_order[0]); ++i) {
+ reg = sr->curr.reg + unw.save_order[i];
+ if (reg->where == UNW_WHERE_GR_SAVE) {
+ reg->where = UNW_WHERE_GR;
+ reg->val = sr->gr_save_loc++;
+ }
+ }
+
+ /*
+ * Next, compute when the fp, general, and branch registers get
+ * saved. This must come before alloc_spill_area() because
* we need to know which registers are spilled to their home
* locations.
*/
@@ -267,7 +593,7 @@
* Region header descriptors.
*/
-static inline void
+static void
desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave,
struct unw_state_record *sr)
{
@@ -298,7 +624,7 @@
if (!body) {
for (i = 0; i < 4; ++i) {
if (mask & 0x8)
- set_reg(sr->curr.reg + save_order[i], UNW_WHERE_GR,
+ set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR,
sr->region_start + sr->region_len - 1, grsave++);
mask <<= 1;
}
@@ -316,8 +642,10 @@
static inline void
desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
{
- dprintk("unwind: ignoring unwabi(abi=0x%x,context=0x%x)\n", abi, context);
- sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
+ if (abi == 0 && context == 'i')
+ sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
+ else
+ dprintk("unwind: ignoring unwabi(abi=0x%x,context=0x%x)\n", abi, context);
}
static inline void
@@ -645,44 +973,126 @@
#include "unwind_decoder.c"
-static struct unw_table_entry *
-lookup (struct unw_table *table, unsigned long rel_ip)
+\f
+/* Unwind scripts. */
+
+static inline unw_hash_index_t
+hash (unsigned long ip)
{
- struct unw_table_entry *e = 0;
- unsigned long lo, hi, mid;
+# define magic 0x9e3779b97f4a7c16 /* (sqrt(5)/2-1)*2^64 */
- /* do a binary search for right entry: */
- for (lo = 0, hi = table->length; lo < hi; ) {
- mid = (lo + hi) / 2;
- e = &table->array[mid];
- if (rel_ip < e->start_offset)
- hi = mid;
- else if (rel_ip >= e->end_offset)
- lo = mid + 1;
- else
- break;
- }
- return e;
+ return (ip >> 4)*magic >> (64 - UNW_LOG_HASH_SIZE);
}
-static struct unw_script *
-script_new (void)
+static inline long
+cache_match (struct unw_script *script, unsigned long ip, unsigned long pr_val)
{
- struct unw_script *script;
+ read_lock(&script->lock);
+ if ((ip) == (script)->ip && (((pr_val) ^ (script)->pr_val) & (script)->pr_mask) == 0)
+ /* keep the read lock... */
+ return 1;
+ read_unlock(&script->lock);
+ return 0;
+}
- script = kmalloc(sizeof(*script), GFP_ATOMIC); /* XXX fix me */
- memset(script, 0, sizeof(*script));
- return script;
+static inline struct unw_script *
+script_lookup (struct unw_frame_info *info)
+{
+ struct unw_script *script = unw.cache + info->hint;
+ unsigned long ip, pr_val;
+
+ STAT(++unw.stat.cache.lookups);
+
+ ip = info->ip;
+ pr_val = info->pr_val;
+
+ if (cache_match(script, ip, pr_val)) {
+ STAT(++unw.stat.cache.hinted_hits);
+ return script;
+ }
+
+ script = unw.cache + unw.hash[hash(ip)];
+ while (1) {
+ if (cache_match(script, ip, pr_val)) {
+ /* update hint; no locking required as single-word writes are atomic */
+ STAT(++unw.stat.cache.normal_hits);
+ unw.cache[info->prev_script].hint = script - unw.cache;
+ return script;
+ }
+ if (script->coll_chain >= UNW_HASH_SIZE)
+ return 0;
+ script = unw.cache + script->coll_chain;
+ STAT(++unw.stat.cache.collision_chain_traversals);
+ }
}
-static void
-script_emit (struct unw_script *script, struct unw_insn insn)
+/*
+ * On returning, a write lock for the SCRIPT is still being held.
+ */
+static inline struct unw_script *
+script_new (unsigned long ip)
{
- if (script->count >= UNW_MAX_SCRIPT_LEN) {
- dprintk("unwind: script exceeds maximum size of %u instructions!\n",
- UNW_MAX_SCRIPT_LEN);
+ struct unw_script *script, *prev, *tmp;
+ unsigned short head;
+ unsigned long flags;
+ unsigned char index;
+
+ STAT(++unw.stat.script.news);
+
+ /*
+ * Atomically fetch the least recently used script. We can't
+ * do this via unw.lock because we also need to acquire the
+ * script's lock and to avoid deadlock, we must acquire the
+ * latter before the former.
+ */
+ do {
+ head = unw.lru_head;
+ } while (cmpxchg(&unw.lru_head, head, unw.cache[head].lru_chain) != head);
+
+ script = unw.cache + head;
+
+ write_lock(&script->lock);
+
+ spin_lock_irqsave(&unw.lock, flags);
+ {
+ /* re-insert script at the tail of the LRU chain: */
+ unw.cache[unw.lru_tail].lru_chain = head;
+ unw.lru_tail = head;
+
+ /* remove the old script from the hash table (if it's there): */
+ index = hash(script->ip);
+ tmp = unw.cache + unw.hash[index];
+ prev = 0;
+ while (1) {
+ if (tmp == script) {
+ if (prev)
+ prev->coll_chain = tmp->coll_chain;
+ else
+ unw.hash[index] = tmp->coll_chain;
+ break;
+ } else
+ prev = tmp;
+ if (tmp->coll_chain >= UNW_CACHE_SIZE)
+ /* old script wasn't in the hash-table */
+ break;
+ tmp = unw.cache + tmp->coll_chain;
+ }
+
+ /* enter new script in the hash table */
+ index = hash(ip);
+ script->coll_chain = unw.hash[index];
+ unw.hash[index] = script - unw.cache;
+
+ script->ip = ip; /* set new IP while we're holding the locks */
+
+ STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions);
}
- script->insn[script->count++] = insn;
+ spin_unlock_irqrestore(&unw.lock, flags);
+
+ script->flags = 0;
+ script->hint = 0;
+ script->count = 0;
+ return script;
}
static void
@@ -690,17 +1100,34 @@
{
script->pr_mask = sr->pr_mask;
script->pr_val = sr->pr_val;
+ /*
+ * We could down-grade our write-lock on script->lock here but
+ * the rwlock API doesn't offer atomic lock downgrading, so
+ * we'll just keep the write-lock and release it later when
+ * we're done using the script.
+ */
}
static inline void
-emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
+script_emit (struct unw_script *script, struct unw_insn insn)
{
- struct unw_reg_info *r = sr->curr.reg + i;
- enum unw_insn_opcode opc;
- struct unw_insn insn;
- unsigned long val;
-
- switch (r->where) {
+ if (script->count >= UNW_MAX_SCRIPT_LEN) {
+ dprintk("unwind: script exceeds maximum size of %u instructions!\n",
+ UNW_MAX_SCRIPT_LEN);
+ return;
+ }
+ script->insn[script->count++] = insn;
+}
+
+static inline void
+emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
+{
+ struct unw_reg_info *r = sr->curr.reg + i;
+ enum unw_insn_opcode opc;
+ struct unw_insn insn;
+ unsigned long val;
+
+ switch (r->where) {
case UNW_WHERE_GR:
if (r->val >= 32) {
/* register got spilled to a stacked register */
@@ -734,7 +1161,7 @@
return;
}
insn.opc = opc;
- insn.dst = preg_index[i];
+ insn.dst = unw.preg_index[i];
insn.val = val;
script_emit(script, insn);
}
@@ -765,10 +1192,10 @@
opc = UNW_INSN_MOVE2;
need_nat_info = 0;
}
- val = preg_index[UNW_REG_R4 + (rval - 4)];
+ val = unw.preg_index[UNW_REG_R4 + (rval - 4)];
} else {
opc = UNW_INSN_LOAD_SPREL;
- val = 0x10 - sizeof(struct pt_regs);
+ val = -sizeof(struct pt_regs);
if (rval >= 1 && rval <= 3)
val += struct_offset(struct pt_regs, r1) + 8*(rval - 1);
else if (rval <= 11)
@@ -784,12 +1211,12 @@
case UNW_WHERE_FR:
if (rval <= 5)
- val = preg_index[UNW_REG_F2 + (rval - 1)];
+ val = unw.preg_index[UNW_REG_F2 + (rval - 1)];
else if (rval >= 16 && rval <= 31)
- val = preg_index[UNW_REG_F16 + (rval - 16)];
+ val = unw.preg_index[UNW_REG_F16 + (rval - 16)];
else {
opc = UNW_INSN_LOAD_SPREL;
- val = 0x10 - sizeof(struct pt_regs);
+ val = -sizeof(struct pt_regs);
if (rval <= 9)
val += struct_offset(struct pt_regs, f6) + 16*(rval - 6);
else
@@ -799,10 +1226,10 @@
case UNW_WHERE_BR:
if (rval >= 1 && rval <= 5)
- val = preg_index[UNW_REG_B1 + (rval - 1)];
+ val = unw.preg_index[UNW_REG_B1 + (rval - 1)];
else {
opc = UNW_INSN_LOAD_SPREL;
- val = 0x10 - sizeof(struct pt_regs);
+ val = -sizeof(struct pt_regs);
+ if (rval == 0)
+ val += struct_offset(struct pt_regs, b0);
+ else if (rval == 6)
@@ -825,412 +1252,191 @@
break;
}
insn.opc = opc;
- insn.dst = preg_index[i];
+ insn.dst = unw.preg_index[i];
insn.val = val;
script_emit(script, insn);
if (need_nat_info)
emit_nat_info(sr, i, script);
}
+static inline struct unw_table_entry *
+lookup (struct unw_table *table, unsigned long rel_ip)
+{
+ struct unw_table_entry *e = 0;
+ unsigned long lo, hi, mid;
+
+ /* do a binary search for right entry: */
+ for (lo = 0, hi = table->length; lo < hi; ) {
+ mid = (lo + hi) / 2;
+ e = &table->array[mid];
+ if (rel_ip < e->start_offset)
+ hi = mid;
+ else if (rel_ip >= e->end_offset)
+ lo = mid + 1;
+ else
+ break;
+ }
+ return e;
+}
+
/*
* Build an unwind script that unwinds from state OLD_STATE to the
* entrypoint of the function that called OLD_STATE.
*/
-static struct unw_script *
+static inline struct unw_script *
build_script (struct unw_frame_info *info)
{
struct unw_reg_state *rs, *next;
struct unw_table_entry *e = 0;
struct unw_script *script = 0;
+ unsigned long ip = info->ip;
struct unw_state_record sr;
struct unw_table *table;
struct unw_reg_info *r;
struct unw_insn insn;
u8 *dp, *desc_end;
- unsigned long ip;
u64 hdr;
int i;
+ STAT(unsigned long start, parse_start;)
+
+ STAT(++unw.stat.script.builds; start = ia64_get_itc());
/* build state record */
memset(&sr, 0, sizeof(sr));
for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
r->when = UNW_WHEN_NEVER;
- sr.pr_val = info->pr_val;
-
- script = script_new ();
- if (!script) {
- dprintk("unwind: failed to create unwind script\n");
- return 0;
- }
- ip = script->ip = info->ip;
-
- /* search the kernels and the modules' unwind tables for IP: */
-
- for (table = unw_tables; table; table = table->next) {
- if (ip >= table->start && ip < table->end) {
- e = lookup(table, ip - table->segment_base);
- break;
- }
- }
- if (!e) {
- /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
- dprintk("unwind: no unwind info for ip=%lx\n", ip);
- sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
- sr.curr.reg[UNW_REG_RP].when = -1;
- sr.curr.reg[UNW_REG_RP].val = 0;
- compile_reg(&sr, UNW_REG_RP, script);
- script_finalize(script, &sr);
- return script;
- }
-
- sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))
- + (ip & 0xfUL));
- hdr = *(u64 *) (table->segment_base + e->info_offset);
- dp = (u8 *) (table->segment_base + e->info_offset + 8);
- desc_end = dp + 8*UNW_LENGTH(hdr);
-
- while (!sr.done && dp < desc_end)
- dp = unw_decode(dp, sr.in_body, &sr);
-
- if (sr.when_target > sr.epilogue_start) {
- /*
- * sp has been restored and all values on the memory stack below
- * psp also have been restored.
- */
- sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE;
- sr.curr.reg[UNW_REG_PSP].val = 0;
- for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
- if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10)
- || r->where == UNW_WHERE_SPREL)
- r->where = UNW_WHERE_NONE;
- }
-
- script->flags = sr.flags;
-
- /*
- * If RP did't get saved, generate entry for the return link
- * register.
- */
- if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) {
- sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
- sr.curr.reg[UNW_REG_RP].when = -1;
- sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
- }
-
-#if UNW_DEBUG
- printk ("unwind: state record for func 0x%lx, t=%u:\n",
- table->segment_base + e->start_offset, sr.when_target);
- for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
- if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
- printk(" %s <- ", preg_name[r - sr.curr.reg]);
- switch (r->where) {
- case UNW_WHERE_GR: printk("r%lu", r->val); break;
- case UNW_WHERE_FR: printk("f%lu", r->val); break;
- case UNW_WHERE_BR: printk("b%lu", r->val); break;
- case UNW_WHERE_SPREL: printk("[sp+0x%lx]", r->val); break;
- case UNW_WHERE_PSPREL: printk("[psp+0x%lx]", 0x10 - r->val); break;
- case UNW_WHERE_NONE:
- printk("%s+0x%lx", preg_name[r - sr.curr.reg], r->val);
- break;
- default: printk("BADWHERE(%d)", r->where); break;
- }
- printk ("\t\t%d\n", r->when);
- }
- }
-#endif
-
- /* translate state record into unwinder instructions: */
-
- if (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE
- && sr.when_target > sr.curr.reg[UNW_REG_PSP].when && sr.curr.reg[UNW_REG_PSP].val != 0)
- {
- /* new psp is sp plus frame size */
- insn.opc = UNW_INSN_ADD;
- insn.dst = preg_index[UNW_REG_PSP];
- insn.val = sr.curr.reg[UNW_REG_PSP].val;
- script_emit(script, insn);
- }
-
- /* determine where the primary UNaT is: */
- if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
- i = UNW_REG_PRI_UNAT_MEM;
- else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when)
- i = UNW_REG_PRI_UNAT_GR;
- else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
- i = UNW_REG_PRI_UNAT_MEM;
- else
- i = UNW_REG_PRI_UNAT_GR;
-
- compile_reg(&sr, i, script);
-
- for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i)
- compile_reg(&sr, i, script);
-
- /* free labelled register states & stack: */
-
- for (rs = sr.reg_state_list; rs; rs = next) {
- next = rs->next;
- free_reg_state(rs);
- }
- while (sr.stack)
- pop(&sr);
-
- script_finalize(script, &sr);
- return script;
-}
-
-int
-unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write)
-{
- unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat;
- struct unw_ireg *ireg;
- struct pt_regs *pt;
-
- if ((unsigned) regnum - 1 >= 127)
- return -1;
-
- if (regnum < 32) {
- if (regnum >= 4 && regnum <= 7) {
- /* access a preserved register */
- ireg = &info->r4 + (regnum - 4);
- addr = ireg->loc;
- if (addr) {
- nat_addr = addr + ireg->nat.off;
- switch (ireg->nat.type) {
- case UNW_NAT_VAL:
- /* simulate getf.sig/setf.sig */
- if (write) {
- if (*nat) {
- /* write NaTVal and be done with it */
- addr[0] = 0;
- addr[1] = 0x1fffe;
- return 0;
- }
- addr[1] = 0x1003e;
- } else {
- if (addr[0] == 0 && addr[1] == 0x1fffe) {
- /* return NaT and be done with it */
- *val = 0;
- *nat = 1;
- return 0;
- }
- }
- /* fall through */
- case UNW_NAT_NONE:
- nat_addr = &dummy_nat;
- break;
-
- case UNW_NAT_SCRATCH:
- if (info->unat)
- nat_addr = info->unat;
- else
- nat_addr = &info->sw->caller_unat;
- case UNW_NAT_PRI_UNAT:
- nat_mask = (1UL << ((long) addr & 0x1f8)/8);
- break;
-
- case UNW_NAT_STACKED:
- nat_addr = ia64_rse_rnat_addr(addr);
- if ((unsigned long) addr < info->regstk.limit
- || (unsigned long) addr >= info->regstk.top)
- return -1;
- if ((unsigned long) nat_addr >= info->regstk.top)
- nat_addr = &info->sw->ar_rnat;
- nat_mask = (1UL << ia64_rse_slot_num(addr));
- break;
- }
- } else {
- addr = &info->sw->r4 + (regnum - 4);
- nat_addr = &info->sw->ar_unat;
- nat_mask = (1UL << ((long) addr & 0x1f8)/8);
- }
- } else {
- /* access a scratch register */
- pt = (struct pt_regs *) info->sp - 1;
- if (regnum <= 3)
- addr = &pt->r1 + (regnum - 1);
- else if (regnum <= 11)
- addr = &pt->r8 + (regnum - 8);
- else if (regnum <= 15)
- addr = &pt->r12 + (regnum - 12);
- else
- addr = &pt->r16 + (regnum - 16);
- if (info->unat)
- nat_addr = info->unat;
- else
- nat_addr = &info->sw->caller_unat;
- nat_mask = (1UL << ((long) addr & 0x1f8)/8);
- }
- } else {
- /* access a stacked register */
- addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum);
- nat_addr = ia64_rse_rnat_addr(addr);
- if ((unsigned long) addr < info->regstk.limit
- || (unsigned long) addr >= info->regstk.top)
- {
- dprintk("unwind: ignoring attempt to access register outside of rbs\n");
- return -1;
- }
- if ((unsigned long) nat_addr >= info->regstk.top)
- nat_addr = &info->sw->ar_rnat;
- nat_mask = (1UL << ia64_rse_slot_num(addr));
- }
-
- if (write) {
- *addr = *val;
- *nat_addr = (*nat_addr & ~nat_mask) | nat_mask;
- } else {
- *val = *addr;
- *nat = (*nat_addr & nat_mask) != 0;
- }
- return 0;
-}
-
-int
-unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
-{
- unsigned long *addr;
- struct pt_regs *pt;
-
- pt = (struct pt_regs *) info->sp - 1;
- switch (regnum) {
- /* scratch: */
- case 0: addr = &pt->b0; break;
- case 6: addr = &pt->b6; break;
- case 7: addr = &pt->b7; break;
-
- /* preserved: */
- case 1: case 2: case 3: case 4: case 5:
- addr = *(&info->b1 + (regnum - 1));
- break;
-
- default:
- return -1;
- }
- if (write)
- *addr = *val;
- else
- *val = *addr;
- return 0;
-}
-
-int
-unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write)
-{
- struct ia64_fpreg *addr = 0;
- struct pt_regs *pt;
-
- if ((unsigned) (regnum - 2) >= 30)
- return -1;
-
- pt = (struct pt_regs *) info->sp - 1;
+ sr.pr_val = info->pr_val;
- if (regnum <= 5) {
- addr = *(&info->f2 + (regnum - 2));
- if (!addr)
- addr = &info->sw->f2 + (regnum - 2);
- } else if (regnum <= 15) {
- if (regnum <= 9)
- addr = &pt->f6 + (regnum - 6);
- else
- addr = &info->sw->f10 + (regnum - 10);
- } else if (regnum <= 31) {
- addr = *(&info->fr[regnum - 16]);
- if (!addr)
- addr = &info->sw->f16 + (regnum - 16);
+ script = script_new(ip);
+ if (!script) {
+ dprintk("unwind: failed to create unwind script\n");
+ STAT(unw.stat.script.build_time += ia64_get_itc() - start);
+ return 0;
}
+ unw.cache[info->prev_script].hint = script - unw.cache;
- if (write)
- *addr = *val;
- else
- *val = *addr;
- return 0;
-}
-
-int
-unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
-{
- unsigned long *addr;
- struct pt_regs *pt;
+ /* search the kernels and the modules' unwind tables for IP: */
- pt = (struct pt_regs *) info->sp - 1;
+ STAT(parse_start = ia64_get_itc());
- switch (regnum) {
- case UNW_AR_BSP:
- addr = info->pbsp;
- if (!addr)
- addr = &info->sw->ar_bspstore;
- break;
+ for (table = unw.tables; table; table = table->next) {
+ if (ip >= table->start && ip < table->end) {
+ e = lookup(table, ip - table->segment_base);
+ break;
+ }
+ }
+ if (!e) {
+ /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
+ dprintk("unwind: no unwind info for ip=0x%lx\n", ip);
+ sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
+ sr.curr.reg[UNW_REG_RP].when = -1;
+ sr.curr.reg[UNW_REG_RP].val = 0;
+ compile_reg(&sr, UNW_REG_RP, script);
+ script_finalize(script, &sr);
+ STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
+ STAT(unw.stat.script.build_time += ia64_get_itc() - start);
+ return script;
+ }
- case UNW_AR_BSPSTORE:
- addr = info->bspstore;
- if (!addr)
- addr = &info->sw->ar_bspstore;
- break;
+ sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))
+ + (ip & 0xfUL));
+ hdr = *(u64 *) (table->segment_base + e->info_offset);
+ dp = (u8 *) (table->segment_base + e->info_offset + 8);
+ desc_end = dp + 8*UNW_LENGTH(hdr);
- case UNW_AR_PFS:
- addr = info->pfs;
- if (!addr)
- addr = &info->sw->ar_pfs;
- break;
+ while (!sr.done && dp < desc_end)
+ dp = unw_decode(dp, sr.in_body, &sr);
- case UNW_AR_RNAT:
- addr = info->rnat;
- if (!addr)
- addr = &info->sw->ar_rnat;
- break;
+ if (sr.when_target > sr.epilogue_start) {
+ /*
+ * sp has been restored and all values on the memory stack below
+ * psp also have been restored.
+ */
+ sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE;
+ sr.curr.reg[UNW_REG_PSP].val = 0;
+ for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
+			if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10)
+			    || r->where == UNW_WHERE_SPREL)
+ r->where = UNW_WHERE_NONE;
+ }
- case UNW_AR_UNAT:
- addr = info->unat;
- if (!addr)
- addr = &info->sw->ar_unat;
- break;
+ script->flags = sr.flags;
- case UNW_AR_LC:
- addr = info->lc;
- if (!addr)
- addr = &info->sw->ar_lc;
- break;
+ /*
+	 * If RP didn't get saved, generate entry for the return link
+ * register.
+ */
+ if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) {
+ sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
+ sr.curr.reg[UNW_REG_RP].when = -1;
+ sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
+ }
- case UNW_AR_FPSR:
- addr = info->fpsr;
- if (!addr)
- addr = &info->sw->ar_fpsr;
- break;
+#if UNW_DEBUG
+ printk ("unwind: state record for func 0x%lx, t=%u:\n",
+ table->segment_base + e->start_offset, sr.when_target);
+ for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
+ if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
+ printk(" %s <- ", unw.preg_name[r - sr.curr.reg]);
+ switch (r->where) {
+ case UNW_WHERE_GR: printk("r%lu", r->val); break;
+ case UNW_WHERE_FR: printk("f%lu", r->val); break;
+ case UNW_WHERE_BR: printk("b%lu", r->val); break;
+ case UNW_WHERE_SPREL: printk("[sp+0x%lx]", r->val); break;
+ case UNW_WHERE_PSPREL: printk("[psp+0x%lx]", 0x10 - r->val); break;
+ case UNW_WHERE_NONE:
+ printk("%s+0x%lx", unw.preg_name[r - sr.curr.reg], r->val);
+ break;
+ default: printk("BADWHERE(%d)", r->where); break;
+ }
+ printk ("\t\t%d\n", r->when);
+ }
+ }
+#endif
- case UNW_AR_RSC:
- addr = &pt->ar_rsc;
- break;
+ STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
- case UNW_AR_CCV:
- addr = &pt->ar_ccv;
- break;
+ /* translate state record into unwinder instructions: */
- default:
- return -1;
+	if (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE
+ && sr.when_target > sr.curr.reg[UNW_REG_PSP].when && sr.curr.reg[UNW_REG_PSP].val != 0)
+ {
+ /* new psp is sp plus frame size */
+ insn.opc = UNW_INSN_ADD;
+ insn.dst = unw.preg_index[UNW_REG_PSP];
+ insn.val = sr.curr.reg[UNW_REG_PSP].val;
+ script_emit(script, insn);
}
- if (write)
- *addr = *val;
+ /* determine where the primary UNaT is: */
+ if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
+ i = UNW_REG_PRI_UNAT_MEM;
+ else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when)
+ i = UNW_REG_PRI_UNAT_GR;
+ else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
+ i = UNW_REG_PRI_UNAT_MEM;
else
- *val = *addr;
- return 0;
-}
+ i = UNW_REG_PRI_UNAT_GR;
-inline int
-unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
-{
- unsigned long *addr;
+ compile_reg(&sr, i, script);
- addr = info->pr;
- if (!addr)
- addr = &info->sw->pr;
+ for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i)
+ compile_reg(&sr, i, script);
- if (write)
- *addr = *val;
- else
- *val = *addr;
- return 0;
+ /* free labelled register states & stack: */
+
+ STAT(parse_start = ia64_get_itc());
+ for (rs = sr.reg_state_list; rs; rs = next) {
+ next = rs->next;
+ free_reg_state(rs);
+ }
+ while (sr.stack)
+ pop(&sr);
+ STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
+
+ script_finalize(script, &sr);
+ STAT(unw.stat.script.build_time += ia64_get_itc() - start);
+ return script;
}
/*
@@ -1238,13 +1444,15 @@
* reflect the state that existed upon entry to the function that this
* unwinder represents.
*/
-static void
+static inline void
run_script (struct unw_script *script, struct unw_frame_info *state)
{
struct unw_insn *ip, *limit, next_insn;
unsigned long opc, dst, val, off;
unsigned long *s = (unsigned long *) state;
+ STAT(unsigned long start;)
+ STAT(++unw.stat.script.runs; start = ia64_get_itc());
state->flags = script->flags;
ip = script->insn;
limit = script->insn + script->count;
@@ -1299,10 +1507,11 @@
break;
}
}
+ STAT(unw.stat.script.run_time += ia64_get_itc() - start);
return;
lazy_init:
- off = sw_offset[val];
+ off = unw.sw_off[val];
s[val] = (unsigned long) state->sw + off;
if (off >= struct_offset (struct unw_frame_info, r4)
&& off <= struct_offset (struct unw_frame_info, r7))
@@ -1314,61 +1523,71 @@
goto redo;
}
-static void
+static int
find_save_locs (struct unw_frame_info *info)
{
- static struct unw_script *script_cache;
+ int have_write_lock = 0;
struct unw_script *scr;
- /* XXX BIG FIXME---need hash table here... */
- scr = script_cache;
- if (scr) {
- while (scr->ip != info->ip || ((info->pr_val ^ scr->pr_val) & scr->pr_mask) != 0) {
-			if (scr->hint == -1) {
- scr = 0;
- break;
- }
- scr = (struct unw_script *) ((int *) scr + scr->hint);
- }
+ if (info->ip & (my_cpu_data.unimpl_va_mask | 0xf)) {
+ /* don't let obviously bad addresses pollute the cache */
+ info->rp = 0;
+ return -1;
}
+ scr = script_lookup(info);
if (!scr) {
scr = build_script(info);
- if (scr) {
- if (script_cache)
- scr->hint = (int *) script_cache - (int *) scr;
- else
- scr->hint = -1;
- script_cache = scr;
+ if (!scr) {
+ dprintk("unwind: failed to locate/build unwind script for ip %lx\n",
+ info->ip);
+ return -1;
}
+ have_write_lock = 1;
}
+ info->hint = scr->hint;
+ info->prev_script = scr - unw.cache;
- if (!scr) {
- dprintk("unwind: failed to locate/build unwind script for ip %lx\n", info->ip);
- return;
- }
run_script(scr, info);
+
+ if (have_write_lock)
+ write_unlock(&scr->lock);
+ else
+ read_unlock(&scr->lock);
+ return 0;
}
int
unw_unwind (struct unw_frame_info *info)
{
+ unsigned long prev_ip, prev_sp, prev_bsp;
unsigned long ip, pr, num_regs;
+ STAT(unsigned long start, flags;)
+ int retval;
+ STAT(local_irq_save(flags); ++unw.stat.api.unwinds; start = ia64_get_itc());
+
+ prev_ip = info->ip;
+ prev_sp = info->sp;
+ prev_bsp = info->bsp;
+
/* restore the ip */
if (!info->rp) {
- dprintk("unwind: failed to locate return link!\n");
+ dprintk("unwind: failed to locate return link (ip=0x%lx)!\n", info->ip);
+ STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
return -1;
}
ip = info->ip = *info->rp;
if (ip <= TASK_SIZE) {
- dprintk("unwind: reached user-space (ip=%lx)\n", ip);
+ dprintk("unwind: reached user-space (ip=0x%lx)\n", ip);
+ STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
return -1;
}
/* restore the cfm: */
if (!info->pfs) {
dprintk("unwind: failed to locate ar.pfs!\n");
+ STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
return -1;
}
info->cfm = *info->pfs;
@@ -1387,6 +1606,7 @@
if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
dprintk("unwind: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
info->bsp, info->regstk.limit, info->regstk.top);
+ STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
return -1;
}
@@ -1395,20 +1615,31 @@
if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
dprintk("unwind: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
info->sp, info->regstk.top, info->regstk.limit);
+ STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
+ return -1;
+ }
+
+	if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) {
+ dprintk("unwind: ip, sp, bsp remain unchanged; stopping here (ip=0x%lx)\n", ip);
+ STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
return -1;
}
/* finally, restore the predicates: */
unw_get_pr(info, &info->pr_val);
- find_save_locs(info);
- return 0;
+ retval = find_save_locs(info);
+ STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
+ return retval;
}
static void
unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
{
unsigned long rbslimit, rbstop, stklimit, stktop, sol;
+ STAT(unsigned long start, flags;)
+
+ STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc());
/*
* Subtle stuff here: we _could_ unwind through the
@@ -1445,6 +1676,7 @@
info->pr_val = sw->pr;
find_save_locs(info);
+ STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
}
#endif /* CONFIG_IA64_NEW_UNWIND */
@@ -1563,7 +1795,8 @@
return -1;
info->ip = read_reg(info, sol - 2, &is_nat);
- if (is_nat)
+ if (is_nat || (info->ip & (my_cpu_data.unimpl_va_mask | 0xf)))
+ /* don't let obviously bad addresses pollute the cache */
return -1;
cfm = read_reg(info, sol - 1, &is_nat);
@@ -1580,18 +1813,12 @@
#ifdef CONFIG_IA64_NEW_UNWIND
-void *
-unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
- void *table_start, void *table_end)
+static void
+init_unwind_table (struct unw_table *table, const char *name, unsigned long segment_base,
+ unsigned long gp, void *table_start, void *table_end)
{
- struct unw_table *table;
struct unw_table_entry *start = table_start, *end = table_end;
- if (end - start <= 0) {
- dprintk("unwind: ignoring attempt to insert empty unwind table\n");
- return 0;
- }
-
#ifdef UNWIND_TABLE_SORT_BUG
{
struct unw_table_entry *e1, *e2, tmp;
@@ -1609,8 +1836,6 @@
}
}
#endif
-
- table = kmalloc(sizeof(*table), GFP_USER);
table->name = name;
table->segment_base = segment_base;
table->gp = gp;
@@ -1618,9 +1843,34 @@
table->end = segment_base + end[-1].end_offset;
table->array = start;
table->length = end - start;
+}
+
+void *
+unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
+ void *table_start, void *table_end)
+{
+ struct unw_table_entry *start = table_start, *end = table_end;
+ struct unw_table *table;
+ unsigned long flags;
+
+ if (end - start <= 0) {
+ dprintk("unwind: ignoring attempt to insert empty unwind table\n");
+ return 0;
+ }
+
+ table = kmalloc(sizeof(*table), GFP_USER);
+ if (!table)
+ return 0;
- table->next = unw_tables;
- unw_tables = table;
+ init_unwind_table(table, name, segment_base, gp, table_start, table_end);
+
+ spin_lock_irqsave(&unw.lock, flags);
+ {
+ /* keep kernel unwind table at the front (it's searched most commonly): */
+ table->next = unw.tables->next;
+ unw.tables->next = table;
+ }
+ spin_unlock_irqrestore(&unw.lock, flags);
return table;
}
@@ -1628,7 +1878,10 @@
void
unw_remove_unwind_table (void *handle)
{
- struct unw_table *table, *prev;
+ struct unw_table *table, *prevt;
+ struct unw_script *tmp, *prev;
+ unsigned long flags;
+ long index;
if (!handle) {
dprintk("unwind: ignoring attempt to remove non-existent unwind table\n");
@@ -1636,18 +1889,52 @@
}
table = handle;
- for (prev = (struct unw_table *) &unw_tables; prev; prev = prev->next)
-		if (prev->next == table)
- break;
- if (!prev) {
- dprintk("unwind: failed to find unwind table %p\n", table);
+	if (table == &unw.kernel_table) {
+ dprintk("unwind: sorry, freeing the kernel's unwind table is a no-can-do!\n");
return;
}
- prev->next = table->next;
- kfree(table);
- /* XXX need to implement this... */
- dprintk("unwind: don't forget to clear cache entries for this module!\n");
+ spin_lock_irqsave(&unw.lock, flags);
+ {
+ /* first, delete the table: */
+
+ for (prevt = (struct unw_table *) &unw.tables; prevt; prevt = prevt->next)
+			if (prevt->next == table)
+ break;
+ if (!prevt) {
+ dprintk("unwind: failed to find unwind table %p\n", table);
+ spin_unlock_irqrestore(&unw.lock, flags);
+ return;
+ }
+ prevt->next = table->next;
+
+ /* next, remove hash table entries for this table */
+
+ for (index = 0; index <= UNW_HASH_SIZE; ++index) {
+ if (unw.hash[index] >= UNW_CACHE_SIZE)
+ continue;
+
+ tmp = unw.cache + unw.hash[index];
+ prev = 0;
+ while (1) {
+ write_lock(&tmp->lock);
+ {
+ if (tmp->ip >= table->start && tmp->ip < table->end) {
+ if (prev)
+ prev->coll_chain = tmp->coll_chain;
+ else
+ unw.hash[index] = -1;
+ tmp->ip = 0;
+ } else
+ prev = tmp;
+ }
+ write_unlock(&tmp->lock);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&unw.lock, flags);
+
+ kfree(table);
}
#endif /* CONFIG_IA64_NEW_UNWIND */
@@ -1656,27 +1943,39 @@
{
#ifdef CONFIG_IA64_NEW_UNWIND
extern int ia64_unw_start, ia64_unw_end, __gp;
+ extern void unw_hash_index_t_is_too_narrow (void);
long i, off;
-# define SW(f) struct_offset(struct switch_stack, f)
- sw_offset[preg_index[UNW_REG_PRI_UNAT_GR]] = SW(ar_unat);
- sw_offset[preg_index[UNW_REG_BSPSTORE]] = SW(ar_bspstore);
- sw_offset[preg_index[UNW_REG_PFS]] = SW(ar_unat);
- sw_offset[preg_index[UNW_REG_RP]] = SW(b0);
- sw_offset[preg_index[UNW_REG_UNAT]] = SW(ar_unat);
- sw_offset[preg_index[UNW_REG_PR]] = SW(pr);
- sw_offset[preg_index[UNW_REG_LC]] = SW(ar_lc);
- sw_offset[preg_index[UNW_REG_FPSR]] = SW(ar_fpsr);
- for (i = UNW_REG_R4, off = SW(r4); i <= UNW_REG_R7; ++i, off += 8)
- sw_offset[preg_index[i]] = off;
- for (i = UNW_REG_B1, off = SW(b1); i <= UNW_REG_B5; ++i, off += 8)
- sw_offset[preg_index[i]] = off;
- for (i = UNW_REG_F2, off = SW(f2); i <= UNW_REG_F5; ++i, off += 16)
- sw_offset[preg_index[i]] = off;
- for (i = UNW_REG_F16, off = SW(f16); i <= UNW_REG_F31; ++i, off += 16)
- sw_offset[preg_index[i]] = off;
+ if (8*sizeof (unw_hash_index_t) < UNW_LOG_HASH_SIZE)
+ unw_hash_index_t_is_too_narrow();
+
+ unw.sw_off[unw.preg_index[UNW_REG_PRI_UNAT_GR]] = SW(AR_UNAT);
+ unw.sw_off[unw.preg_index[UNW_REG_BSPSTORE]] = SW(AR_BSPSTORE);
+ unw.sw_off[unw.preg_index[UNW_REG_PFS]] = SW(AR_UNAT);
+ unw.sw_off[unw.preg_index[UNW_REG_RP]] = SW(B0);
+ unw.sw_off[unw.preg_index[UNW_REG_UNAT]] = SW(AR_UNAT);
+ unw.sw_off[unw.preg_index[UNW_REG_PR]] = SW(PR);
+ unw.sw_off[unw.preg_index[UNW_REG_LC]] = SW(AR_LC);
+ unw.sw_off[unw.preg_index[UNW_REG_FPSR]] = SW(AR_FPSR);
+ for (i = UNW_REG_R4, off = SW(R4); i <= UNW_REG_R7; ++i, off += 8)
+ unw.sw_off[unw.preg_index[i]] = off;
+ for (i = UNW_REG_B1, off = SW(B1); i <= UNW_REG_B5; ++i, off += 8)
+ unw.sw_off[unw.preg_index[i]] = off;
+ for (i = UNW_REG_F2, off = SW(F2); i <= UNW_REG_F5; ++i, off += 16)
+ unw.sw_off[unw.preg_index[i]] = off;
+ for (i = UNW_REG_F16, off = SW(F16); i <= UNW_REG_F31; ++i, off += 16)
+ unw.sw_off[unw.preg_index[i]] = off;
+
+ unw.cache[0].coll_chain = -1;
+ for (i = 1; i < UNW_CACHE_SIZE; ++i) {
+ unw.cache[i].lru_chain = (i - 1);
+ unw.cache[i].coll_chain = -1;
+ unw.cache[i].lock = RW_LOCK_UNLOCKED;
+ }
+ unw.lru_head = UNW_CACHE_SIZE - 1;
+ unw.lru_tail = 0;
- unw_add_unwind_table("kernel", KERNEL_START, (unsigned long) &__gp,
- &ia64_unw_start, &ia64_unw_end);
+ init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START, (unsigned long) &__gp,
+ &ia64_unw_start, &ia64_unw_end);
#endif /* CONFIG_IA64_NEW_UNWIND */
}
diff -urN linux-davidm/arch/ia64/kernel/unwind_i.h linux-2.4.0-test1-lia/arch/ia64/kernel/unwind_i.h
--- linux-davidm/arch/ia64/kernel/unwind_i.h Thu Jun 1 01:38:40 2000
+++ linux-2.4.0-test1-lia/arch/ia64/kernel/unwind_i.h Thu Jun 1 01:08:05 2000
@@ -138,14 +138,23 @@
signed int val : 19;
};
-#define UNW_MAX_SCRIPT_LEN (2*UNW_NUM_REGS)
+/*
+ * Preserved general static registers (r2-r5) give rise to two script
+ * instructions; everything else yields at most one instruction; at
+ * the end of the script, the psp gets popped, accounting for one more
+ * instruction.
+ */
+#define UNW_MAX_SCRIPT_LEN (UNW_NUM_REGS + 5)
struct unw_script {
unsigned long ip; /* ip this script is for */
unsigned long pr_mask; /* mask of predicates script depends on */
unsigned long pr_val; /* predicate values this script is for */
- unsigned long flags; /* see UNW_FLAG_* in unwind.h */
- unsigned int count; /* number of instructions in script */
- int hint; /* hint for next script to try */
+ rwlock_t lock;
+ unsigned int flags; /* see UNW_FLAG_* in unwind.h */
+ unsigned short lru_chain; /* used for least-recently-used chain */
+ unsigned short coll_chain; /* used for hash collisions */
+ unsigned short hint; /* hint for next script to try (or -1) */
+ unsigned short count; /* number of instructions in script */
struct unw_insn insn[UNW_MAX_SCRIPT_LEN];
};
diff -urN linux-davidm/arch/ia64/mm/fault.c linux-2.4.0-test1-lia/arch/ia64/mm/fault.c
--- linux-davidm/arch/ia64/mm/fault.c Mon Apr 24 15:52:23 2000
+++ linux-2.4.0-test1-lia/arch/ia64/mm/fault.c Thu Jun 1 01:09:50 2000
@@ -1,8 +1,8 @@
/*
* MMU fault handling support.
*
- * Copyright (C) 1998, 1999 Hewlett-Packard Co
- * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+ * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -94,7 +94,14 @@
* sure we exit gracefully rather than endlessly redo the
* fault.
*/
- if (!handle_mm_fault(mm, vma, address, (isr & IA64_ISR_W) != 0)) {
+ switch (handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0)) {
+ case 1:
+ ++current->min_flt;
+ break;
+ case 2:
+ ++current->maj_flt;
+ break;
+ case 0:
/*
* We ran out of memory, or some other thing happened
* to us that made us unable to handle the page fault
@@ -102,6 +109,8 @@
*/
signal = SIGBUS;
goto bad_area;
+ default:
+ goto out_of_memory;
}
up(&mm->mmap_sem);
return;
@@ -128,15 +137,11 @@
return;
}
if (user_mode(regs)) {
-#if 0
-printk("%s(%d): segfault accessing %lx\n", current->comm, current->pid, address);
-show_regs(regs);
-#endif
si.si_signo = signal;
si.si_errno = 0;
si.si_code = SI_KERNEL;
si.si_addr = (void *) address;
- force_sig_info(SIGSEGV, &si, current);
+ force_sig_info(signal, &si, current);
return;
}
@@ -161,4 +166,11 @@
die_if_kernel("Oops", regs, isr);
do_exit(SIGKILL);
return;
+
+ out_of_memory:
+ up(&mm->mmap_sem);
+ printk("VM: killing process %s\n", current->comm);
+ if (user_mode(regs))
+ do_exit(SIGKILL);
+ goto no_context;
}
diff -urN linux-davidm/arch/ia64/mm/init.c linux-2.4.0-test1-lia/arch/ia64/mm/init.c
--- linux-davidm/arch/ia64/mm/init.c Thu Jun 1 01:38:40 2000
+++ linux-2.4.0-test1-lia/arch/ia64/mm/init.c Thu Jun 1 01:10:02 2000
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/swap.h>
+#include <asm/bitops.h>
#include <asm/dma.h>
#include <asm/efi.h>
#include <asm/ia32.h>
@@ -265,7 +266,7 @@
void __init
ia64_rid_init (void)
{
- unsigned long flags, rid, pta;
+ unsigned long flags, rid, pta, impl_va_msb;
/* Set up the kernel identity mappings (regions 6 & 7) and the vmalloc area (region 5): */
ia64_clear_ic(flags);
@@ -300,11 +301,15 @@
# define ld_max_addr_space_size (ld_max_addr_space_pages + PAGE_SHIFT)
# define ld_max_vpt_size (ld_max_addr_space_pages + ld_pte_size)
# define POW2(n) (1ULL << (n))
-# define IMPL_VA_MSB 50
- if (POW2(ld_max_addr_space_size - 1) + POW2(ld_max_vpt_size) > POW2(IMPL_VA_MSB))
+ impl_va_msb = ffz(~my_cpu_data.unimpl_va_mask) - 1;
+
+ if (impl_va_msb < 50 || impl_va_msb > 60)
+ panic("Bogus impl_va_msb value of %lu!\n", impl_va_msb);
+
+ if (POW2(ld_max_addr_space_size - 1) + POW2(ld_max_vpt_size) > POW2(impl_va_msb))
panic("mm/init: overlap between virtually mapped linear page table and "
"mapped kernel space!");
- pta = POW2(61) - POW2(IMPL_VA_MSB);
+ pta = POW2(61) - POW2(impl_va_msb);
/*
* Set the (virtually mapped linear) page table address. Bit
* 8 selects between the short and long format, bits 2-7 the
@@ -324,9 +329,6 @@
clear_page((void *) ZERO_PAGE_ADDR);
- ia64_rid_init();
- __flush_tlb_all();
-
/* initialize mem_map[] */
memset(zones_size, 0, sizeof(zones_size));
@@ -378,8 +380,6 @@
max_mapnr = max_low_pfn;
high_memory = __va(max_low_pfn * PAGE_SIZE);
-
- ia64_tlb_init();
totalram_pages += free_all_bootmem();
diff -urN linux-davidm/arch/ia64/mm/tlb.c linux-2.4.0-test1-lia/arch/ia64/mm/tlb.c
--- linux-davidm/arch/ia64/mm/tlb.c Thu Jun 1 01:38:40 2000
+++ linux-2.4.0-test1-lia/arch/ia64/mm/tlb.c Thu Jun 1 01:10:24 2000
@@ -53,10 +53,11 @@
* flush_tlb_no_ptcg is called with ptcg_lock locked
*/
static inline void
-flush_tlb_no_ptcg (__u64 start, __u64 end, __u64 nbits)
+flush_tlb_no_ptcg (unsigned long start, unsigned long end, unsigned long nbits)
{
- __u64 flags;
- __u64 saved_tpr;
+ extern void smp_send_flush_tlb (void);
+ unsigned long saved_tpr = 0;
+ unsigned long flags;
/*
* Some times this is called with interrupts disabled and causes
@@ -83,10 +84,12 @@
* Purge local TLB entries. ALAT invalidation is done in ia64_leave_kernel.
*/
do {
- __asm__ __volatile__ ("ptc.l %0,%1" :: "r"(start), "r"(nbits<<2) : "memory");
+ asm volatile ("ptc.l %0,%1" :: "r"(start), "r"(nbits<<2) : "memory");
start += (1UL << nbits);
} while (start < end);
+ ia64_srlz_i(); /* srlz.i implies srlz.d */
+
/*
* Wait for other CPUs to finish purging entries.
*/
@@ -157,7 +160,7 @@
stride0 = ia64_ptce_info.stride[0];
stride1 = ia64_ptce_info.stride[1];
- __save_and_cli(flags);
+ local_irq_save(flags);
for (i = 0; i < count0; ++i) {
for (j = 0; j < count1; ++j) {
asm volatile ("ptc.e %0" :: "r"(addr));
@@ -165,7 +168,7 @@
}
addr += stride0;
}
- __restore_flags(flags);
+ local_irq_restore(flags);
ia64_insn_group_barrier();
ia64_srlz_i(); /* srlz.i implies srlz.d */
ia64_insn_group_barrier();
@@ -211,21 +214,20 @@
/*
* Flush ALAT entries also.
*/
- __asm__ __volatile__ ("ptc.ga %0,%1;;srlz.i;;"
- :: "r"(start), "r"(nbits<<2) : "memory");
+ asm volatile ("ptc.ga %0,%1;;srlz.i;;" :: "r"(start), "r"(nbits<<2) : "memory");
# else
- __asm__ __volatile__ ("ptc.l %0,%1" :: "r"(start), "r"(nbits<<2) : "memory");
+ asm volatile ("ptc.l %0,%1" :: "r"(start), "r"(nbits<<2) : "memory");
# endif
start += (1UL << nbits);
} while (start < end);
-#endif /* CONFIG_SMP && !defined(CONFIG_ITANIUM_PTCG)
+#endif /* CONFIG_SMP && !defined(CONFIG_ITANIUM_PTCG) */
spin_unlock(&ptcg_lock);
ia64_insn_group_barrier();
ia64_srlz_i(); /* srlz.i implies srlz.d */
ia64_insn_group_barrier();
}
-void
+void __init
ia64_tlb_init (void)
{
ia64_get_ptce(&ia64_ptce_info);
diff -urN linux-davidm/drivers/ide/piix.c linux-2.4.0-test1-lia/drivers/ide/piix.c
--- linux-davidm/drivers/ide/piix.c Thu Jun 1 01:20:27 2000
+++ linux-2.4.0-test1-lia/drivers/ide/piix.c Thu Jun 1 01:11:08 2000
@@ -423,7 +423,7 @@
void __init ide_init_piix (ide_hwif_t *hwif)
{
-#if 0
+#ifndef CONFIG_IA64
/* autoprobe instead... --davidm 00/04/20 */
if (!hwif->irq)
hwif->irq = hwif->channel ? 15 : 14;
diff -urN linux-davidm/drivers/scsi/simscsi.c linux-2.4.0-test1-lia/drivers/scsi/simscsi.c
--- linux-davidm/drivers/scsi/simscsi.c Thu May 25 23:22:10 2000
+++ linux-2.4.0-test1-lia/drivers/scsi/simscsi.c Thu Jun 1 01:11:28 2000
@@ -85,17 +85,22 @@
static void
simscsi_interrupt (unsigned long val)
{
+ unsigned long flags;
Scsi_Cmnd *sc;
- while ((sc = queue[rd].sc) != 0) {
- atomic_dec(&num_reqs);
- queue[rd].sc = 0;
+ spin_lock_irqsave(&io_request_lock, flags);
+ {
+ while ((sc = queue[rd].sc) != 0) {
+ atomic_dec(&num_reqs);
+ queue[rd].sc = 0;
#if DEBUG_SIMSCSI
- printk("simscsi_interrupt: done with %ld\n", sc->serial_number);
+ printk("simscsi_interrupt: done with %ld\n", sc->serial_number);
#endif
- (*sc->scsi_done)(sc);
- rd = (rd + 1) % SIMSCSI_REQ_QUEUE_LEN;
+ (*sc->scsi_done)(sc);
+ rd = (rd + 1) % SIMSCSI_REQ_QUEUE_LEN;
+ }
}
+ spin_unlock_irqrestore(&io_request_lock, flags);
}
int
diff -urN linux-davidm/drivers/sound/emu10k1/main.c linux-2.4.0-test1-lia/drivers/sound/emu10k1/main.c
--- linux-davidm/drivers/sound/emu10k1/main.c Thu Jun 1 01:38:40 2000
+++ linux-2.4.0-test1-lia/drivers/sound/emu10k1/main.c Thu Jun 1 01:12:11 2000
@@ -532,7 +532,7 @@
return CTSTATUS_SUCCESS;
}
-static void audio_exit(struct emu10k1_card *card)
+static void __devexit audio_exit(struct emu10k1_card *card)
{
kfree(card->waveout);
kfree(card->wavein);
@@ -550,7 +550,7 @@
return;
}
-static void emu10k1_exit(struct emu10k1_card *card)
+static void __devexit emu10k1_exit(struct emu10k1_card *card)
{
int ch;
diff -urN linux-davidm/fs/binfmt_elf.c linux-2.4.0-test1-lia/fs/binfmt_elf.c
--- linux-davidm/fs/binfmt_elf.c Thu Jun 1 01:38:40 2000
+++ linux-2.4.0-test1-lia/fs/binfmt_elf.c Thu Jun 1 01:12:21 2000
@@ -288,12 +288,14 @@
} else
map_addr = -EINVAL;
#else /* !CONFIG_BINFMT_ELF32 */
+	  down(&current->mm->mmap_sem);
map_addr = do_mmap(interpreter,
load_addr + ELF_PAGESTART(vaddr),
eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr),
elf_prot,
elf_type,
eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr));
+	  up(&current->mm->mmap_sem);
#endif /* !CONFIG_BINFMT_ELF32 */
 	if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
@@ -663,11 +665,13 @@
} else
error = EINVAL;
#else /* CONFIG_BINFMT_ELF32 */
+	down(&current->mm->mmap_sem);
error = do_mmap(bprm->file, ELF_PAGESTART(load_bias + vaddr),
(elf_ppnt->p_filesz +
ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
elf_prot, elf_flags, (elf_ppnt->p_offset -
ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
+	up(&current->mm->mmap_sem);
#endif /* CONFIG_BINFMT_ELF32 */
if (!load_addr_set) {
diff -urN linux-davidm/fs/lockd/xdr.c linux-2.4.0-test1-lia/fs/lockd/xdr.c
--- linux-davidm/fs/lockd/xdr.c Sun Apr 2 15:31:32 2000
+++ linux-2.4.0-test1-lia/fs/lockd/xdr.c Thu Jun 1 01:12:30 2000
@@ -86,7 +86,7 @@
if ((len = ntohl(*p++)) != NFS2_FHSIZE) {
printk(KERN_NOTICE
- "lockd: bad fhandle size %x (should be %Zu)\n",
+ "lockd: bad fhandle size %x (should be %u)\n",
len, NFS2_FHSIZE);
return NULL;
}
diff -urN linux-davidm/fs/nfsd/nfscache.c linux-2.4.0-test1-lia/fs/nfsd/nfscache.c
--- linux-davidm/fs/nfsd/nfscache.c Thu Jun 1 01:20:28 2000
+++ linux-2.4.0-test1-lia/fs/nfsd/nfscache.c Thu Jun 1 01:12:41 2000
@@ -60,7 +60,7 @@
nfscache = (struct svc_cacherep *)
__get_free_pages(GFP_KERNEL, order);
if (!nfscache) {
- printk (KERN_ERR "nfsd: cannot allocate %d bytes for reply cache\n", i);
+ printk (KERN_ERR "nfsd: cannot allocate %Zu bytes for reply cache\n", i);
return;
}
memset(nfscache, 0, i);
@@ -70,7 +70,7 @@
if (!hash_list) {
free_pages ((unsigned long)nfscache, order);
nfscache = NULL;
- printk (KERN_ERR "nfsd: cannot allocate %d bytes for hash list\n", i);
+ printk (KERN_ERR "nfsd: cannot allocate %Zu bytes for hash list\n", i);
return;
}
diff -urN linux-davidm/fs/readdir.c linux-2.4.0-test1-lia/fs/readdir.c
--- linux-davidm/fs/readdir.c Thu Jun 1 01:38:40 2000
+++ linux-2.4.0-test1-lia/fs/readdir.c Thu Jun 1 01:30:33 2000
@@ -79,10 +79,6 @@
return 0;
}
-#define NAME_OFFSET(de) ((int) ((de)->d_name - (char *) (de)))
-#define ROUND_UP(x) (((x)+sizeof(long)-1) & ~(sizeof(long)-1))
-
-#if !defined(__ia64__)
/*
* Traditional linux readdir() handling..
*
@@ -91,6 +87,10 @@
* anyway. Thus the special "fillonedir()" function for that
* case (the low-level handlers don't need to care about this).
*/
+#define NAME_OFFSET(de) ((int) ((de)->d_name - (char *) (de)))
+#define ROUND_UP(x) (((x)+sizeof(long)-1) & ~(sizeof(long)-1))
+
+#ifndef __ia64__
struct old_linux_dirent {
unsigned long d_ino;
@@ -146,7 +146,7 @@
return error;
}
-#endif /* !defined(__ia64__) */
+#endif /* !__ia64__ */
/*
* New, all-improved, singing, dancing, iBCS2-compliant getdents()
diff -urN linux-davidm/include/asm-ia64/pal.h linux-2.4.0-test1-lia/include/asm-ia64/pal.h
--- linux-davidm/include/asm-ia64/pal.h Thu Jun 1 01:38:40 2000
+++ linux-2.4.0-test1-lia/include/asm-ia64/pal.h Thu Jun 1 01:13:03 2000
@@ -4,11 +4,12 @@
/*
* Processor Abstraction Layer definitions.
*
- * This is based on version 2.4 of the manual "Enhanced Mode Processor
- * Abstraction Layer".
+ * This is based on Intel IA-64 Architecture Software Developer's Manual rev 1.0
+ * chapter 11 IA-64 Processor Abstraction Layer
*
* Copyright (C) 1998-2000 Hewlett-Packard Co
* Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com>
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com>
@@ -16,6 +17,8 @@
* 99/10/01 davidm Make sure we pass zero for reserved parameters.
* 00/03/07 davidm Updated pal_cache_flush() to be in sync with PAL v2.6.
* 00/03/23 cfleck Modified processor min-state save area to match updated PAL & SAL info
+ * 00/05/24 eranian Updated to latest PAL spec, fix structures bugs, added
 * 00/05/25 eranian	Support for stack calls, and static physical calls
*/
/*
@@ -127,8 +130,8 @@
typedef union pal_cache_config_info_1_s {
struct {
u64 u : 1, /* 0 Unified cache ? */
- reserved : 5, /* 7-3 Reserved */
at : 2, /* 2-1 Cache mem attr*/
+ reserved : 5, /* 7-3 Reserved */
associativity : 8, /* 16-8 Associativity*/
line_size : 8, /* 23-17 Line size */
stride : 8, /* 31-24 Stride */
@@ -164,8 +167,8 @@
u64 pcci_reserved;
} pal_cache_config_info_t;
-#define pcci_ld_hint pcci_info_1.pcci1.load_hints
-#define pcci_st_hint pcci_info_1.pcci1_bits.store_hints
+#define pcci_ld_hints pcci_info_1.pcci1_bits.load_hints
+#define pcci_st_hints pcci_info_1.pcci1_bits.store_hints
#define pcci_ld_latency pcci_info_1.pcci1_bits.load_latency
#define pcci_st_latency pcci_info_1.pcci1_bits.store_latency
#define pcci_stride pcci_info_1.pcci1_bits.stride
@@ -641,8 +644,12 @@
* parameters.
*/
extern struct ia64_pal_retval ia64_pal_call_static (u64, u64, u64, u64);
+extern struct ia64_pal_retval ia64_pal_call_stacked (u64, u64, u64, u64);
+extern struct ia64_pal_retval ia64_pal_call_phys_static (u64, u64, u64, u64);
-#define PAL_CALL(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_static(a0,a1, a2, a3)
+#define PAL_CALL(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_static(a0, a1, a2, a3)
+#define PAL_CALL_STK(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_stacked(a0, a1, a2, a3)
+#define PAL_CALL_PHYS(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_phys_static(a0, a1, a2, a3)
typedef int (*ia64_pal_handler) (u64, ...);
extern ia64_pal_handler ia64_pal;
@@ -702,7 +709,7 @@
pal_bus_features_u_t *features_control)
{
struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_BUS_GET_FEATURES, 0, 0, 0);
+ PAL_CALL_PHYS(iprv, PAL_BUS_GET_FEATURES, 0, 0, 0);
if (features_avail)
features_avail->pal_bus_features_val = iprv.v0;
if (features_status)
@@ -711,15 +718,54 @@
features_control->pal_bus_features_val = iprv.v2;
return iprv.status;
}
+
/* Enables/disables specific processor bus features */
extern inline s64
ia64_pal_bus_set_features (pal_bus_features_u_t feature_select)
{
struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_BUS_SET_FEATURES, feature_select.pal_bus_features_val, 0, 0);
+ PAL_CALL_PHYS(iprv, PAL_BUS_SET_FEATURES, feature_select.pal_bus_features_val, 0, 0);
return iprv.status;
}
+/* Get detailed cache information */
+extern inline s64
+ia64_pal_cache_config_info (u64 cache_level, u64 cache_type, pal_cache_config_info_t *conf)
+{
+ struct ia64_pal_retval iprv;
+
+ PAL_CALL(iprv, PAL_CACHE_INFO, cache_level, cache_type, 0);
+
+ if (iprv.status == 0) {
+ conf->pcci_status = iprv.status;
+ conf->pcci_info_1.pcci1_data = iprv.v0;
+ conf->pcci_info_2.pcci2_data = iprv.v1;
+ conf->pcci_reserved = iprv.v2;
+ }
+ return iprv.status;
+
+}
+
+/* Get detailed cache protection information */
+extern inline s64
+ia64_pal_cache_prot_info (u64 cache_level, u64 cache_type, pal_cache_protection_info_t *prot)
+{
+ struct ia64_pal_retval iprv;
+
+ PAL_CALL(iprv, PAL_CACHE_PROT_INFO, cache_level, cache_type, 0);
+
+ if (iprv.status == 0) {
+ prot->pcpi_status = iprv.status;
+ prot->pcp_info[0].pcpi_data = iprv.v0 & 0xffffffff;
+ prot->pcp_info[1].pcpi_data = iprv.v0 >> 32;
+ prot->pcp_info[2].pcpi_data = iprv.v1 & 0xffffffff;
+ prot->pcp_info[3].pcpi_data = iprv.v1 >> 32;
+ prot->pcp_info[4].pcpi_data = iprv.v2 & 0xffffffff;
+ prot->pcp_info[5].pcpi_data = iprv.v2 >> 32;
+ }
+ return iprv.status;
+}
+
/*
* Flush the processor instruction or data caches. *PROGRESS must be
* initialized to zero before calling this for the first time..
@@ -895,7 +941,10 @@
struct {
u64 exit_latency : 16,
entry_latency : 16,
- power_consumption : 32;
+ power_consumption : 28,
+ im : 1,
+ co : 1,
+ reserved : 2;
} pal_power_mgmt_info_s;
} pal_power_mgmt_info_u_t;
@@ -904,7 +953,7 @@
ia64_pal_halt_info (pal_power_mgmt_info_u_t *power_buf)
{
struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_HALT_INFO, (unsigned long) power_buf, 0, 0);
+ PAL_CALL_STK(iprv, PAL_HALT_INFO, (unsigned long) power_buf, 0, 0);
return iprv.status;
}
@@ -1013,7 +1062,7 @@
struct ia64_pal_retval iprv;
PAL_CALL(iprv, PAL_MEM_ATTRIB, 0, 0, 0);
if (mem_attrib)
- *mem_attrib = iprv.v0;
+ *mem_attrib = iprv.v0 & 0xff;
return iprv.status;
}
@@ -1076,28 +1125,32 @@
return iprv.status;
}
-#ifdef TBD
struct pal_features_s;
/* Provide information about configurable processor features */
extern inline s64
-ia64_pal_proc_get_features (struct pal_features_s *features_avail,
- struct pal_features_s *features_status,
- struct pal_features_s *features_control)
+ia64_pal_proc_get_features (u64 *features_avail,
+ u64 *features_status,
+ u64 *features_control)
{
struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_PROC_GET_FEATURES, 0, 0, 0);
+ PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, 0, 0);
+ if (iprv.status == 0) {
+ *features_avail = iprv.v0;
+ *features_status = iprv.v1;
+ *features_control = iprv.v2;
+ }
return iprv.status;
}
+
/* Enable/disable processor dependent features */
extern inline s64
-ia64_pal_proc_set_features (feature_select)
+ia64_pal_proc_set_features (u64 feature_select)
{
struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_PROC_SET_FEATURES, feature_select, 0, 0);
+ PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES, feature_select, 0, 0);
return iprv.status;
}
-#endif
/*
* Put everything in a struct so we avoid the global offset table whenever
* possible.
@@ -1206,12 +1259,16 @@
/* Return PAL version information */
extern inline s64
-ia64_pal_version (pal_version_u_t *pal_version)
+ia64_pal_version (pal_version_u_t *pal_min_version, pal_version_u_t *pal_cur_version)
{
struct ia64_pal_retval iprv;
PAL_CALL(iprv, PAL_VERSION, 0, 0, 0);
- if (pal_version)
- pal_version->pal_version_val = iprv.v0;
+ if (pal_min_version)
+ pal_min_version->pal_version_val = iprv.v0;
+
+ if (pal_cur_version)
+ pal_cur_version->pal_version_val = iprv.v1;
+
return iprv.status;
}
@@ -1228,7 +1285,14 @@
} pal_tc_info_s;
} pal_tc_info_u_t;
-
+#define tc_reduce_tr pal_tc_info_s.reduce_tr
+#define tc_unified pal_tc_info_s.unified
+#define tc_pf pal_tc_info_s.pf
+#define tc_num_entries pal_tc_info_s.num_entries
+#define tc_associativity pal_tc_info_s.associativity
+#define tc_num_sets pal_tc_info_s.num_sets
+
+
/* Return information about the virtual memory characteristics of the processor
* implementation.
*/
@@ -1264,7 +1328,7 @@
struct {
u64 vw : 1,
phys_add_size : 7,
- key_size : 16,
+ key_size : 8,
max_pkr : 8,
hash_tag_id : 8,
max_dtr_entry : 8,
diff -urN linux-davidm/include/asm-ia64/pgtable.h linux-2.4.0-test1-lia/include/asm-ia64/pgtable.h
--- linux-davidm/include/asm-ia64/pgtable.h Thu Jun 1 01:38:40 2000
+++ linux-2.4.0-test1-lia/include/asm-ia64/pgtable.h Thu Jun 1 01:14:01 2000
@@ -16,23 +16,16 @@
#include <asm/mman.h>
#include <asm/page.h>
+#include <asm/processor.h>
#include <asm/types.h>
-/* Size of virtual and physical address spaces: */
-#ifdef CONFIG_ITANIUM
-# define IA64_IMPL_VA_MSB 50
-# define IA64_PHYS_BITS 44 /* Itanium PRM defines 44 bits of ppn */
-#else
-# define IA64_IMPL_VA_MSB 60 /* maximum value (bits 61-63 are region bits) */
-# define IA64_PHYS_BITS 50 /* EAS2.6 allows up to 50 bits of ppn */
-#endif
-#define IA64_PHYS_SIZE (__IA64_UL(1) << IA64_PHYS_BITS)
+#define IA64_MAX_PHYS_BITS 50 /* max. number of physical address bits (architected) */
/* Is ADDR a valid kernel address? */
#define kern_addr_valid(addr) ((addr) >= TASK_SIZE)
/* Is ADDR a valid physical address? */
-#define phys_addr_valid(addr) ((addr) < IA64_PHYS_SIZE)
+#define phys_addr_valid(addr) (((addr) & my_cpu_data.unimpl_pa_mask) == 0)
/*
* First, define the various bits in a PTE. Note that the PTE format
@@ -63,7 +56,7 @@
#define _PAGE_AR_SHIFT 9
#define _PAGE_A (1 << 5) /* page accessed bit */
#define _PAGE_D (1 << 6) /* page dirty bit */
-#define _PAGE_PPN_MASK ((IA64_PHYS_SIZE - 1) & ~0xfffUL)
+#define _PAGE_PPN_MASK (((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL)
#define _PAGE_ED (__IA64_UL(1) << 52) /* exception deferral */
#define _PAGE_PROTNONE (__IA64_UL(1) << 63)
diff -urN linux-davidm/include/asm-ia64/processor.h linux-2.4.0-test1-lia/include/asm-ia64/processor.h
--- linux-davidm/include/asm-ia64/processor.h Thu Jun 1 01:38:40 2000
+++ linux-2.4.0-test1-lia/include/asm-ia64/processor.h Thu Jun 1 01:14:12 2000
@@ -237,6 +237,8 @@
__u64 proc_freq; /* frequency of processor */
__u64 cyc_per_usec; /* itc_freq/1000000 */
__u64 usec_per_cyc; /* 2^IA64_USEC_PER_CYC_SHIFT*1000000/itc_freq */
+ __u64 unimpl_va_mask; /* mask of unimplemented virtual address bits (from PAL) */
+ __u64 unimpl_pa_mask; /* mask of unimplemented physical address bits (from PAL) */
#ifdef CONFIG_SMP
__u64 loops_per_sec;
__u64 ipi_count;
diff -urN linux-davidm/include/asm-ia64/string.h linux-2.4.0-test1-lia/include/asm-ia64/string.h
--- linux-davidm/include/asm-ia64/string.h Sun Feb 6 18:42:40 2000
+++ linux-2.4.0-test1-lia/include/asm-ia64/string.h Thu Jun 1 01:14:24 2000
@@ -12,4 +12,7 @@
#define __HAVE_ARCH_STRLEN 1 /* see arch/ia64/lib/strlen.S */
#define __HAVE_ARCH_MEMSET 1 /* see arch/ia64/lib/memset.S */
+extern __kernel_size_t strlen (const char *);
+extern void *memset (void *,int,__kernel_size_t);
+
#endif /* _ASM_IA64_STRING_H */
diff -urN linux-davidm/include/asm-ia64/system.h linux-2.4.0-test1-lia/include/asm-ia64/system.h
--- linux-davidm/include/asm-ia64/system.h Thu Jun 1 01:38:40 2000
+++ linux-2.4.0-test1-lia/include/asm-ia64/system.h Thu Jun 1 01:14:34 2000
@@ -135,7 +135,7 @@
do { \
unsigned long ip, old_psr, psr = (x); \
\
- __asm__ __volatile__ ("mov %0=psr; mov psr.l=%1;; srlz.d" \
+ __asm__ __volatile__ (";;mov %0=psr; mov psr.l=%1;; srlz.d" \
: "=&r" (old_psr) : "r" (psr) : "memory"); \
if ((old_psr & (1UL << 14)) && !(psr & (1UL << 14))) { \
__asm__ ("mov %0=ip" : "=r"(ip)); \
@@ -149,7 +149,7 @@
: "=r" (x) :: "memory")
# define local_irq_disable() __asm__ __volatile__ (";; rsm psr.i;;" ::: "memory")
/* (potentially) setting psr.i requires data serialization: */
-# define local_irq_restore(x) __asm__ __volatile__ ("mov psr.l=%0;; srlz.d" \
+# define local_irq_restore(x) __asm__ __volatile__ (";; mov psr.l=%0;; srlz.d" \
:: "r" (x) : "memory")
#endif /* !CONFIG_IA64_DEBUG_IRQ */
diff -urN linux-davidm/include/asm-ia64/unwind.h linux-2.4.0-test1-lia/include/asm-ia64/unwind.h
--- linux-davidm/include/asm-ia64/unwind.h Thu Jun 1 01:38:40 2000
+++ linux-2.4.0-test1-lia/include/asm-ia64/unwind.h Thu Jun 1 01:14:42 2000
@@ -48,7 +48,9 @@
struct unw_frame_info {
struct unw_stack regstk;
struct unw_stack memstk;
- unsigned long flags;
+ unsigned int flags;
+ short hint;
+ short prev_script;
unsigned long bsp;
unsigned long sp; /* stack pointer */
unsigned long psp; /* previous sp */
diff -urN linux-davidm/init/main.c linux-2.4.0-test1-lia/init/main.c
--- linux-davidm/init/main.c Thu Jun 1 01:38:40 2000
+++ linux-2.4.0-test1-lia/init/main.c Thu Jun 1 01:15:49 2000
@@ -572,7 +572,6 @@
#endif
mem_init();
kmem_cache_sizes_init();
- unw_init(); /* XXX remove reliance on kmalloc and move to setup_arch() */
#ifdef CONFIG_PERFMON
perfmon_init();
#endif
next reply other threads:[~2000-06-01 8:54 UTC|newest]
Thread overview: 217+ messages / expand[flat|nested] mbox.gz Atom feed top
2000-06-01 8:54 David Mosberger [this message]
2000-06-03 17:32 ` [Linux-ia64] kernel update (relative to v2.4.0-test1) Manfred Spraul
2000-06-10 1:07 ` David Mosberger
2000-06-10 1:11 ` David Mosberger
2000-07-14 21:37 ` [Linux-ia64] kernel update (relative to 2.4.0-test4) David Mosberger
2000-08-12 5:02 ` [Linux-ia64] kernel update (relative to v2.4.0-test6) David Mosberger
2000-08-14 11:35 ` Andreas Schwab
2000-08-14 17:00 ` David Mosberger
2000-09-09 6:51 ` [Linux-ia64] kernel update (relative to v2.4.0-test8) David Mosberger
2000-09-09 19:07 ` H . J . Lu
2000-09-09 20:49 ` David Mosberger
2000-09-09 21:25 ` Uros Prestor
2000-09-09 21:33 ` H . J . Lu
2000-09-09 21:45 ` David Mosberger
2000-09-09 21:49 ` H . J . Lu
2000-09-10 0:17 ` David Mosberger
2000-09-10 0:24 ` Uros Prestor
2000-09-10 0:39 ` H . J . Lu
2000-09-10 0:57 ` H . J . Lu
2000-09-10 15:47 ` H . J . Lu
2000-09-14 1:50 ` David Mosberger
2000-10-05 19:01 ` [Linux-ia64] kernel update (relative to v2.4.0-test9) David Mosberger
2000-10-05 22:08 ` Keith Owens
2000-10-05 22:15 ` David Mosberger
2000-10-31 8:55 ` [Linux-ia64] kernel update (relative to 2.4.0-test9) David Mosberger
2000-11-02 8:50 ` [Linux-ia64] kernel update (relative to 2.4.0-test10) David Mosberger
2000-11-02 10:39 ` Pimenov, Sergei
2000-11-16 7:59 ` David Mosberger
2000-12-07 8:26 ` [Linux-ia64] kernel update (relative to 2.4.0-test11) David Mosberger
2000-12-07 21:57 ` David Mosberger
2000-12-15 5:00 ` [Linux-ia64] kernel update (relative to 2.4.0-test12) David Mosberger
2000-12-15 22:43 ` Nathan Straz
2001-01-09 9:48 ` [Linux-ia64] kernel update (relative to 2.4.0) David Mosberger
2001-01-09 11:05 ` Sapariya Manish.j
2001-01-10 3:26 ` [Linux-ia64] kernel update (relative to 2.4.0) - copy_user fi Mallick, Asit K
2001-01-12 2:30 ` [Linux-ia64] kernel update (relative to 2.4.0) Jim Wilson
2001-01-26 4:53 ` David Mosberger
2001-01-31 20:32 ` [Linux-ia64] kernel update (relative to 2.4.1) David Mosberger
2001-03-01 7:12 ` [Linux-ia64] kernel update (relative to 2.4.2) David Mosberger
2001-03-01 10:17 ` Andreas Schwab
2001-03-01 10:27 ` Andreas Schwab
2001-03-01 15:29 ` David Mosberger
2001-03-02 12:26 ` Keith Owens
2001-05-09 4:52 ` [Linux-ia64] kernel update (relative to 2.4.4) Keith Owens
2001-05-09 5:07 ` David Mosberger
2001-05-09 11:45 ` Keith Owens
2001-05-09 13:38 ` Jack Steiner
2001-05-09 14:06 ` David Mosberger
2001-05-09 14:21 ` Jack Steiner
2001-05-10 4:14 ` David Mosberger
2001-05-31 7:37 ` [Linux-ia64] kernel update (relative to 2.4.5) David Mosberger
2001-06-27 7:09 ` David Mosberger
2001-06-27 17:24 ` Richard Hirst
2001-06-27 18:10 ` Martin Wilck
2001-07-23 23:49 ` [Linux-ia64] kernel update (relative to 2.4.7) David Mosberger
2001-07-24 1:50 ` Keith Owens
2001-07-24 3:02 ` Keith Owens
2001-07-24 16:37 ` Andreas Schwab
2001-07-24 18:42 ` David Mosberger
2001-08-14 8:15 ` [Linux-ia64] kernel update (relative to 2.4.8) Chris Ahna
2001-08-14 8:19 ` David Mosberger
2001-08-14 8:51 ` Keith Owens
2001-08-14 15:48 ` David Mosberger
2001-08-14 16:23 ` Don Dugger
2001-08-14 17:06 ` David Mosberger
2001-08-15 0:22 ` Keith Owens
2001-08-21 3:55 ` [Linux-ia64] kernel update (relative to 2.4.9) David Mosberger
2001-08-22 10:00 ` Andreas Schwab
2001-08-22 17:42 ` Chris Ahna
2001-09-25 7:13 ` [Linux-ia64] kernel update (relative to 2.4.10) David Mosberger
2001-09-25 7:17 ` David Mosberger
2001-09-25 12:17 ` Andreas Schwab
2001-09-25 15:14 ` Andreas Schwab
2001-09-25 15:45 ` Andreas Schwab
2001-09-26 22:49 ` David Mosberger
2001-09-26 22:51 ` David Mosberger
2001-09-27 4:57 ` Keith Owens
2001-09-27 17:48 ` David Mosberger
2001-10-02 5:20 ` Keith Owens
2001-10-02 5:50 ` Keith Owens
2001-10-11 2:47 ` [Linux-ia64] kernel update (relative to 2.4.11) David Mosberger
2001-10-11 4:39 ` Keith Owens
2001-10-25 4:27 ` [Linux-ia64] kernel update (relative to 2.4.13) David Mosberger
2001-10-25 4:30 ` David Mosberger
2001-10-25 5:26 ` Keith Owens
2001-10-25 6:21 ` Keith Owens
2001-10-25 6:44 ` Christoph Hellwig
2001-10-25 19:55 ` Luck, Tony
2001-10-25 20:20 ` David Mosberger
2001-10-26 14:36 ` Andreas Schwab
2001-10-30 2:20 ` David Mosberger
2001-11-02 1:35 ` William Lee Irwin III
2001-11-06 1:23 ` David Mosberger
2001-11-06 6:59 ` [Linux-ia64] kernel update (relative to 2.4.14) David Mosberger
2001-11-07 1:48 ` Keith Owens
2001-11-07 2:47 ` David Mosberger
2001-11-27 5:24 ` [Linux-ia64] kernel update (relative to 2.4.16) David Mosberger
2001-11-27 13:04 ` Andreas Schwab
2001-11-27 17:02 ` John Hesterberg
2001-11-27 22:03 ` John Hesterberg
2001-11-29 0:41 ` David Mosberger
2001-12-05 15:25 ` [Linux-ia64] kernel update (relative to 2.4.10) n0ano
2001-12-15 5:13 ` [Linux-ia64] kernel update (relative to 2.4.16) David Mosberger
2001-12-15 8:12 ` Keith Owens
2001-12-16 12:21 ` [Linux-ia64] kernel update (relative to 2.4.10) Zach, Yoav
2001-12-17 17:11 ` n0ano
2001-12-26 21:15 ` [Linux-ia64] kernel update (relative to 2.4.16) David Mosberger
2001-12-27 6:38 ` [Linux-ia64] kernel update (relative to v2.4.17) David Mosberger
2001-12-27 8:09 ` j-nomura
2001-12-27 21:59 ` Christian Groessler
2001-12-31 3:13 ` Matt_Domsch
2002-01-07 11:30 ` j-nomura
2002-02-08 7:02 ` [Linux-ia64] kernel update (relative to 2.5.3) David Mosberger
2002-02-27 1:47 ` [Linux-ia64] kernel update (relative to 2.4.18) David Mosberger
2002-02-28 4:40 ` Peter Chubb
2002-02-28 19:19 ` David Mosberger
2002-03-06 22:33 ` Peter Chubb
2002-03-08 6:38 ` [Linux-ia64] kernel update (relative to 2.5.5) David Mosberger
2002-03-09 11:08 ` Keith Owens
2002-04-26 7:15 ` [Linux-ia64] kernel update (relative to v2.5.10) David Mosberger
2002-05-31 6:08 ` [Linux-ia64] kernel update (relative to v2.5.18) David Mosberger
2002-06-06 2:01 ` Peter Chubb
2002-06-06 3:16 ` David Mosberger
2002-06-07 21:54 ` Bjorn Helgaas
2002-06-07 22:07 ` Bjorn Helgaas
2002-06-09 10:34 ` Steffen Persvold
2002-06-14 3:12 ` Peter Chubb
2002-06-22 8:57 ` [Linux-ia64] kernel update (relative to 2.4.18) David Mosberger
2002-06-22 9:25 ` David Mosberger
2002-06-22 10:05 ` Steffen Persvold
2002-06-22 19:03 ` David Mosberger
2002-06-22 19:33 ` Andreas Schwab
2002-07-08 22:08 ` Kimio Suganuma
2002-07-08 22:14 ` David Mosberger
2002-07-20 7:08 ` [Linux-ia64] kernel update (relative to v2.4.18) David Mosberger
2002-07-22 11:54 ` Andreas Schwab
2002-07-22 12:31 ` Keith Owens
2002-07-22 12:34 ` Andreas Schwab
2002-07-22 12:54 ` Keith Owens
2002-07-22 18:05 ` David Mosberger
2002-07-22 23:54 ` Kimio Suganuma
2002-07-23 1:00 ` Keith Owens
2002-07-23 1:10 ` David Mosberger
2002-07-23 1:21 ` Matthew Wilcox
2002-07-23 1:28 ` David Mosberger
2002-07-23 1:35 ` Grant Grundler
2002-07-23 3:09 ` Keith Owens
2002-07-23 5:04 ` David Mosberger
2002-07-23 5:58 ` Keith Owens
2002-07-23 6:15 ` David Mosberger
2002-07-23 12:09 ` Andreas Schwab
2002-07-23 15:38 ` Wichmann, Mats D
2002-07-23 16:17 ` David Mosberger
2002-07-23 16:28 ` David Mosberger
2002-07-23 16:30 ` David Mosberger
2002-07-23 18:08 ` KOCHI, Takayoshi
2002-07-23 19:17 ` Andreas Schwab
2002-07-24 4:30 ` KOCHI, Takayoshi
2002-08-22 13:42 ` [Linux-ia64] kernel update (relative to 2.4.19) Bjorn Helgaas
2002-08-22 14:22 ` Wichmann, Mats D
2002-08-22 15:29 ` Bjorn Helgaas
2002-08-23 4:52 ` KOCHI, Takayoshi
2002-08-23 10:10 ` Andreas Schwab
2002-08-30 5:42 ` [Linux-ia64] kernel update (relative to v2.5.32) David Mosberger
2002-08-30 17:26 ` KOCHI, Takayoshi
2002-08-30 19:00 ` David Mosberger
2002-09-18 3:25 ` Peter Chubb
2002-09-18 3:32 ` David Mosberger
2002-09-18 6:54 ` [Linux-ia64] kernel update (relative to 2.5.35) David Mosberger
2002-09-28 21:48 ` [Linux-ia64] kernel update (relative to 2.5.39) David Mosberger
2002-09-30 23:28 ` Peter Chubb
2002-09-30 23:49 ` David Mosberger
2002-10-01 4:26 ` Peter Chubb
2002-10-01 5:19 ` David Mosberger
2002-10-03 2:33 ` Jes Sorensen
2002-10-03 2:46 ` KOCHI, Takayoshi
2002-10-13 23:39 ` Peter Chubb
2002-10-17 11:46 ` Jes Sorensen
2002-11-01 6:18 ` [Linux-ia64] kernel update (relative to 2.5.45) David Mosberger
2002-12-11 4:44 ` [Linux-ia64] kernel update (relative to 2.4.20) Bjorn Helgaas
2002-12-12 2:00 ` Matthew Wilcox
2002-12-13 17:36 ` Bjorn Helgaas
2002-12-21 9:00 ` [Linux-ia64] kernel update (relative to 2.5.52) David Mosberger
2002-12-26 6:07 ` Kimio Suganuma
2003-01-02 21:27 ` David Mosberger
2003-01-25 5:02 ` [Linux-ia64] kernel update (relative to 2.5.59) David Mosberger
2003-01-25 20:19 ` Sam Ravnborg
2003-01-27 18:47 ` David Mosberger
2003-01-28 19:44 ` Arun Sharma
2003-01-28 19:55 ` David Mosberger
2003-01-28 21:34 ` Arun Sharma
2003-01-28 23:09 ` David Mosberger
2003-01-29 4:27 ` Peter Chubb
2003-01-29 6:07 ` David Mosberger
2003-01-29 14:06 ` Erich Focht
2003-01-29 17:10 ` Luck, Tony
2003-01-29 17:48 ` Paul Bame
2003-01-29 19:08 ` David Mosberger
2003-02-12 23:26 ` [Linux-ia64] kernel update (relative to 2.5.60) David Mosberger
2003-02-13 5:52 ` j-nomura
2003-02-13 17:53 ` Grant Grundler
2003-02-13 18:36 ` David Mosberger
2003-02-13 19:17 ` Grant Grundler
2003-02-13 20:00 ` David Mosberger
2003-02-13 20:11 ` Grant Grundler
2003-02-18 19:52 ` Jesse Barnes
2003-03-07 8:19 ` [Linux-ia64] kernel update (relative to v2.5.64) David Mosberger
2003-04-12 4:28 ` [Linux-ia64] kernel update (relative to v2.5.67) David Mosberger
2003-04-14 12:55 ` Takayoshi Kochi
2003-04-14 17:00 ` Howell, David P
2003-04-14 18:45 ` David Mosberger
2003-04-14 20:56 ` Alex Williamson
2003-04-14 22:13 ` Howell, David P
2003-04-15 9:01 ` Takayoshi Kochi
2003-04-15 22:03 ` David Mosberger
2003-04-15 22:12 ` Alex Williamson
2003-04-15 22:27 ` David Mosberger
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=marc-linux-ia64-105590678205111@msgid-missing \
--to=davidm@hpl.hp.com \
--cc=linux-ia64@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox