qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
* [Qemu-devel] ia64 support
@ 2010-03-29  0:25 Aurelien Jarno
  2010-03-29  0:25 ` [Qemu-devel] [PATCH 1/5] linux-user/ia64: workaround ia64 strangenesses Aurelien Jarno
                   ` (4 more replies)
  0 siblings, 5 replies; 10+ messages in thread
From: Aurelien Jarno @ 2010-03-29  0:25 UTC (permalink / raw)
  To: qemu-devel

This patch series adds host Itanium support to QEMU.

System mode works correctly except a few unaligned accesses in the slirp
code (fixed by the kernel). It has been tested by booting debian 
installer on arm, i386, mips, mipsel, ppc, sparc, and x86_64. A full 
installation has been done on sparc.

User mode works correctly for static binaries, but fails for some
dynamic binaries, due to mmap emulation not working correctly when
host page size is bigger than the target one.

^ permalink raw reply	[flat|nested] 10+ messages in thread

* [Qemu-devel] [PATCH 1/5] linux-user/ia64: workaround ia64 strangenesses
  2010-03-29  0:25 [Qemu-devel] ia64 support Aurelien Jarno
@ 2010-03-29  0:25 ` Aurelien Jarno
  2010-03-29  9:36   ` [Qemu-devel] " Paolo Bonzini
  2010-03-29  0:25 ` [Qemu-devel] [PATCH 2/5] linux-user: fix page_unprotect when host page size > target page size Aurelien Jarno
                   ` (3 subsequent siblings)
  4 siblings, 1 reply; 10+ messages in thread
From: Aurelien Jarno @ 2010-03-29  0:25 UTC (permalink / raw)
  To: qemu-devel; +Cc: Aurelien Jarno

ia64 has some strangenesses that need to be worked around:
- it has a __clone2() syscall instead of the usual clone() one, with
  different arguments, and which is not declared in the usual headers.
- ucontext.uc_sigmask is declared with type long int, while it is
  actually of type sigset_t.
- uc_mcontext, uc_sigmask, uc_stack, uc_link are declared using #define,
  which clashes with the target_ucontext fields. Change their names to
  tuc_*, as already done for some target architectures.
---
 cpu-exec.c           |    6 +-
 linux-user/signal.c  |  208 +++++++++++++++++++++++++-------------------------
 linux-user/syscall.c |    6 +-
 3 files changed, 114 insertions(+), 106 deletions(-)

diff --git a/cpu-exec.c b/cpu-exec.c
index bcfcda2..372aeac 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -81,7 +81,11 @@ void cpu_resume_from_signal(CPUState *env1, void *puc)
     if (puc) {
         /* XXX: use siglongjmp ? */
 #ifdef __linux__
+#ifdef __ia64
+        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
+#else
         sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
+#endif
 #elif defined(__OpenBSD__)
         sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
 #endif
@@ -1150,7 +1154,7 @@ int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
     }
     return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                              is_write,
-                             &uc->uc_sigmask, puc);
+                             (sigset_t *)&uc->uc_sigmask, puc);
 }
 
 #elif defined(__s390__)
diff --git a/linux-user/signal.c b/linux-user/signal.c
index e327c3d..a72c15c 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -2052,10 +2052,10 @@ typedef struct {
 } target_mcontext_t;
 
 struct target_ucontext {
-    struct target_ucontext *uc_link;
-    abi_ulong uc_flags;
-    target_sigset_t uc_sigmask;
-    target_mcontext_t uc_mcontext;
+    struct target_ucontext *tuc_link;
+    abi_ulong tuc_flags;
+    target_sigset_t tuc_sigmask;
+    target_mcontext_t tuc_mcontext;
 };
 
 /* A V9 register window */
@@ -2081,7 +2081,7 @@ void sparc64_set_context(CPUSPARCState *env)
     ucp_addr = env->regwptr[UREG_I0];
     if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1))
         goto do_sigsegv;
-    grp  = &ucp->uc_mcontext.mc_gregs;
+    grp  = &ucp->tuc_mcontext.mc_gregs;
     err  = __get_user(pc, &((*grp)[MC_PC]));
     err |= __get_user(npc, &((*grp)[MC_NPC]));
     if (err || ((pc | npc) & 3))
@@ -2091,11 +2091,11 @@ void sparc64_set_context(CPUSPARCState *env)
         sigset_t set;
 
         if (TARGET_NSIG_WORDS == 1) {
-            if (__get_user(target_set.sig[0], &ucp->uc_sigmask.sig[0]))
+            if (__get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]))
                 goto do_sigsegv;
         } else {
             abi_ulong *src, *dst;
-            src = ucp->uc_sigmask.sig;
+            src = ucp->tuc_sigmask.sig;
             dst = target_set.sig;
             for (i = 0; i < sizeof(target_sigset_t) / sizeof(abi_ulong);
                  i++, dst++, src++)
@@ -2129,8 +2129,8 @@ void sparc64_set_context(CPUSPARCState *env)
     err |= __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6]));
     err |= __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7]));
 
-    err |= __get_user(fp, &(ucp->uc_mcontext.mc_fp));
-    err |= __get_user(i7, &(ucp->uc_mcontext.mc_i7));
+    err |= __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
+    err |= __get_user(i7, &(ucp->tuc_mcontext.mc_i7));
 
     w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
     if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]), 
@@ -2139,20 +2139,20 @@ void sparc64_set_context(CPUSPARCState *env)
     if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]), 
                  abi_ulong) != 0)
         goto do_sigsegv;
-    err |= __get_user(fenab, &(ucp->uc_mcontext.mc_fpregs.mcfpu_enab));
-    err |= __get_user(env->fprs, &(ucp->uc_mcontext.mc_fpregs.mcfpu_fprs));
+    err |= __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
+    err |= __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
     {
         uint32_t *src, *dst;
-        src = ucp->uc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
+        src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
         dst = env->fpr;
         /* XXX: check that the CPU storage is the same as user context */
         for (i = 0; i < 64; i++, dst++, src++)
             err |= __get_user(*dst, src);
     }
     err |= __get_user(env->fsr,
-                      &(ucp->uc_mcontext.mc_fpregs.mcfpu_fsr));
+                      &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
     err |= __get_user(env->gsr,
-                      &(ucp->uc_mcontext.mc_fpregs.mcfpu_gsr));
+                      &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
     if (err)
         goto do_sigsegv;
     unlock_user_struct(ucp, ucp_addr, 0);
@@ -2178,7 +2178,7 @@ void sparc64_get_context(CPUSPARCState *env)
     if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0))
         goto do_sigsegv;
     
-    mcp = &ucp->uc_mcontext;
+    mcp = &ucp->tuc_mcontext;
     grp = &mcp->mc_gregs;
 
     /* Skip over the trap instruction, first. */
@@ -2191,11 +2191,11 @@ void sparc64_get_context(CPUSPARCState *env)
     host_to_target_sigset_internal(&target_set, &set);
     if (TARGET_NSIG_WORDS == 1) {
         err |= __put_user(target_set.sig[0],
-                          (abi_ulong *)&ucp->uc_sigmask);
+                          (abi_ulong *)&ucp->tuc_sigmask);
     } else {
         abi_ulong *src, *dst;
         src = target_set.sig;
-        dst = ucp->uc_sigmask.sig;
+        dst = ucp->tuc_sigmask.sig;
         for (i = 0; i < sizeof(target_sigset_t) / sizeof(abi_ulong);
              i++, dst++, src++)
             err |= __put_user(*src, dst);
@@ -2238,7 +2238,7 @@ void sparc64_get_context(CPUSPARCState *env)
     {
         uint32_t *src, *dst;
         src = env->fpr;
-        dst = ucp->uc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
+        dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
         /* XXX: check that the CPU storage is the same as user context */
         for (i = 0; i < 64; i++, dst++, src++)
             err |= __put_user(*src, dst);
@@ -2346,12 +2346,12 @@ struct sigframe {
 };
 
 struct target_ucontext {
-    target_ulong uc_flags;
-    target_ulong uc_link;
-    target_stack_t uc_stack;
+    target_ulong tuc_flags;
+    target_ulong tuc_link;
+    target_stack_t tuc_stack;
     target_ulong pad0;
-    struct target_sigcontext uc_mcontext;
-    target_sigset_t uc_sigmask;
+    struct target_sigcontext tuc_mcontext;
+    target_sigset_t tuc_sigmask;
 };
 
 struct target_rt_sigframe {
@@ -2663,17 +2663,17 @@ static void setup_rt_frame(int sig, struct target_sigaction *ka,
 
     copy_siginfo_to_user(&frame->rs_info, info);
 
-    __put_user(0, &frame->rs_uc.uc_flags);
-    __put_user(0, &frame->rs_uc.uc_link);
-    __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.uc_stack.ss_sp);
-    __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.uc_stack.ss_size);
+    __put_user(0, &frame->rs_uc.tuc_flags);
+    __put_user(0, &frame->rs_uc.tuc_link);
+    __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
+    __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
     __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
-               &frame->rs_uc.uc_stack.ss_flags);
+               &frame->rs_uc.tuc_stack.ss_flags);
 
-    setup_sigcontext(env, &frame->rs_uc.uc_mcontext);
+    setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);
 
     for(i = 0; i < TARGET_NSIG_WORDS; i++) {
-        __put_user(set->sig[i], &frame->rs_uc.uc_sigmask.sig[i]);
+        __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
     }
 
     /*
@@ -2720,14 +2720,14 @@ long do_rt_sigreturn(CPUState *env)
     if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
    	goto badframe;
 
-    target_to_host_sigset(&blocked, &frame->rs_uc.uc_sigmask);
+    target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
     sigprocmask(SIG_SETMASK, &blocked, NULL);
 
-    if (restore_sigcontext(env, &frame->rs_uc.uc_mcontext))
+    if (restore_sigcontext(env, &frame->rs_uc.tuc_mcontext))
         goto badframe;
 
     if (do_sigaltstack(frame_addr +
-		       offsetof(struct target_rt_sigframe, rs_uc.uc_stack),
+		       offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
 		       0, get_sp_from_cpustate(env)) == -EFAULT)
         goto badframe;
 
@@ -2779,11 +2779,11 @@ struct target_sigframe
 
 
 struct target_ucontext {
-    target_ulong uc_flags;
-    struct target_ucontext *uc_link;
-    target_stack_t uc_stack;
-    struct target_sigcontext uc_mcontext;
-    target_sigset_t uc_sigmask;	/* mask last for extensibility */
+    target_ulong tuc_flags;
+    struct target_ucontext *tuc_link;
+    target_stack_t tuc_stack;
+    struct target_sigcontext tuc_mcontext;
+    target_sigset_t tuc_sigmask;	/* mask last for extensibility */
 };
 
 struct target_rt_sigframe
@@ -2940,18 +2940,18 @@ static void setup_rt_frame(int sig, struct target_sigaction *ka,
     err |= copy_siginfo_to_user(&frame->info, info);
 
     /* Create the ucontext.  */
-    err |= __put_user(0, &frame->uc.uc_flags);
-    err |= __put_user(0, (unsigned long *)&frame->uc.uc_link);
+    err |= __put_user(0, &frame->uc.tuc_flags);
+    err |= __put_user(0, (unsigned long *)&frame->uc.tuc_link);
     err |= __put_user((unsigned long)target_sigaltstack_used.ss_sp,
-		      &frame->uc.uc_stack.ss_sp);
+		      &frame->uc.tuc_stack.ss_sp);
     err |= __put_user(sas_ss_flags(regs->gregs[15]),
-		      &frame->uc.uc_stack.ss_flags);
+		      &frame->uc.tuc_stack.ss_flags);
     err |= __put_user(target_sigaltstack_used.ss_size,
-		      &frame->uc.uc_stack.ss_size);
-    err |= setup_sigcontext(&frame->uc.uc_mcontext,
+		      &frame->uc.tuc_stack.ss_size);
+    err |= setup_sigcontext(&frame->uc.tuc_mcontext,
 			    regs, set->sig[0]);
     for(i = 0; i < TARGET_NSIG_WORDS; i++) {
-        err |= __put_user(set->sig[i], &frame->uc.uc_sigmask.sig[i]);
+        err |= __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
     }
 
     /* Set up to return from userspace.  If provided, use a stub
@@ -3038,14 +3038,14 @@ long do_rt_sigreturn(CPUState *regs)
     if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
    	goto badframe;
 
-    target_to_host_sigset(&blocked, &frame->uc.uc_sigmask);
+    target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask);
     sigprocmask(SIG_SETMASK, &blocked, NULL);
 
-    if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
+    if (restore_sigcontext(regs, &frame->uc.tuc_mcontext, &r0))
         goto badframe;
 
     if (do_sigaltstack(frame_addr +
-		       offsetof(struct target_rt_sigframe, uc.uc_stack),
+		       offsetof(struct target_rt_sigframe, uc.tuc_stack),
 		       0, get_sp_from_cpustate(regs)) == -EFAULT)
         goto badframe;
 
@@ -3555,22 +3555,22 @@ struct target_mcontext {
 };
 
 struct target_ucontext {
-    target_ulong uc_flags;
-    target_ulong uc_link;    /* struct ucontext __user * */
-    struct target_sigaltstack uc_stack;
+    target_ulong tuc_flags;
+    target_ulong tuc_link;    /* struct ucontext __user * */
+    struct target_sigaltstack tuc_stack;
 #if !defined(TARGET_PPC64)
-    int32_t uc_pad[7];
-    target_ulong uc_regs;    /* struct mcontext __user *
+    int32_t tuc_pad[7];
+    target_ulong tuc_regs;    /* struct mcontext __user *
                                 points to uc_mcontext field */
 #endif
-    target_sigset_t uc_sigmask;
+    target_sigset_t tuc_sigmask;
 #if defined(TARGET_PPC64)
     target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
-    struct target_sigcontext uc_mcontext;
+    struct target_sigcontext tuc_mcontext;
 #else
-    int32_t uc_maskext[30];
-    int32_t uc_pad2[3];
-    struct target_mcontext uc_mcontext;
+    int32_t tuc_maskext[30];
+    int32_t tuc_pad2[3];
+    struct target_mcontext tuc_mcontext;
 #endif
 };
 
@@ -3883,21 +3883,21 @@ static void setup_rt_frame(int sig, struct target_sigaction *ka,
 
     err |= copy_siginfo_to_user(&rt_sf->info, info);
 
-    err |= __put_user(0, &rt_sf->uc.uc_flags);
-    err |= __put_user(0, &rt_sf->uc.uc_link);
+    err |= __put_user(0, &rt_sf->uc.tuc_flags);
+    err |= __put_user(0, &rt_sf->uc.tuc_link);
     err |= __put_user((target_ulong)target_sigaltstack_used.ss_sp,
-                      &rt_sf->uc.uc_stack.ss_sp);
+                      &rt_sf->uc.tuc_stack.ss_sp);
     err |= __put_user(sas_ss_flags(env->gpr[1]),
-                      &rt_sf->uc.uc_stack.ss_flags);
+                      &rt_sf->uc.tuc_stack.ss_flags);
     err |= __put_user(target_sigaltstack_used.ss_size,
-                      &rt_sf->uc.uc_stack.ss_size);
-    err |= __put_user(h2g (&rt_sf->uc.uc_mcontext),
-                      &rt_sf->uc.uc_regs);
+                      &rt_sf->uc.tuc_stack.ss_size);
+    err |= __put_user(h2g (&rt_sf->uc.tuc_mcontext),
+                      &rt_sf->uc.tuc_regs);
     for(i = 0; i < TARGET_NSIG_WORDS; i++) {
-        err |= __put_user(set->sig[i], &rt_sf->uc.uc_sigmask.sig[i]);
+        err |= __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
     }
 
-    frame = &rt_sf->uc.uc_mcontext;
+    frame = &rt_sf->uc.tuc_mcontext;
     err |= save_user_regs(env, frame, TARGET_NR_rt_sigreturn);
 
     /* The kernel checks for the presence of a VDSO here.  We don't
@@ -3985,7 +3985,7 @@ static int do_setcontext(struct target_ucontext *ucp, CPUState *env, int sig)
     sigset_t blocked;
     target_sigset_t set;
 
-    if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, uc_sigmask),
+    if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
                        sizeof (set)))
         return 1;
 
@@ -3993,7 +3993,7 @@ static int do_setcontext(struct target_ucontext *ucp, CPUState *env, int sig)
     fprintf (stderr, "do_setcontext: not implemented\n");
     return 0;
 #else
-    if (__get_user(mcp_addr, &ucp->uc_regs))
+    if (__get_user(mcp_addr, &ucp->tuc_regs))
         return 1;
 
     if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
@@ -4026,7 +4026,7 @@ long do_rt_sigreturn(CPUState *env)
         goto sigsegv;
 
     do_sigaltstack(rt_sf_addr
-                   + offsetof(struct target_rt_sigframe, uc.uc_stack),
+                   + offsetof(struct target_rt_sigframe, uc.tuc_stack),
                    0, env->gpr[1]);
 
     unlock_user_struct(rt_sf, rt_sf_addr, 1);
@@ -4082,12 +4082,12 @@ struct target_mcontext {
 #define TARGET_MCONTEXT_VERSION 2
 
 struct target_ucontext {
-    abi_ulong uc_flags;
-    abi_ulong uc_link;
-    target_stack_t uc_stack;
-    struct target_mcontext uc_mcontext;
-    abi_long uc_filler[80];
-    target_sigset_t uc_sigmask;
+    abi_ulong tuc_flags;
+    abi_ulong tuc_link;
+    target_stack_t tuc_stack;
+    struct target_mcontext tuc_mcontext;
+    abi_long tuc_filler[80];
+    target_sigset_t tuc_sigmask;
 };
 
 struct target_rt_sigframe
@@ -4212,10 +4212,10 @@ give_sigsegv:
 static inline int target_rt_setup_ucontext(struct target_ucontext *uc,
                                            CPUState *env)
 {
-    target_greg_t *gregs = uc->uc_mcontext.gregs;
+    target_greg_t *gregs = uc->tuc_mcontext.gregs;
     int err;
 
-    err = __put_user(TARGET_MCONTEXT_VERSION, &uc->uc_mcontext.version);
+    err = __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version);
     err |= __put_user(env->dregs[0], &gregs[0]);
     err |= __put_user(env->dregs[1], &gregs[1]);
     err |= __put_user(env->dregs[2], &gregs[2]);
@@ -4244,9 +4244,9 @@ static inline int target_rt_restore_ucontext(CPUState *env,
 {
     int temp;
     int err;
-    target_greg_t *gregs = uc->uc_mcontext.gregs;
+    target_greg_t *gregs = uc->tuc_mcontext.gregs;
     
-    err = __get_user(temp, &uc->uc_mcontext.version);
+    err = __get_user(temp, &uc->tuc_mcontext.version);
     if (temp != TARGET_MCONTEXT_VERSION)
         goto badframe;
 
@@ -4306,21 +4306,21 @@ static void setup_rt_frame(int sig, struct target_sigaction *ka,
 
     /* Create the ucontext */
 
-    err |= __put_user(0, &frame->uc.uc_flags);
-    err |= __put_user(0, &frame->uc.uc_link);
+    err |= __put_user(0, &frame->uc.tuc_flags);
+    err |= __put_user(0, &frame->uc.tuc_link);
     err |= __put_user(target_sigaltstack_used.ss_sp,
-                      &frame->uc.uc_stack.ss_sp);
+                      &frame->uc.tuc_stack.ss_sp);
     err |= __put_user(sas_ss_flags(env->aregs[7]),
-                      &frame->uc.uc_stack.ss_flags);
+                      &frame->uc.tuc_stack.ss_flags);
     err |= __put_user(target_sigaltstack_used.ss_size,
-                      &frame->uc.uc_stack.ss_size);
+                      &frame->uc.tuc_stack.ss_size);
     err |= target_rt_setup_ucontext(&frame->uc, env);
 
     if (err)
             goto give_sigsegv;
 
     for(i = 0; i < TARGET_NSIG_WORDS; i++) {
-        if (__put_user(set->sig[i], &frame->uc.uc_sigmask.sig[i]))
+        if (__put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]))
             goto give_sigsegv;
     }
 
@@ -4409,7 +4409,7 @@ long do_rt_sigreturn(CPUState *env)
         goto badframe;
 
     if (do_sigaltstack(frame_addr +
-                       offsetof(struct target_rt_sigframe, uc.uc_stack),
+                       offsetof(struct target_rt_sigframe, uc.tuc_stack),
                        0, get_sp_from_cpustate(env)) == -EFAULT)
         goto badframe;
 
@@ -4447,12 +4447,12 @@ struct target_sigcontext {
 };
 
 struct target_ucontext {
-    abi_ulong uc_flags;
-    abi_ulong uc_link;
-    abi_ulong uc_osf_sigmask;
-    target_stack_t uc_stack;
-    struct target_sigcontext uc_mcontext;
-    target_sigset_t uc_sigmask;
+    abi_ulong tuc_flags;
+    abi_ulong tuc_link;
+    abi_ulong tuc_osf_sigmask;
+    target_stack_t tuc_stack;
+    struct target_sigcontext tuc_mcontext;
+    target_sigset_t tuc_sigmask;
 };
 
 struct target_sigframe {
@@ -4588,18 +4588,18 @@ static void setup_rt_frame(int sig, struct target_sigaction *ka,
 
     err |= copy_siginfo_to_user(&frame->info, info);
 
-    err |= __put_user(0, &frame->uc.uc_flags);
-    err |= __put_user(0, &frame->uc.uc_link);
-    err |= __put_user(set->sig[0], &frame->uc.uc_osf_sigmask);
+    err |= __put_user(0, &frame->uc.tuc_flags);
+    err |= __put_user(0, &frame->uc.tuc_link);
+    err |= __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask);
     err |= __put_user(target_sigaltstack_used.ss_sp,
-                      &frame->uc.uc_stack.ss_sp);
+                      &frame->uc.tuc_stack.ss_sp);
     err |= __put_user(sas_ss_flags(env->ir[IR_SP]),
-                      &frame->uc.uc_stack.ss_flags);
+                      &frame->uc.tuc_stack.ss_flags);
     err |= __put_user(target_sigaltstack_used.ss_size,
-                      &frame->uc.uc_stack.ss_size);
-    err |= setup_sigcontext(&frame->uc.uc_mcontext, env, frame_addr, set);
+                      &frame->uc.tuc_stack.ss_size);
+    err |= setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set);
     for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
-        err |= __put_user(set->sig[i], &frame->uc.uc_sigmask.sig[i]);
+        err |= __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
     }
 
     if (ka->sa_restorer) {
@@ -4668,14 +4668,14 @@ long do_rt_sigreturn(CPUState *env)
     if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
         goto badframe;
     }
-    target_to_host_sigset(&set, &frame->uc.uc_sigmask);
+    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
     sigprocmask(SIG_SETMASK, &set, NULL);
 
-    if (restore_sigcontext(env, &frame->uc.uc_mcontext)) {
+    if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
         goto badframe;
     }
     if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
-                                             uc.uc_stack),
+                                             uc.tuc_stack),
                        0, env->ir[IR_SP]) == -EFAULT) {
         goto badframe;
     }
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index 80acf70..5640ba6 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -41,6 +41,10 @@
 #include <sys/swap.h>
 #include <signal.h>
 #include <sched.h>
+#ifdef __ia64__
+int __clone2(int (*fn)(void *), void *child_stack_base,
+             size_t stack_size, int flags, void *arg, ...);
+#endif
 #include <sys/socket.h>
 #include <sys/un.h>
 #include <sys/uio.h>
@@ -3628,7 +3632,7 @@ static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
             return -EINVAL;
         /* This is probably going to die very quickly, but do it anyway.  */
 #ifdef __ia64__
-        ret = __clone2(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
+        ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
 #else
 	ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
 #endif
-- 
1.7.0.2

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [Qemu-devel] [PATCH 2/5] linux-user: fix page_unprotect when host page size > target page size
  2010-03-29  0:25 [Qemu-devel] ia64 support Aurelien Jarno
  2010-03-29  0:25 ` [Qemu-devel] [PATCH 1/5] linux-user/ia64: workaround ia64 strangenesses Aurelien Jarno
@ 2010-03-29  0:25 ` Aurelien Jarno
  2010-03-29  0:25 ` [Qemu-devel] [PATCH 4/5] tcg: align static_code_gen_buffer to CODE_GEN_ALIGN Aurelien Jarno
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 10+ messages in thread
From: Aurelien Jarno @ 2010-03-29  0:25 UTC (permalink / raw)
  To: qemu-devel; +Cc: Aurelien Jarno

When the host page size is bigger than the target one, unprotecting a
page should:
- mark all the target pages corresponding to the host page as writable
- invalidate all tb corresponding to the host page (and not the target
  page)

Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
---
 exec.c |   45 ++++++++++++++++++++++-----------------------
 1 files changed, 22 insertions(+), 23 deletions(-)

diff --git a/exec.c b/exec.c
index 0916208..1b0fe52 100644
--- a/exec.c
+++ b/exec.c
@@ -2447,8 +2447,8 @@ int page_check_range(target_ulong start, target_ulong len, int flags)
    page. Return TRUE if the fault was successfully handled. */
 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
 {
-    unsigned int page_index, prot, pindex;
-    PageDesc *p, *p1;
+    unsigned int prot;
+    PageDesc *p;
     target_ulong host_start, host_end, addr;
 
     /* Technically this isn't safe inside a signal handler.  However we
@@ -2456,37 +2456,36 @@ int page_unprotect(target_ulong address, unsigned long pc, void *puc)
        practice it seems to be ok.  */
     mmap_lock();
 
-    host_start = address & qemu_host_page_mask;
-    page_index = host_start >> TARGET_PAGE_BITS;
-    p1 = page_find(page_index);
-    if (!p1) {
+    p = page_find(address >> TARGET_PAGE_BITS);
+    if (!p) {
         mmap_unlock();
         return 0;
     }
-    host_end = host_start + qemu_host_page_size;
-    p = p1;
-    prot = 0;
-    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
-        prot |= p->flags;
-        p++;
-    }
+
     /* if the page was really writable, then we change its
        protection back to writable */
-    if (prot & PAGE_WRITE_ORG) {
-        pindex = (address - host_start) >> TARGET_PAGE_BITS;
-        if (!(p1[pindex].flags & PAGE_WRITE)) {
-            mprotect((void *)g2h(host_start), qemu_host_page_size,
-                     (prot & PAGE_BITS) | PAGE_WRITE);
-            p1[pindex].flags |= PAGE_WRITE;
+    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
+        host_start = address & qemu_host_page_mask;
+        host_end = host_start + qemu_host_page_size;
+
+        prot = 0;
+        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
+            p = page_find(addr >> TARGET_PAGE_BITS);
+            p->flags |= PAGE_WRITE;
+            prot |= p->flags;
+
             /* and since the content will be modified, we must invalidate
                the corresponding translated code. */
-            tb_invalidate_phys_page(address, pc, puc);
+            tb_invalidate_phys_page(addr, pc, puc);
 #ifdef DEBUG_TB_CHECK
-            tb_invalidate_check(address);
+            tb_invalidate_check(addr);
 #endif
-            mmap_unlock();
-            return 1;
         }
+        mprotect((void *)g2h(host_start), qemu_host_page_size,
+                 prot & PAGE_BITS);
+
+        mmap_unlock();
+        return 1;
     }
     mmap_unlock();
     return 0;
-- 
1.7.0.2

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [Qemu-devel] [PATCH 4/5] tcg: align static_code_gen_buffer to CODE_GEN_ALIGN
  2010-03-29  0:25 [Qemu-devel] ia64 support Aurelien Jarno
  2010-03-29  0:25 ` [Qemu-devel] [PATCH 1/5] linux-user/ia64: workaround ia64 strangenesses Aurelien Jarno
  2010-03-29  0:25 ` [Qemu-devel] [PATCH 2/5] linux-user: fix page_unprotect when host page size > target page size Aurelien Jarno
@ 2010-03-29  0:25 ` Aurelien Jarno
  2010-03-29  0:25 ` [Qemu-devel] [PATCH 5/5] tcg: initial ia64 support Aurelien Jarno
  2010-03-29  0:35 ` [Qemu-devel] [PATCH 3/5] ia64 disas support Aurelien Jarno
  4 siblings, 0 replies; 10+ messages in thread
From: Aurelien Jarno @ 2010-03-29  0:25 UTC (permalink / raw)
  To: qemu-devel; +Cc: Aurelien Jarno

On ia64, the default memory alignment is not enough for code
alignment. To fix that, force static_code_gen_buffer alignment
to CODE_GEN_ALIGN.

Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
---
 exec.c |    3 ++-
 1 files changed, 2 insertions(+), 1 deletions(-)

diff --git a/exec.c b/exec.c
index 1b0fe52..5c13524 100644
--- a/exec.c
+++ b/exec.c
@@ -447,7 +447,8 @@ static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
 #endif
 
 #ifdef USE_STATIC_CODE_GEN_BUFFER
-static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
+static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
+               __attribute__((aligned (CODE_GEN_ALIGN)));
 #endif
 
 static void code_gen_alloc(unsigned long tb_size)
-- 
1.7.0.2

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [Qemu-devel] [PATCH 5/5] tcg: initial ia64 support
  2010-03-29  0:25 [Qemu-devel] ia64 support Aurelien Jarno
                   ` (2 preceding siblings ...)
  2010-03-29  0:25 ` [Qemu-devel] [PATCH 4/5] tcg: align static_code_gen_buffer to CODE_GEN_ALIGN Aurelien Jarno
@ 2010-03-29  0:25 ` Aurelien Jarno
  2010-03-29  0:35 ` [Qemu-devel] [PATCH 3/5] ia64 disas support Aurelien Jarno
  4 siblings, 0 replies; 10+ messages in thread
From: Aurelien Jarno @ 2010-03-29  0:25 UTC (permalink / raw)
  To: qemu-devel; +Cc: Aurelien Jarno

A few words about design choices:
* On IA64, instructions should be grouped by bundle, and dependencies
  between instructions declared. A first version of this code tried to
  schedule instructions automatically, but was very complex and too
  invasive for the current common TCG code (ops not ending at
  instruction boundaries, code retranslation breaking already generated
  code, etc.)  It was also not very efficient, as dependencies between
  TCG ops are not available.
  Instead the option taken by the current implementation does not try
  to fill the bundle by scheduling instructions, but by providing ops
  not available as an ia64 instruction, and by offering 22-bit constant
  loading for most of the instructions. With both options the bundles
  are filled to approximately the same level.

* Up to 128 registers can be allocated to a function on IA64, but TCG
  limits this number to 64, which is actually more than enough. The
  register allocation is the following:
  - r0: used to map a constant argument with value 0
  - r1: global pointer
  - r2, r3: internal use
  - r4 to r6: not used to avoid saving them
  - r7: env structure
  - r8 to r11: free for TCG (call clobbered)
  - r12: stack pointer
  - r13: thread pointer
  - r14 to r31: free for TCG (call clobbered)
  - r32: reserved (return address)
  - r33: reserved (PFS)
  - r34 to r63: free for TCG

* The IA64 architecture has only 64-bit registers and no 32-bit
  instructions (the only exception being cmp4). Therefore 64-bit
  registers and instructions are used for 32-bit ops. The adopted
  strategy is the same as the ABI, that is the higher 32 bits are
  undefined. Most ops (and, or, add, shl, etc.) can directly use
  the 64-bit registers, while some others have to sign-extend (sar,
  div, etc.) or zero-extend (shr, divu, etc.) the register first.

Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
---
 configure             |   20 +-
 cpu-common.h          |    2 +-
 tcg/ia64/tcg-target.c | 2325 +++++++++++++++++++++++++++++++++++++++++++++++++
 tcg/ia64/tcg-target.h |  156 ++++
 4 files changed, 2488 insertions(+), 15 deletions(-)
 create mode 100644 tcg/ia64/tcg-target.c
 create mode 100644 tcg/ia64/tcg-target.h

diff --git a/configure b/configure
index fcb821f..ff7e3c6 100755
--- a/configure
+++ b/configure
@@ -193,6 +193,8 @@ elif check_define _ARCH_PPC ; then
   fi
 elif check_define __mips__ ; then
   cpu="mips"
+elif check_define __ia64__ ; then
+  cpu="ia64"
 else
   cpu=`uname -m`
 fi
@@ -710,6 +712,9 @@ case "$cpu" in
     mips*)
            host_guest_base="yes"
            ;;
+    ia64*)
+           host_guest_base="yes"
+           ;;
 esac
 
 [ -z "$guest_base" ] && guest_base="$host_guest_base"
@@ -2659,9 +2664,6 @@ alpha)
   # Ensure there's only a single GP
   cflags="-msmall-data $cflags"
 ;;
-ia64)
-  cflags="-mno-sdata $cflags"
-;;
 esac
 
 if test "$target_softmmu" = "yes" ; then
@@ -2706,21 +2708,11 @@ if test "$target_linux_user" = "yes" -o "$target_bsd_user" = "yes" ; then
     # -static is used to avoid g1/g3 usage by the dynamic linker
     ldflags="$linker_script -static $ldflags"
     ;;
-  ia64)
-    ldflags="-Wl,-G0 $linker_script -static $ldflags"
-    ;;
-  i386|x86_64|ppc|ppc64|s390|sparc64|alpha|arm|m68k|mips|mips64)
+  i386|x86_64|ppc|ppc64|s390|sparc64|alpha|arm|m68k|mips|mips64|ia64)
     ldflags="$linker_script $ldflags"
     ;;
   esac
 fi
-if test "$target_softmmu" = "yes" ; then
-  case "$ARCH" in
-  ia64)
-    ldflags="-Wl,-G0 $linker_script -static $ldflags"
-    ;;
-  esac
-fi
 
 echo "LDFLAGS+=$ldflags" >> $config_target_mak
 echo "QEMU_CFLAGS+=$cflags" >> $config_target_mak
diff --git a/cpu-common.h b/cpu-common.h
index bb05300..ce0221b 100644
--- a/cpu-common.h
+++ b/cpu-common.h
@@ -3,7 +3,7 @@
 
 /* CPU interfaces that are target indpendent.  */
 
-#if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__)
+#if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__) || defined(__ia64__)
 #define WORDS_ALIGNED
 #endif
 
diff --git a/tcg/ia64/tcg-target.c b/tcg/ia64/tcg-target.c
new file mode 100644
index 0000000..113f245
--- /dev/null
+++ b/tcg/ia64/tcg-target.c
@@ -0,0 +1,2325 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2009-2010 Aurelien Jarno <aurelien@aurel32.net>
+ * Based on i386/tcg-target.c - Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/*
+ * Register definitions
+ */
+
#ifndef NDEBUG
/* Human-readable names for the 64 general registers, indexed by TCG
   register number; used only for debug dumps.  */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
     "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
     "r8",  "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
    "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
    "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
    "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
    "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
};
#endif

/* Branch registers b0-b7.  NOTE: these values live in a separate
   encoding space from the general registers (TCG_REG_B0 is 0, just
   like TCG_REG_R0); which space applies is determined by the
   instruction field the value is packed into.  */
enum {
    TCG_REG_B0 = 0,
    TCG_REG_B1,
    TCG_REG_B2,
    TCG_REG_B3,
    TCG_REG_B4,
    TCG_REG_B5,
    TCG_REG_B6,
    TCG_REG_B7,
};

/* Floating point registers f0-f15 (again a separate encoding space).  */
enum {
    TCG_REG_F0 = 0,
    TCG_REG_F1,
    TCG_REG_F2,
    TCG_REG_F3,
    TCG_REG_F4,
    TCG_REG_F5,
    TCG_REG_F6,
    TCG_REG_F7,
    TCG_REG_F8,
    TCG_REG_F9,
    TCG_REG_F10,
    TCG_REG_F11,
    TCG_REG_F12,
    TCG_REG_F13,
    TCG_REG_F14,
    TCG_REG_F15,
};

/* Predicate registers p0-p15.  p0 always reads as 1, so it is used
   throughout as the "always execute" qualifying predicate.  */
enum {
    TCG_REG_P0 = 0,
    TCG_REG_P1,
    TCG_REG_P2,
    TCG_REG_P3,
    TCG_REG_P4,
    TCG_REG_P5,
    TCG_REG_P6,
    TCG_REG_P7,
    TCG_REG_P8,
    TCG_REG_P9,
    TCG_REG_P10,
    TCG_REG_P11,
    TCG_REG_P12,
    TCG_REG_P13,
    TCG_REG_P14,
    TCG_REG_P15,
};

/* Application registers.  ar.pfs (previous function state) is AR 64;
   the value is used as the ar3 field of mov-from/to-AR forms
   (see tcg_opc_i26).  */
enum {
    TCG_REG_PFS = 64,
};
+
/* Register allocation preference order.  Stacked registers r34-r55 come
   first (call-preserved by the register stack engine), then the static
   scratch registers r14-r31, then the outgoing-argument registers
   r56-r63 and the return-value registers r8-r11 last, since those are
   clobbered across calls.  NOTE(review): r32/r33 are deliberately
   absent -- presumably reserved by the backend (e.g. for the env
   pointer); confirm against tcg-target.h.  */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R34,
    TCG_REG_R35,
    TCG_REG_R36,
    TCG_REG_R37,
    TCG_REG_R38,
    TCG_REG_R39,
    TCG_REG_R40,
    TCG_REG_R41,
    TCG_REG_R42,
    TCG_REG_R43,
    TCG_REG_R44,
    TCG_REG_R45,
    TCG_REG_R46,
    TCG_REG_R47,
    TCG_REG_R48,
    TCG_REG_R49,
    TCG_REG_R50,
    TCG_REG_R51,
    TCG_REG_R52,
    TCG_REG_R53,
    TCG_REG_R54,
    TCG_REG_R55,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,
    TCG_REG_R17,
    TCG_REG_R18,
    TCG_REG_R19,
    TCG_REG_R20,
    TCG_REG_R21,
    TCG_REG_R22,
    TCG_REG_R23,
    TCG_REG_R24,
    TCG_REG_R25,
    TCG_REG_R26,
    TCG_REG_R27,
    TCG_REG_R28,
    TCG_REG_R29,
    TCG_REG_R30,
    TCG_REG_R31,
    TCG_REG_R56,
    TCG_REG_R57,
    TCG_REG_R58,
    TCG_REG_R59,
    TCG_REG_R60,
    TCG_REG_R61,
    TCG_REG_R62,
    TCG_REG_R63,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11
};

/* The eight outgoing-argument registers of the current register stack
   frame (IA-64 passes the first 8 integer arguments in registers).  */
static const int tcg_target_call_iarg_regs[8] = {
    TCG_REG_R56,
    TCG_REG_R57,
    TCG_REG_R58,
    TCG_REG_R59,
    TCG_REG_R60,
    TCG_REG_R61,
    TCG_REG_R62,
    TCG_REG_R63,
};

/* Integer return-value registers per the IA-64 calling convention.  */
static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_R8,
    TCG_REG_R9
};
+
+/* maximum number of register used for input function arguments */
/* Number of registers available for passing integer call arguments.
   IA-64 always passes the first eight integer arguments in registers
   (r56..r63 here); "flags" is unused.  */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    enum { IA64_N_CALL_IARG_REGS = 8 };
    return IA64_N_CALL_IARG_REGS;
}
+
+/*
+ * opcode formation
+ */
+
+/* bundle templates: stops (double bar in the IA64 manual) are marked with
+   an uppercase letter. */
/* Each value is the Itanium bundle-template encoding (bits 0..4 of the
   bundle).  The three letters name the execution unit of each slot
   (m/i/b/f/l/x); an uppercase letter means a stop ("double bar")
   follows that slot, i.e. later slots may consume results of earlier
   ones.  Gaps in the numbering are templates this backend never
   emits.  */
enum {
    mii = 0x00,
    miI = 0x01,
    mIi = 0x02,
    mII = 0x03,
    mlx = 0x04,
    mLX = 0x05,
    mmi = 0x08,
    mmI = 0x09,
    Mmi = 0x0a,
    MmI = 0x0b,
    mfi = 0x0c,
    mfI = 0x0d,
    mmf = 0x0e,
    mmF = 0x0f,
    mib = 0x10,
    miB = 0x11,
    mbb = 0x12,
    mbB = 0x13,
    bbb = 0x16,
    bbB = 0x17,
    mmb = 0x18,
    mmB = 0x19,
    mfb = 0x1c,
    mfB = 0x1d,
};
+
/* Opcode skeletons: major opcode plus extension bits, pre-shifted into
   their positions within a 41-bit instruction slot.  The suffix names
   the instruction format from the Itanium ISA manual (A1, M4, I21,
   ...), which determines which tcg_opc_* packer to combine it with.
   The same numeric value may appear for different units (e.g.
   CMP_LT_A6 and FNMA_S1_F1): the bundle template disambiguates.  */
enum {
    OPC_ADD_A1                = 0x10000000000ull,
    OPC_AND_A1                = 0x10060000000ull,
    OPC_AND_A3                = 0x10160000000ull,
    OPC_ANDCM_A1              = 0x10068000000ull,
    OPC_ANDCM_A3              = 0x10168000000ull,
    OPC_ADDS_A4               = 0x10800000000ull,
    OPC_ADDL_A5               = 0x12000000000ull,
    OPC_ALLOC_M34             = 0x02c00000000ull,
    OPC_BR_DPTK_FEW_B1        = 0x08400000000ull,
    OPC_BR_SPTK_MANY_B1       = 0x08000001000ull,
    OPC_BR_SPTK_MANY_B4       = 0x00100001000ull,
    OPC_BR_CALL_SPTK_MANY_B5  = 0x02100001000ull,
    OPC_BR_RET_SPTK_MANY_B4   = 0x00108001100ull,
    OPC_BRL_SPTK_MANY_X3      = 0x18000001000ull,
    OPC_CMP_LT_A6             = 0x18000000000ull,
    OPC_CMP_LTU_A6            = 0x1a000000000ull,
    OPC_CMP_EQ_A6             = 0x1c000000000ull,
    OPC_CMP4_LT_A6            = 0x18400000000ull,
    OPC_CMP4_LTU_A6           = 0x1a400000000ull,
    OPC_CMP4_EQ_A6            = 0x1c400000000ull,
    OPC_DEP_Z_I12             = 0x0a600000000ull,
    OPC_EXTR_I11              = 0x0a400002000ull,
    OPC_EXTR_U_I11            = 0x0a400000000ull,
    OPC_FCVT_FX_TRUNC_S1_F10  = 0x004d0000000ull,
    OPC_FCVT_FXU_TRUNC_S1_F10 = 0x004d8000000ull,
    OPC_FCVT_XF_F11           = 0x000e0000000ull,
    OPC_FMA_S1_F1             = 0x10400000000ull,
    OPC_FNMA_S1_F1            = 0x18400000000ull,
    OPC_FRCPA_S1_F6           = 0x00600000000ull,
    OPC_GETF_SIG_M19          = 0x08708000000ull,
    OPC_LD1_M1                = 0x08000000000ull,
    OPC_LD1_M3                = 0x0a000000000ull,
    OPC_LD2_M1                = 0x08040000000ull,
    OPC_LD2_M3                = 0x0a040000000ull,
    OPC_LD4_M1                = 0x08080000000ull,
    OPC_LD4_M3                = 0x0a080000000ull,
    OPC_LD8_M1                = 0x080c0000000ull,
    OPC_LD8_M3                = 0x0a0c0000000ull,
    OPC_MUX1_I3               = 0x0eca0000000ull,
    OPC_NOP_B9                = 0x04008000000ull,
    OPC_NOP_F16               = 0x00008000000ull,
    OPC_NOP_I18               = 0x00008000000ull,
    OPC_NOP_M48               = 0x00008000000ull,
    OPC_MOV_I21               = 0x00e00100000ull,
    OPC_MOV_RET_I21           = 0x00e00500000ull,
    OPC_MOV_I22               = 0x00188000000ull,
    OPC_MOV_I_I26             = 0x00150000000ull,
    OPC_MOVL_X2               = 0x0c000000000ull,
    OPC_OR_A1                 = 0x10070000000ull,
    OPC_SETF_EXP_M18          = 0x0c748000000ull,
    OPC_SETF_SIG_M18          = 0x0c708000000ull,
    OPC_SHL_I7                = 0x0f240000000ull,
    OPC_SHR_I5                = 0x0f220000000ull,
    OPC_SHR_U_I5              = 0x0f200000000ull,
    OPC_SHRP_I10              = 0x0ac00000000ull,
    OPC_SXT1_I29              = 0x000a0000000ull,
    OPC_SXT2_I29              = 0x000a8000000ull,
    OPC_SXT4_I29              = 0x000b0000000ull,
    OPC_ST1_M4                = 0x08c00000000ull,
    OPC_ST2_M4                = 0x08c40000000ull,
    OPC_ST4_M4                = 0x08c80000000ull,
    OPC_ST8_M4                = 0x08cc0000000ull,
    OPC_SUB_A1                = 0x10028000000ull,
    OPC_SUB_A3                = 0x10128000000ull,
    OPC_UNPACK4_L_I2          = 0x0f860000000ull,
    OPC_XMA_L_F2              = 0x1d000000000ull,
    OPC_XOR_A1                = 0x10078000000ull,
    OPC_ZXT1_I29              = 0x00080000000ull,
    OPC_ZXT2_I29              = 0x00088000000ull,
    OPC_ZXT4_I29              = 0x00090000000ull,
};
+
+static inline uint64_t tcg_opc_a1(int qp, uint64_t opc, int r1,
+                                  int r2, int r3)
+{
+    return opc
+           | ((r3 & 0x7f) << 20)
+           | ((r2 & 0x7f) << 13)
+           | ((r1 & 0x7f) << 6)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_a3(int qp, uint64_t opc, int r1,
+                                  uint64_t imm, int r3)
+{
+    return opc
+           | ((imm & 0x80) << 29) /* s */
+           | ((imm & 0x7f) << 13) /* imm7b */
+           | ((r3 & 0x7f) << 20)
+           | ((r1 & 0x7f) << 6)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_a4(int qp, uint64_t opc, int r1,
+                                  uint64_t imm, int r3)
+{
+    return opc
+           | ((imm & 0x2000) << 23) /* s */
+           | ((imm & 0x1f80) << 20) /* imm6d */
+           | ((imm & 0x007f) << 13) /* imm7b */
+           | ((r3 & 0x7f) << 20)
+           | ((r1 & 0x7f) << 6)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_a5(int qp, uint64_t opc, int r1,
+                                  uint64_t imm, int r3)
+{
+    return opc
+           | ((imm & 0x200000) << 15) /* s */
+           | ((imm & 0x1f0000) <<  6) /* imm5c */
+           | ((imm & 0x00ff80) << 20) /* imm9d */
+           | ((imm & 0x00007f) << 13) /* imm7b */
+           | ((r3 & 0x03) << 20)
+           | ((r1 & 0x7f) << 6)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_a6(int qp, uint64_t opc, int p1,
+                                  int p2, int r2, int r3)
+{
+    return opc
+           | ((p2 & 0x3f) << 27)
+           | ((r3 & 0x7f) << 20)
+           | ((r2 & 0x7f) << 13)
+           | ((p1 & 0x3f) << 6)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_b1(int qp, uint64_t opc, uint64_t imm)
+{
+    return opc
+           | ((imm & 0x100000) << 16) /* s */
+           | ((imm & 0x0fffff) << 13) /* imm20b */
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_b4(int qp, uint64_t opc, int b2)
+{
+    return opc
+           | ((b2 & 0x7) << 13)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_b5(int qp, uint64_t opc, int b1, int b2)
+{
+    return opc
+           | ((b2 & 0x7) << 13)
+           | ((b1 & 0x7) << 6)
+           | (qp & 0x3f);
+}
+
+
+static inline uint64_t tcg_opc_b9(int qp, uint64_t opc, uint64_t imm)
+{
+    return opc
+           | ((imm & 0x100000) << 16) /* i */
+           | ((imm & 0x0fffff) << 6)  /* imm20a */
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_f1(int qp, uint64_t opc, int f1,
+                                  int f3, int f4, int f2)
+{
+    return opc
+           | ((f4 & 0x7f) << 27)
+           | ((f3 & 0x7f) << 20)
+           | ((f2 & 0x7f) << 13)
+           | ((f1 & 0x7f) << 6)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_f2(int qp, uint64_t opc, int f1,
+                                  int f3, int f4, int f2)
+{
+    return opc
+           | ((f4 & 0x7f) << 27)
+           | ((f3 & 0x7f) << 20)
+           | ((f2 & 0x7f) << 13)
+           | ((f1 & 0x7f) << 6)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_f6(int qp, uint64_t opc, int f1,
+                                  int p2, int f2, int f3)
+{
+    return opc
+           | ((p2 & 0x3f) << 27)
+           | ((f3 & 0x7f) << 20)
+           | ((f2 & 0x7f) << 13)
+           | ((f1 & 0x7f) << 6)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_f10(int qp, uint64_t opc, int f1, int f2)
+{
+    return opc
+           | ((f2 & 0x7f) << 13)
+           | ((f1 & 0x7f) << 6)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_f11(int qp, uint64_t opc, int f1, int f2)
+{
+    return opc
+           | ((f2 & 0x7f) << 13)
+           | ((f1 & 0x7f) << 6)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_f16(int qp, uint64_t opc, uint64_t imm)
+{
+    return opc
+           | ((imm & 0x100000) << 16) /* i */
+           | ((imm & 0x0fffff) << 6)  /* imm20a */
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_i2(int qp, uint64_t opc, int r1,
+                                  int r2, int r3)
+{
+    return opc
+           | ((r3 & 0x7f) << 20)
+           | ((r2 & 0x7f) << 13)
+           | ((r1 & 0x7f) << 6)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_i3(int qp, uint64_t opc, int r1,
+                                  int r2, int mbtype)
+{
+    return opc
+           | ((mbtype & 0x0f) << 20)
+           | ((r2 & 0x7f) << 13)
+           | ((r1 & 0x7f) << 6)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_i5(int qp, uint64_t opc, int r1,
+                                  int r3, int r2)
+{
+    return opc
+           | ((r3 & 0x7f) << 20)
+           | ((r2 & 0x7f) << 13)
+           | ((r1 & 0x7f) << 6)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_i7(int qp, uint64_t opc, int r1,
+                                  int r2, int r3)
+{
+    return opc
+           | ((r3 & 0x7f) << 20)
+           | ((r2 & 0x7f) << 13)
+           | ((r1 & 0x7f) << 6)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_i10(int qp, uint64_t opc, int r1,
+                                   int r2, int r3, uint64_t count)
+{
+    return opc
+           | ((count & 0x3f) << 27)
+           | ((r3 & 0x7f) << 20)
+           | ((r2 & 0x7f) << 13)
+           | ((r1 & 0x7f) << 6)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_i11(int qp, uint64_t opc, int r1,
+                                   int r3, uint64_t pos, uint64_t len)
+{
+    return opc
+           | ((len & 0x3f) << 27)
+           | ((r3 & 0x7f) << 20)
+           | ((pos & 0x3f) << 14)
+           | ((r1 & 0x7f) << 6)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_i12(int qp, uint64_t opc, int r1,
+                                   int r2, uint64_t pos, uint64_t len)
+{
+    return opc
+           | ((len & 0x3f) << 27)
+           | ((pos & 0x3f) << 20)
+           | ((r2 & 0x7f) << 13)
+           | ((r1 & 0x7f) << 6)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_i18(int qp, uint64_t opc, uint64_t imm)
+{
+    return opc
+           | ((imm & 0x100000) << 16) /* i */
+           | ((imm & 0x0fffff) << 6)  /* imm20a */
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_i21(int qp, uint64_t opc, int b1,
+                                   int r2, uint64_t imm)
+{
+    return opc
+           | ((imm & 0x1ff) << 24)
+           | ((r2 & 0x7f) << 13)
+           | ((b1 & 0x7) << 6)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_i22(int qp, uint64_t opc, int r1, int b2)
+{
+    return opc
+           | ((b2 & 0x7) << 13)
+           | ((r1 & 0x7f) << 6)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_i26(int qp, uint64_t opc, int ar3, int r2)
+{
+    return opc
+           | ((ar3 & 0x7f) << 20)
+           | ((r2 & 0x7f) << 13)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_i29(int qp, uint64_t opc, int r1, int r3)
+{
+    return opc
+           | ((r3 & 0x7f) << 20)
+           | ((r1 & 0x7f) << 6)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_l2(uint64_t imm)
+{
+    return (imm & 0x7fffffffffc00000ull) >> 22;
+}
+
+static inline uint64_t tcg_opc_l3(uint64_t imm)
+{
+    return (imm & 0x07fffffffff00000ull) >> 18;
+}
+
+static inline uint64_t tcg_opc_m1(int qp, uint64_t opc, int r1, int r3)
+{
+    return opc
+           | ((r3 & 0x7f) << 20)
+           | ((r1 & 0x7f) << 6)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_m3(int qp, uint64_t opc, int r1,
+                                  int r3, uint64_t imm)
+{
+    return opc
+           | ((imm & 0x100) << 28) /* s */
+           | ((imm & 0x080) << 20) /* i */
+           | ((imm & 0x07f) << 13) /* imm7b */
+           | ((r3 & 0x7f) << 20)
+           | ((r1 & 0x7f) << 6)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_m4(int qp, uint64_t opc, int r2, int r3)
+{
+    return opc
+           | ((r3 & 0x7f) << 20)
+           | ((r2 & 0x7f) << 13)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_m18(int qp, uint64_t opc, int f1, int r2)
+{
+    return opc
+           | ((r2 & 0x7f) << 13)
+           | ((f1 & 0x7f) << 6)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_m19(int qp, uint64_t opc, int r1, int f2)
+{
+    return opc
+           | ((f2 & 0x7f) << 13)
+           | ((r1 & 0x7f) << 6)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_m34(int qp, uint64_t opc, int r1,
+                                   int sof, int sol, int sor)
+{
+    return opc
+           | ((sor & 0x0f) << 27)
+           | ((sol & 0x7f) << 20)
+           | ((sof & 0x7f) << 13)
+           | ((r1 & 0x7f) << 6)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_m48(int qp, uint64_t opc, uint64_t imm)
+{
+    return opc
+           | ((imm & 0x100000) << 16) /* i */
+           | ((imm & 0x0fffff) << 6)  /* imm20a */
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_x2(int qp, uint64_t opc,
+                                  int r1, uint64_t imm)
+{
+    return opc
+           | ((imm & 0x8000000000000000ull) >> 27) /* i */
+           |  (imm & 0x0000000000200000ull)        /* ic */
+           | ((imm & 0x00000000001f0000ull) << 6)  /* imm5c */
+           | ((imm & 0x000000000000ff80ull) << 20) /* imm9d */
+           | ((imm & 0x000000000000007full) << 13) /* imm7b */
+           | ((r1 & 0x7f) << 6)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_x3(int qp, uint64_t opc, uint64_t imm)
+{
+    return opc
+           | ((imm & 0x0800000000000000ull) >> 23) /* i */
+           | ((imm & 0x00000000000fffffull) << 13) /* imm20b */
+           | (qp & 0x3f);
+}
+
+
+/*
+ * Relocations
+ */
+
+static inline void reloc_pcrel21b (void *pc, tcg_target_long target)
+{
+    uint64_t imm;
+    int64_t disp;
+    int slot;
+
+    slot = (tcg_target_long) pc & 3;
+    pc = (void *)((tcg_target_long) pc & ~3);
+
+    disp = target - (tcg_target_long) pc;
+    imm = (uint64_t) disp >> 4;
+
+    switch(slot) {
+    case 0:
+        *(uint64_t *)(pc + 0) = (*(uint64_t *)(pc + 8) & 0xfffffdc00003ffffull)
+                                | ((imm & 0x100000) << 21)  /* s */
+                                | ((imm & 0x0fffff) << 18); /* imm20b */
+        break;
+    case 1:
+        *(uint64_t *)(pc + 8) = (*(uint64_t *)(pc + 8) & 0xfffffffffffb8000ull)
+                                | ((imm & 0x100000) >> 2)   /* s */
+                                | ((imm & 0x0fffe0) >> 5);  /* imm20b */
+        *(uint64_t *)(pc + 0) = (*(uint64_t *)(pc + 0) & 0x07ffffffffffffffull)
+                                | ((imm & 0x00001f) << 59); /* imm20b */
+        break;
+    case 2:
+        *(uint64_t *)(pc + 8) = (*(uint64_t *)(pc + 8) & 0xf700000fffffffffull)
+                                | ((imm & 0x100000) << 39)  /* s */
+                                | ((imm & 0x0fffff) << 36); /* imm20b */
+        break;
+    }
+}
+
+static inline uint64_t get_reloc_pcrel21b (void *pc)
+{
+    int64_t low, high;
+    int slot;
+
+    slot = (tcg_target_long) pc & 3;
+    pc = (void *)((tcg_target_long) pc & ~3);
+
+    low  = (*(uint64_t *)(pc + 0));
+    high = (*(uint64_t *)(pc + 8));
+
+    switch(slot) {
+    case 0:
+        return ((low >> 21) & 0x100000) + /* s */
+               ((low >> 18) & 0x0fffff);  /* imm20b */
+    case 1:
+        return ((high << 2) & 0x100000) + /* s */
+               ((high << 5) & 0x0fffe0) + /* imm20b */
+               ((low >> 59) & 0x00001f);  /* imm20b */
+    case 2:
+        return ((high >> 39) & 0x100000) + /* s */
+               ((high >> 36) & 0x0fffff);  /* imm20b */
+    default:
+        tcg_abort();
+    }
+}
+
+static inline void reloc_pcrel60b (void *pc, tcg_target_long target)
+{
+    int64_t disp;
+    uint64_t imm;
+
+    disp = target - (tcg_target_long) pc;
+    imm = (uint64_t) disp >> 4;
+
+    *(uint64_t *)(pc + 8) = (*(uint64_t *)(pc + 8) & 0xf700000fff800000ull)
+                             |  (imm & 0x0800000000000000ull)         /* s */
+                             | ((imm & 0x07fffff000000000ull) >> 36)  /* imm39 */
+                             | ((imm & 0x00000000000fffffull) << 36); /* imm20b */
+    *(uint64_t *)(pc + 0) = (*(uint64_t *)(pc + 0) & 0x00003fffffffffffull)
+                             | ((imm & 0x0000000ffff00000ull) << 28); /* imm39 */
+}
+
+static inline uint64_t get_reloc_pcrel60b (void *pc)
+{
+    int64_t low, high;
+
+    low  = (*(uint64_t *)(pc + 0));
+    high = (*(uint64_t *)(pc + 8));
+
+    return ((high)       & 0x0800000000000000ull) + /* s */
+           ((high >> 36) & 0x00000000000fffffull) + /* imm20b */
+           ((high << 36) & 0x07fffff000000000ull) + /* imm39 */
+           ((low >> 28)  & 0x0000000ffff00000ull);  /* imm39 */
+}
+
+
+static void patch_reloc(uint8_t *code_ptr, int type,
+                        tcg_target_long value, tcg_target_long addend)
+{
+    value += addend;
+    switch (type) {
+    case R_IA64_PCREL21B:
+        reloc_pcrel21b(code_ptr, value);
+        break;
+    case R_IA64_PCREL60B:
+        reloc_pcrel60b(code_ptr, value);
+    default:
+        tcg_abort();
+    }
+}
+
+/*
+ * Constraints
+ */
+
+/* parse target specific constraints */
/* Parse one target-specific operand constraint letter:
     'r'  any of the 64 general registers;
     'I'  a signed 22-bit immediate (addl form);
     'S'  like 'r' but, under softmmu, excluding r56/r57 -- presumably
          because the qemu_ld/st helper calls clobber them as argument
          registers (NOTE(review): confirm against the qemu_ld/st code);
     'Z'  the constant zero.
   Advances *pct_str past the consumed letter.  Returns 0 on success,
   -1 on an unknown letter.  */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch(ct_str[0]) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set(ct->u.regs, 0xffffffffffffffffull);
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S22;
        break;
    case 'S':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set(ct->u.regs, 0xffffffffffffffffull);
#if defined(CONFIG_SOFTMMU)
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R56);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R57);
#endif
        break;
    case 'Z':
        /* We are cheating a bit here, using the fact that the register
           r0 is also the register number 0. Hence there is no need
           to check for const_args in each instruction. */
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}
+
+/* test if a constant matches the constraint */
+static inline int tcg_target_const_match(tcg_target_long val,
+                                         const TCGArgConstraint *arg_ct)
+{
+    int ct;
+    ct = arg_ct->ct;
+    if (ct & TCG_CT_CONST)
+        return 1;
+    else if ((ct & TCG_CT_CONST_ZERO) && val == 0)
+        return 1;
+    else if ((ct & TCG_CT_CONST_S22) && val == ((int32_t)val << 10) >> 10)
+        return 1;
+    else
+        return 0;
+}
+
+/*
+ * Code generation
+ */
+
/* Address generated code long-branches back to on TB exit; NOTE(review):
   presumably initialized by the prologue generator (not in this view) --
   confirm.  */
static uint8_t *tb_ret_addr;

/* Emit one 128-bit instruction bundle at s->code_ptr.  Little-endian
   layout derived from the shifts below: bits 0..4 template, 5..45
   slot0, 46..86 slot1 (split across the two doublewords), 87..127
   slot2.  */
static inline void tcg_out_bundle(TCGContext *s, int template,
                                  uint64_t slot0, uint64_t slot1,
                                  uint64_t slot2)
{
    template &= 0x1f;          /* 5 bits */
    slot0 &= 0x1ffffffffffull; /* 41 bits */
    slot1 &= 0x1ffffffffffull; /* 41 bits */
    slot2 &= 0x1ffffffffffull; /* 41 bits */

    *(uint64_t *)(s->code_ptr + 0) = (slot1 << 46) | (slot0 << 5) | template;
    *(uint64_t *)(s->code_ptr + 8) = (slot2 << 23) | (slot1 >> 18);
    s->code_ptr += 16;
}
+
/* Register-to-register move, emitted as "adds ret = 0, arg" in the I
   slot of an mmI bundle (A-type instructions may issue on M or I
   units).  */
static inline void tcg_out_mov(TCGContext *s, TCGArg ret, TCGArg arg)
{
    tcg_out_bundle(s, mmI,
                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
                   tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4, ret, 0, arg));
}
+
/* Load the 64-bit constant "arg" into "reg" with movl, whose immediate
   is split between the L and X slots of an mLX bundle.  NOTE(review):
   "type" is ignored -- even TCG_TYPE_I32 materializes the full 64-bit
   pattern, presumably harmless since 32-bit values are kept in 64-bit
   registers on this target; confirm.  */
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGArg reg, tcg_target_long arg)
{
    tcg_out_bundle(s, mLX,
                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
                   tcg_opc_l2 (arg),
                   tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2, reg, arg));
}
+
+static inline void tcg_out_addi(TCGContext *s, TCGArg reg, tcg_target_long val)
+{
+    if (val == ((int32_t)val << 10) >> 10) {
+        tcg_out_bundle(s, MmI,
+                       tcg_opc_a5(TCG_REG_P0, OPC_ADDL_A5,
+                                  TCG_REG_R2, val, TCG_REG_R0),
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, reg,
+                                   reg, TCG_REG_R2));
+    } else {
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, val);
+        tcg_out_bundle(s, mmI,
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, reg,
+                                   reg, TCG_REG_R2));
+    }
+}
+
/* Emit an unconditional branch to a TCG label.  The branch sits in
   slot 2 of an mmB bundle; "s->code_ptr + 2" addresses bundle/slot 2
   for the relocation helpers (low two bits of a code pointer encode
   the slot).  */
static void tcg_out_br(TCGContext *s, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    /* NOTE(review): get_reloc_pcrel21b() reads the not-yet-written
       code buffer here; the field is fully overwritten below or when
       the relocation is applied, so the seed value appears to be
       irrelevant -- confirm.  */
    tcg_out_bundle(s, mmB,
                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
                   tcg_opc_b1 (TCG_REG_P0, OPC_BR_SPTK_MANY_B1,
                               get_reloc_pcrel21b(s->code_ptr + 2)));

    if (l->has_value) {
        /* Label already resolved: patch the displacement now.  */
        reloc_pcrel21b((s->code_ptr - 16) + 2, l->u.value);
    } else {
        /* Forward reference: record a relocation for later patching.  */
        tcg_out_reloc(s, (s->code_ptr - 16) + 2,
                      R_IA64_PCREL21B, label_index, 0);
    }
}
+
/* Indirect call through the IA-64 function descriptor at "addr":
   word 0 is the entry point, word 1 the callee's gp.  Loads the entry
   into b6 and the gp into r1 (the global pointer register per the
   IA-64 software conventions), then br.call with the return link in
   b0.  */
static inline void tcg_out_call(TCGContext *s, TCGArg addr)
{
    tcg_out_bundle(s, MmI,
                   tcg_opc_m1 (TCG_REG_P0, OPC_LD8_M1, TCG_REG_R2, addr),
                   tcg_opc_a4 (TCG_REG_P0, OPC_ADDS_A4, TCG_REG_R3, 8, addr),
                   tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21,
                               TCG_REG_B6, TCG_REG_R2, 0));
    tcg_out_bundle(s, mmB,
                   tcg_opc_m1 (TCG_REG_P0, OPC_LD8_M1, TCG_REG_R1, TCG_REG_R3),
                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
                   tcg_opc_b5 (TCG_REG_P0, OPC_BR_CALL_SPTK_MANY_B5,
                               TCG_REG_B0, TCG_REG_B6));
}
+
/* Leave generated code: put the TB exit value in r8 (the return-value
   register) and long-branch (brl) back to tb_ret_addr.  The
   displacement is taken from s->code_ptr before the branch bundle is
   emitted, i.e. relative to the bundle containing the brl.  */
static void tcg_out_exit_tb(TCGContext *s, tcg_target_long arg)
{
    int64_t disp;
    uint64_t imm;

    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R8, arg);

    disp = tb_ret_addr - s->code_ptr;
    imm = (uint64_t)disp >> 4;   /* brl counts in 16-byte bundles */

    tcg_out_bundle(s, mLX,
                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
                   tcg_opc_l3 (imm),
                   tcg_opc_x3 (TCG_REG_P0, OPC_BRL_SPTK_MANY_X3, imm));
}
+
/* Chain to translation-block slot "arg".  Only the indirect method is
   implemented: load the target address from s->tb_next[arg] and jump
   through b6.  The direct-jump method (s->tb_jmp_offset) is not
   supported by this backend.  Records the offset of the end of the
   emitted sequence in tb_next_offset[arg].  */
static inline void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
{
    if (s->tb_jmp_offset) {
        /* direct jump method */
        tcg_abort();
    } else {
        /* indirect jump method */
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2,
                     (tcg_target_long)(s->tb_next + arg));
        tcg_out_bundle(s, MmI,
                       tcg_opc_m1 (TCG_REG_P0, OPC_LD8_M1,
                                   TCG_REG_R2, TCG_REG_R2),
                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
                       tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21, TCG_REG_B6,
                                   TCG_REG_R2, 0));
        tcg_out_bundle(s, mmB,
                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
                       tcg_opc_b4 (TCG_REG_P0, OPC_BR_SPTK_MANY_B4,
                                   TCG_REG_B6));
    }
    s->tb_next_offset[arg] = s->code_ptr - s->code_buf;
}
+
/* Jump to the absolute address held in general register "addr":
   copy it into b6, then branch through b6.  */
static inline void tcg_out_jmp(TCGContext *s, TCGArg addr)
{
    tcg_out_bundle(s, mmI,
                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
                   tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21, TCG_REG_B6, addr, 0));
    tcg_out_bundle(s, mmB,
                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
                   tcg_opc_b4(TCG_REG_P0, OPC_BR_SPTK_MANY_B4, TCG_REG_B6));
}
+
+static inline void tcg_out_ld_rel(TCGContext *s, uint64_t opc_m4, TCGArg arg,
+                                  TCGArg arg1, tcg_target_long arg2)
+{
+    if (arg2 == ((int16_t)arg2 >> 2) << 2) {
+        tcg_out_bundle(s, MmI,
+                       tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4,
+                                  TCG_REG_R2, arg2, arg1),
+                       tcg_opc_m1 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+    } else {
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, arg2);
+        tcg_out_bundle(s, MmI,
+                       tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1,
+                                   TCG_REG_R2, TCG_REG_R2, arg1),
+                       tcg_opc_m1 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+    }
+}
+
+static inline void tcg_out_st_rel(TCGContext *s, uint64_t opc_m4, TCGArg arg,
+                                  TCGArg arg1, tcg_target_long arg2)
+{
+    if (arg2 == ((int16_t)arg2 >> 2) << 2) {
+        tcg_out_bundle(s, MmI,
+                       tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4,
+                                  TCG_REG_R2, arg2, arg1),
+                       tcg_opc_m4 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+    } else {
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, arg2);
+        tcg_out_bundle(s, MmI,
+                       tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1,
+                                   TCG_REG_R2, TCG_REG_R2, arg1),
+                       tcg_opc_m4 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+    }
+}
+
+static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGArg arg,
+                              TCGArg arg1, tcg_target_long arg2)
+{
+    if (type == TCG_TYPE_I32) {
+        tcg_out_ld_rel(s, OPC_LD4_M1, arg, arg1, arg2);
+    } else {
+        tcg_out_ld_rel(s, OPC_LD8_M1, arg, arg1, arg2);
+    }
+}
+
+static inline void tcg_out_st(TCGContext *s, TCGType type, TCGArg arg,
+                              TCGArg arg1, tcg_target_long arg2)
+{
+    if (type == TCG_TYPE_I32) {
+        tcg_out_st_rel(s, OPC_ST4_M4, arg, arg1, arg2);
+    } else {
+        tcg_out_st_rel(s, OPC_ST8_M4, arg, arg1, arg2);
+    }
+}
+
/* Emit a three-operand ALU op, ret = arg1 <op> arg2, for the A1-form
   opcode "opc_a1".  Non-zero constant operands are materialized into
   the scratch registers r2/r3 with addl; a constant 0 keeps register
   number 0 (r0, which always reads 0).  The mII template has a stop
   after the second slot, so the final op may read r2/r3.  */
static inline void tcg_out_alu(TCGContext *s, uint64_t opc_a1, TCGArg ret,
                               TCGArg arg1, int const_arg1,
                               TCGArg arg2, int const_arg2)
{
    uint64_t opc1, opc2;

    if (const_arg1 && arg1 != 0) {
        opc1 = tcg_opc_a5(TCG_REG_P0, OPC_ADDL_A5,
                          TCG_REG_R2, arg1, TCG_REG_R0);
        arg1 = TCG_REG_R2;
    } else {
        opc1 = tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0);
    }

    if (const_arg2 && arg2 != 0) {
        opc2 = tcg_opc_a5(TCG_REG_P0, OPC_ADDL_A5,
                          TCG_REG_R3, arg2, TCG_REG_R0);
        arg2 = TCG_REG_R3;
    } else {
        opc2 = tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0);
    }

    tcg_out_bundle(s, mII,
                   opc1,
                   opc2,
                   tcg_opc_a1(TCG_REG_P0, opc_a1, ret, arg1, arg2));
}
+
+/* ret = ~(arg1 ^ arg2) (eqv), built as xor followed by
+   andcm ret = -1, ret which is a bitwise NOT.
+   NOTE(review): const_arg1/const_arg2 are ignored — presumably the
+   operand constraints force both operands into registers; verify.  */
+static inline void tcg_out_eqv(TCGContext *s, TCGArg ret,
+                               TCGArg arg1, int const_arg1,
+                               TCGArg arg2, int const_arg2)
+{
+    tcg_out_bundle(s, mII,
+                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   tcg_opc_a1 (TCG_REG_P0, OPC_XOR_A1, ret, arg1, arg2),
+                   tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, ret, -1, ret));
+}
+
+/* ret = ~(arg1 & arg2) (nand): and, then NOT via andcm with -1.
+   NOTE(review): const_arg1/const_arg2 are ignored, as in tcg_out_eqv.  */
+static inline void tcg_out_nand(TCGContext *s, TCGArg ret,
+                                TCGArg arg1, int const_arg1,
+                                TCGArg arg2, int const_arg2)
+{
+    tcg_out_bundle(s, mII,
+                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   tcg_opc_a1 (TCG_REG_P0, OPC_AND_A1, ret, arg1, arg2),
+                   tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, ret, -1, ret));
+}
+
+/* ret = ~(arg1 | arg2) (nor): or, then NOT via andcm with -1.
+   NOTE(review): const_arg1/const_arg2 are ignored, as in tcg_out_eqv.  */
+static inline void tcg_out_nor(TCGContext *s, TCGArg ret,
+                               TCGArg arg1, int const_arg1,
+                               TCGArg arg2, int const_arg2)
+{
+    tcg_out_bundle(s, mII,
+                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret, arg1, arg2),
+                   tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, ret, -1, ret));
+}
+
+/* ret = arg1 | ~arg2 (orc): R2 = ~arg2 via andcm with -1, then or.
+   NOTE(review): const_arg1/const_arg2 are ignored, as in tcg_out_eqv.  */
+static inline void tcg_out_orc(TCGContext *s, TCGArg ret,
+                               TCGArg arg1, int const_arg1,
+                               TCGArg arg2, int const_arg2)
+{
+    tcg_out_bundle(s, mII,
+                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, TCG_REG_R2, -1, arg2),
+                   tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret, arg1, TCG_REG_R2));
+}
+
+/* ret = arg1 * arg2.  ia64 has no integer multiply in the integer
+   units, so route through the FP unit: setf.sig moves the operands
+   into f6/f7, xma.l does a fixed-point multiply-add with f0 (zero)
+   as the addend, and getf.sig moves the low 64 bits of the product
+   back to an integer register.  */
+static inline void tcg_out_mul(TCGContext *s, TCGArg ret,
+                               TCGArg arg1, TCGArg arg2)
+{
+    tcg_out_bundle(s, mmI,
+                   tcg_opc_m18(TCG_REG_P0, OPC_SETF_SIG_M18, TCG_REG_F6, arg1),
+                   tcg_opc_m18(TCG_REG_P0, OPC_SETF_SIG_M18, TCG_REG_F7, arg2),
+                   tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+    tcg_out_bundle(s, mmF,
+                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   tcg_opc_f2 (TCG_REG_P0, OPC_XMA_L_F2, TCG_REG_F6, TCG_REG_F6,
+                               TCG_REG_F7, TCG_REG_F0));
+    tcg_out_bundle(s, miI,
+                   tcg_opc_m19(TCG_REG_P0, OPC_GETF_SIG_M19, ret, TCG_REG_F6),
+                   tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                   tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+}
+
+/* 32-bit arithmetic shift right.  A constant count uses a single
+   signed extract (extr) of the top 32-arg2 bits.  A variable count is
+   masked to 5 bits, arg1 is sign-extended to 64 bits (sxt4), and a
+   full-width shr produces the result.  */
+static inline void tcg_out_sar_i32(TCGContext *s, TCGArg ret, TCGArg arg1,
+                                   TCGArg arg2, int const_arg2)
+{
+    if (const_arg2) {
+        tcg_out_bundle(s, miI,
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       tcg_opc_i11(TCG_REG_P0, OPC_EXTR_I11,
+                                   ret, arg1, arg2, 31 - arg2));
+    } else {
+        tcg_out_bundle(s, mII,
+                       tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3,
+                                   TCG_REG_R3, 0x1f, arg2),
+                       tcg_opc_i29(TCG_REG_P0, OPC_SXT4_I29, TCG_REG_R2, arg1),
+                       tcg_opc_i5 (TCG_REG_P0, OPC_SHR_I5, ret,
+                                   TCG_REG_R2, TCG_REG_R3));
+    }
+}
+
+/* 64-bit arithmetic shift right: signed extract for a constant count,
+   plain shr for a register count (hardware masks the count).  */
+static inline void tcg_out_sar_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
+                                   TCGArg arg2, int const_arg2)
+{
+    if (const_arg2) {
+        tcg_out_bundle(s, miI,
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       tcg_opc_i11(TCG_REG_P0, OPC_EXTR_I11,
+                                   ret, arg1, arg2, 63 - arg2));
+    } else {
+        tcg_out_bundle(s, miI,
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       tcg_opc_i5 (TCG_REG_P0, OPC_SHR_I5, ret, arg1, arg2));
+    }
+}
+
+/* 32-bit shift left.  A constant count uses deposit-zero (dep.z) to
+   place the low bits at the shifted position; a variable count is
+   masked to 5 bits and a full-width shl is used.  The caller is
+   expected to ignore bits 32-63 of the result, per TCG i32 rules.  */
+static inline void tcg_out_shl_i32(TCGContext *s, TCGArg ret, TCGArg arg1,
+                                   TCGArg arg2, int const_arg2)
+{
+    if (const_arg2) {
+        tcg_out_bundle(s, miI,
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret,
+                                   arg1, 63 - arg2, 31 - arg2));
+    } else {
+        tcg_out_bundle(s, mII,
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3, TCG_REG_R2,
+                                   0x1f, arg2),
+                       tcg_opc_i7 (TCG_REG_P0, OPC_SHL_I7, ret,
+                                   arg1, TCG_REG_R2));
+    }
+}
+
+/* 64-bit shift left: dep.z for a constant count, shl for a register
+   count.  */
+static inline void tcg_out_shl_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
+                                   TCGArg arg2, int const_arg2)
+{
+    if (const_arg2) {
+        tcg_out_bundle(s, miI,
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret,
+                                   arg1, 63 - arg2, 63 - arg2));
+    } else {
+        tcg_out_bundle(s, miI,
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       tcg_opc_i7 (TCG_REG_P0, OPC_SHL_I7, ret,
+                                   arg1, arg2));
+    }
+}
+
+/* 32-bit logical shift right.  A constant count uses an unsigned
+   extract (extr.u); a variable count masks the count to 5 bits,
+   zero-extends arg1 to 64 bits (zxt4), and uses a full-width shr.u.  */
+static inline void tcg_out_shr_i32(TCGContext *s, TCGArg ret, TCGArg arg1,
+                                   TCGArg arg2, int const_arg2)
+{
+    if (const_arg2) {
+        tcg_out_bundle(s, miI,
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret,
+                                   arg1, arg2, 31 - arg2));
+    } else {
+        tcg_out_bundle(s, mII,
+                       tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3, TCG_REG_R3,
+                                   0x1f, arg2),
+                       tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29, TCG_REG_R2, arg1),
+                       tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, ret,
+                                   TCG_REG_R2, TCG_REG_R3));
+    }
+}
+
+/* 64-bit logical shift right: extr.u for a constant count, shr.u for
+   a register count.  */
+static inline void tcg_out_shr_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
+                                   TCGArg arg2, int const_arg2)
+{
+    if (const_arg2) {
+        tcg_out_bundle(s, miI,
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret,
+                                   arg1, arg2, 63 - arg2));
+    } else {
+        tcg_out_bundle(s, miI,
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, ret,
+                                   arg1, arg2));
+    }
+}
+
+/* 32-bit rotate left.  unpack4.l duplicates the low 32 bits of arg1
+   into both halves of R2, so a 32-bit rotation becomes a 64-bit
+   shift of the doubled value.  Constant count: extr.u by 32 - arg2.
+   Variable count: mask to 5 bits, compute 32 - count, then shr.u
+   (a count of 0 shifts by 32 and still selects the original word).  */
+static inline void tcg_out_rotl_i32(TCGContext *s, TCGArg ret, TCGArg arg1,
+                                    TCGArg arg2, int const_arg2)
+{
+    if (const_arg2) {
+        tcg_out_bundle(s, mII,
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_i2 (TCG_REG_P0, OPC_UNPACK4_L_I2,
+                                   TCG_REG_R2, arg1, arg1),
+                       tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret,
+                                   TCG_REG_R2, 32 - arg2, 31));
+    } else {
+        tcg_out_bundle(s, miI,
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_i2 (TCG_REG_P0, OPC_UNPACK4_L_I2,
+                                   TCG_REG_R2, arg1, arg1),
+                       tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3, TCG_REG_R3,
+                                   0x1f, arg2));
+        tcg_out_bundle(s, mII,
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_a3 (TCG_REG_P0, OPC_SUB_A3, TCG_REG_R3,
+                                   0x20, TCG_REG_R3),
+                       tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, ret,
+                                   TCG_REG_R2, TCG_REG_R3));
+    }
+}
+
+/* 64-bit rotate left.  Constant count: shrp (shift-right-pair) of
+   arg1:arg1 by 64 - arg2 extracts the rotated value in one insn.
+   Variable count: combine (arg1 << count) | (arg1 >> (64 - count)).
+   NOTE(review): for count == 0 the shr.u shifts by 64, which on
+   shift-by-register yields 0, so the OR still gives arg1 — verify
+   this is the intended reliance.  */
+static inline void tcg_out_rotl_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
+                                    TCGArg arg2, int const_arg2)
+{
+    if (const_arg2) {
+        tcg_out_bundle(s, miI,
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       tcg_opc_i10(TCG_REG_P0, OPC_SHRP_I10, ret, arg1,
+                                   arg1, 0x40 - arg2));
+    } else {
+        tcg_out_bundle(s, mII,
+                       tcg_opc_a3 (TCG_REG_P0, OPC_SUB_A3, TCG_REG_R2,
+                                   0x40, arg2),
+                       tcg_opc_i7 (TCG_REG_P0, OPC_SHL_I7, TCG_REG_R3,
+                                   arg1, arg2),
+                       tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, TCG_REG_R2,
+                                   arg1, TCG_REG_R2));
+        tcg_out_bundle(s, miI,
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret,
+                                   TCG_REG_R2, TCG_REG_R3));
+    }
+}
+
+/* 32-bit rotate right: duplicate the word with unpack4.l and shift
+   right by the count (masked to 5 bits for the register case).  */
+static inline void tcg_out_rotr_i32(TCGContext *s, TCGArg ret, TCGArg arg1,
+                                    TCGArg arg2, int const_arg2)
+{
+    if (const_arg2) {
+        tcg_out_bundle(s, mII,
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_i2 (TCG_REG_P0, OPC_UNPACK4_L_I2,
+                                   TCG_REG_R2, arg1, arg1),
+                       tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret,
+                                   TCG_REG_R2, arg2, 31));
+    } else {
+        tcg_out_bundle(s, mII,
+                       tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3, TCG_REG_R3,
+                                   0x1f, arg2),
+                       tcg_opc_i2 (TCG_REG_P0, OPC_UNPACK4_L_I2,
+                                   TCG_REG_R2, arg1, arg1),
+                       tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, ret,
+                                   TCG_REG_R2, TCG_REG_R3));
+    }
+}
+
+/* 64-bit rotate right.  Constant count: shrp of arg1:arg1 by the
+   count.  Variable count: (arg1 >> count) | (arg1 << (64 - count)),
+   mirroring tcg_out_rotl_i64 (see the count == 0 note there).  */
+static inline void tcg_out_rotr_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
+                                    TCGArg arg2, int const_arg2)
+{
+    if (const_arg2) {
+        tcg_out_bundle(s, miI,
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       tcg_opc_i10(TCG_REG_P0, OPC_SHRP_I10, ret, arg1,
+                                   arg1, arg2));
+    } else {
+        tcg_out_bundle(s, mII,
+                       tcg_opc_a3 (TCG_REG_P0, OPC_SUB_A3, TCG_REG_R2,
+                                   0x40, arg2),
+                       tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, TCG_REG_R3,
+                                   arg1, arg2),
+                       tcg_opc_i7 (TCG_REG_P0, OPC_SHL_I7, TCG_REG_R2,
+                                   arg1, TCG_REG_R2));
+        tcg_out_bundle(s, miI,
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret,
+                                   TCG_REG_R2, TCG_REG_R3));
+    }
+}
+
+/* Emit a single I29-form sign/zero extension (sxt1/2/4, zxt1/2/4,
+   selected by opc_i29): ret = ext(arg).  */
+static inline void tcg_out_ext(TCGContext *s, uint64_t opc_i29,
+                               TCGArg ret, TCGArg arg)
+{
+    tcg_out_bundle(s, miI,
+                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                   tcg_opc_i29(TCG_REG_P0, opc_i29, ret, arg));
+}
+
+/* Byte-swap a 16-bit value: dep.z positions the halfword so that a
+   full 64-bit byte reversal (mux1 with 0xb = @rev) leaves the swapped
+   halfword in the low bits of ret.  */
+static inline void tcg_out_bswap16(TCGContext *s, TCGArg ret, TCGArg arg)
+{
+    tcg_out_bundle(s, mII,
+                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret, arg, 15, 15),
+                   tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3, ret, ret, 0xb));
+}
+
+/* Byte-swap a 32-bit value: same dep.z + mux1 @rev trick as
+   tcg_out_bswap16, with a 32-bit deposit.  */
+static inline void tcg_out_bswap32(TCGContext *s, TCGArg ret, TCGArg arg)
+{
+    tcg_out_bundle(s, mII,
+                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret, arg, 31, 31),
+                   tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3, ret, ret, 0xb));
+}
+
+/* Byte-swap a full 64-bit value with a single mux1 @rev.  */
+static inline void tcg_out_bswap64(TCGContext *s, TCGArg ret, TCGArg arg)
+{
+    tcg_out_bundle(s, mII,
+                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                   tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3, ret, arg, 0xb));
+}
+
+/* Build (but do not emit) an A6-form compare for the TCG condition
+   COND, setting predicate P6 when the condition holds and P7 when it
+   does not.  Only eq/lt/ltu compares exist in hardware; the other
+   conditions are derived by swapping the target predicates and/or the
+   operands.  cmp4 selects the 32-bit compare forms.  */
+static inline uint64_t tcg_opc_cmp_a(int qp, TCGCond cond, TCGArg arg1,
+                                     TCGArg arg2, int cmp4)
+{
+    uint64_t opc_eq_a6, opc_lt_a6, opc_ltu_a6;
+
+    if (cmp4) {
+        opc_eq_a6 = OPC_CMP4_EQ_A6;
+        opc_lt_a6 = OPC_CMP4_LT_A6;
+        opc_ltu_a6 = OPC_CMP4_LTU_A6;
+    } else {
+        opc_eq_a6 = OPC_CMP_EQ_A6;
+        opc_lt_a6 = OPC_CMP_LT_A6;
+        opc_ltu_a6 = OPC_CMP_LTU_A6;
+    }
+
+    switch (cond) {
+    case TCG_COND_EQ:
+        return tcg_opc_a6 (qp, opc_eq_a6,  TCG_REG_P6, TCG_REG_P7, arg1, arg2);
+    case TCG_COND_NE:
+        return tcg_opc_a6 (qp, opc_eq_a6,  TCG_REG_P7, TCG_REG_P6, arg1, arg2);
+    case TCG_COND_LT:
+        return tcg_opc_a6 (qp, opc_lt_a6,  TCG_REG_P6, TCG_REG_P7, arg1, arg2);
+    case TCG_COND_LTU:
+        return tcg_opc_a6 (qp, opc_ltu_a6, TCG_REG_P6, TCG_REG_P7, arg1, arg2);
+    case TCG_COND_GE:
+        return tcg_opc_a6 (qp, opc_lt_a6,  TCG_REG_P7, TCG_REG_P6, arg1, arg2);
+    case TCG_COND_GEU:
+        return tcg_opc_a6 (qp, opc_ltu_a6, TCG_REG_P7, TCG_REG_P6, arg1, arg2);
+    case TCG_COND_LE:
+        return tcg_opc_a6 (qp, opc_lt_a6,  TCG_REG_P7, TCG_REG_P6, arg2, arg1);
+    case TCG_COND_LEU:
+        return tcg_opc_a6 (qp, opc_ltu_a6, TCG_REG_P7, TCG_REG_P6, arg2, arg1);
+    case TCG_COND_GT:
+        return tcg_opc_a6 (qp, opc_lt_a6,  TCG_REG_P6, TCG_REG_P7, arg2, arg1);
+    case TCG_COND_GTU:
+        return tcg_opc_a6 (qp, opc_ltu_a6, TCG_REG_P6, TCG_REG_P7, arg2, arg1);
+    default:
+        /* Unsupported condition: hard failure at code-gen time.  */
+        tcg_abort();
+        break;
+    }
+}
+
+/* Conditional branch: materialize non-zero constant operands into
+   R2/R3 (constant 0 passes through as r0), emit the compare into
+   (p6, p7), then a (p6)-predicated br.cond to LABEL_INDEX.  The
+   branch sits in slot 2 of the final bundle; since code_ptr has
+   advanced by 16 bytes after emission, the relocation target is
+   (code_ptr - 16) + 2.  Forward references are recorded as
+   R_IA64_PCREL21B relocs and patched when the label is defined.  */
+static inline void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGArg arg1,
+                                  int const_arg1, TCGArg arg2, int const_arg2,
+                                  int label_index, int cmp4)
+{
+    TCGLabel *l = &s->labels[label_index];
+    uint64_t opc1, opc2;
+
+    if (const_arg1 && arg1 != 0) {
+        opc1 = tcg_opc_a5(TCG_REG_P0, OPC_ADDL_A5, TCG_REG_R2,
+                          arg1, TCG_REG_R0);
+        arg1 = TCG_REG_R2;
+    } else {
+        opc1 = tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0);
+    }
+
+    if (const_arg2 && arg2 != 0) {
+        opc2 = tcg_opc_a5(TCG_REG_P0, OPC_ADDL_A5, TCG_REG_R3,
+                          arg2, TCG_REG_R0);
+        arg2 = TCG_REG_R3;
+    } else {
+        opc2 = tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0);
+    }
+
+    tcg_out_bundle(s, mII,
+                   opc1,
+                   opc2,
+                   tcg_opc_cmp_a(TCG_REG_P0, cond, arg1, arg2, cmp4));
+    tcg_out_bundle(s, mmB,
+                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   tcg_opc_b1 (TCG_REG_P6, OPC_BR_DPTK_FEW_B1,
+                               get_reloc_pcrel21b(s->code_ptr + 2)));
+
+    if (l->has_value) {
+        /* Label already placed: patch the branch displacement now.  */
+        reloc_pcrel21b((s->code_ptr - 16) + 2, l->u.value);
+    } else {
+        tcg_out_reloc(s, (s->code_ptr - 16) + 2,
+                      R_IA64_PCREL21B, label_index, 0);
+    }
+}
+
+/* setcond: compare into (p6, p7), then two predicated addl set
+   ret = 1 under p6 (condition true) or ret = 0 under p7 (false).  */
+static inline void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGArg ret,
+                                   TCGArg arg1, TCGArg arg2, int cmp4)
+{
+    tcg_out_bundle(s, MmI,
+                   tcg_opc_cmp_a(TCG_REG_P0, cond, arg1, arg2, cmp4),
+                   tcg_opc_a5(TCG_REG_P6, OPC_ADDL_A5, ret, 1, TCG_REG_R0),
+                   tcg_opc_a5(TCG_REG_P7, OPC_ADDL_A5, ret, 0, TCG_REG_R0));
+}
+
+#if defined(CONFIG_SOFTMMU)
+
+#include "../../softmmu_defs.h"
+
+/* Load and compare a TLB entry, and return the result in (p6, p7).
+   R2 is loaded with the address of the addend TLB entry.
+   R56 is loaded with the address, zero extended on 32-bit targets. */
+static inline void tcg_out_qemu_tlb(TCGContext *s, TCGArg addr_reg,
+                                    int s_bits, uint64_t offset_rw,
+                                    uint64_t offset_addend)
+{
+    /* R3 = page mask for this access size; R2 = TLB index extracted
+       from the address, scaled to the TLB entry size.  */
+    tcg_out_bundle(s, mII,
+                   tcg_opc_a5 (TCG_REG_P0, OPC_ADDL_A5, TCG_REG_R3,
+                               TARGET_PAGE_MASK | ((1 << s_bits) - 1),
+                               TCG_REG_R0),
+                   tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, TCG_REG_R2,
+                               addr_reg, TARGET_PAGE_BITS, CPU_TLB_BITS - 1),
+                   tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, TCG_REG_R2,
+                               TCG_REG_R2, 63 - CPU_TLB_ENTRY_BITS,
+                               63 - CPU_TLB_ENTRY_BITS));
+    /* R2 = &env->tlb_table[...][idx].addr_{read,write}; R56 = guest
+       address (zero-extended on 32-bit targets).  */
+    tcg_out_bundle(s, mII,
+                   tcg_opc_a5 (TCG_REG_P0, OPC_ADDL_A5, TCG_REG_R2,
+                               offset_rw, TCG_REG_R2),
+#if TARGET_LONG_BITS == 32
+                   tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29, TCG_REG_R56, addr_reg),
+#else
+                   tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4, TCG_REG_R56,
+                              0, addr_reg),
+#endif
+                   tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
+                               TCG_REG_R2, TCG_AREG0));
+    /* Load the comparator with post-increment so R2 ends up pointing
+       at the addend field, mask the address, and compare into
+       (p6 = hit, p7 = miss).  */
+    tcg_out_bundle(s, mII,
+#if TARGET_LONG_BITS == 32
+                   tcg_opc_m3 (TCG_REG_P0, OPC_LD4_M3, TCG_REG_R57,
+                               TCG_REG_R2, offset_addend - offset_rw),
+#else
+                   tcg_opc_m3 (TCG_REG_P0, OPC_LD8_M3, TCG_REG_R57,
+                               TCG_REG_R2, offset_addend - offset_rw),
+#endif
+                   tcg_opc_a1 (TCG_REG_P0, OPC_AND_A1, TCG_REG_R3,
+                               TCG_REG_R3, TCG_REG_R56),
+                   tcg_opc_a6 (TCG_REG_P0, OPC_CMP_EQ_A6, TCG_REG_P6,
+                               TCG_REG_P7, TCG_REG_R3, TCG_REG_R57));
+}
+
+/* Slow-path load helpers, indexed by log2 of the access size.  */
+static void *qemu_ld_helpers[4] = {
+    __ldb_mmu,
+    __ldw_mmu,
+    __ldl_mmu,
+    __ldq_mmu,
+};
+
+/* Emit a guest load (softmmu): TLB lookup, then a predicated fast
+   path (p6, direct load through the addend) interleaved with a
+   predicated slow path (p7, call to __ld*_mmu).  The raw value ends
+   up in R8 and is byte-swapped and/or sign-extended into data_reg.
+   opc encodes the size in bits 0-1 and signedness in bit 2.  */
+static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
+{
+    int addr_reg, data_reg, mem_index, s_bits, bswap;
+    uint64_t opc_ld_m1[4] = { OPC_LD1_M1, OPC_LD2_M1, OPC_LD4_M1, OPC_LD8_M1 };
+    /* Indexed directly by opc; entries 3 and 7 are unused since the
+       64-bit case is handled separately below.  */
+    uint64_t opc_ext_i29[8] = { OPC_ZXT1_I29, OPC_ZXT2_I29, OPC_ZXT4_I29, 0,
+                                OPC_SXT1_I29, OPC_SXT2_I29, OPC_SXT4_I29, 0 };
+
+    data_reg = *args++;
+    addr_reg = *args++;
+    mem_index = *args;
+    s_bits = opc & 3;
+
+#ifdef TARGET_WORDS_BIGENDIAN
+    bswap = 1;
+#else
+    bswap = 0;
+#endif
+
+    /* Read the TLB entry */
+    tcg_out_qemu_tlb(s, addr_reg, s_bits,
+                     offsetof(CPUState, tlb_table[mem_index][0].addr_read),
+                     offsetof(CPUState, tlb_table[mem_index][0].addend),
+
+    /* P6 is the fast path, and P7 the slow path */
+    /* Slow path setup: R57 = mem_index (2nd helper arg), R2 = address
+       of the helper's function descriptor.  */
+    tcg_out_bundle(s, mLX,
+                   tcg_opc_a5 (TCG_REG_P7, OPC_ADDL_A5, TCG_REG_R57,
+                               mem_index, TCG_REG_R0),
+                   tcg_opc_l2 ((tcg_target_long) qemu_ld_helpers[s_bits]),
+                   tcg_opc_x2 (TCG_REG_P7, OPC_MOVL_X2, TCG_REG_R2,
+                               (tcg_target_long) qemu_ld_helpers[s_bits]));
+    /* Load the entry point from the descriptor (R2 post-incremented
+       to the gp slot); fast path: R3 = addend + guest address; slow
+       path: B6 = helper entry point.  */
+    tcg_out_bundle(s, MmI,
+                   tcg_opc_m3 (TCG_REG_P0, OPC_LD8_M3, TCG_REG_R3,
+                               TCG_REG_R2, 8),
+                   tcg_opc_a1 (TCG_REG_P6, OPC_ADD_A1, TCG_REG_R3,
+                               TCG_REG_R3, TCG_REG_R56),
+                   tcg_opc_i21(TCG_REG_P7, OPC_MOV_I21, TCG_REG_B6,
+                               TCG_REG_R3, 0));
+    /* Fast-path load into R8; slow path loads the helper's gp into
+       R1.  For 16/32-bit byte swaps, position the value for the
+       mux1 @rev that follows.  */
+    if (bswap && s_bits == 1) {
+        tcg_out_bundle(s, MmI,
+                       tcg_opc_m1 (TCG_REG_P6, opc_ld_m1[s_bits],
+                                   TCG_REG_R8, TCG_REG_R3),
+                       tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1, TCG_REG_R1, TCG_REG_R2),
+                       tcg_opc_i12(TCG_REG_P6, OPC_DEP_Z_I12,
+                                   TCG_REG_R8, TCG_REG_R8, 15, 15));
+    } else if (bswap && s_bits == 2) {
+        tcg_out_bundle(s, MmI,
+                       tcg_opc_m1 (TCG_REG_P6, opc_ld_m1[s_bits],
+                                   TCG_REG_R8, TCG_REG_R3),
+                       tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1, TCG_REG_R1, TCG_REG_R2),
+                       tcg_opc_i12(TCG_REG_P6, OPC_DEP_Z_I12,
+                                   TCG_REG_R8, TCG_REG_R8, 31, 31));
+    } else {
+        tcg_out_bundle(s, mmI,
+                       tcg_opc_m1 (TCG_REG_P6, opc_ld_m1[s_bits],
+                                   TCG_REG_R8, TCG_REG_R3),
+                       tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1, TCG_REG_R1, TCG_REG_R2),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+    }
+    /* Finish the byte swap (if any) and take the slow-path call under
+       p7; the helper also returns its result in R8.  */
+    if (!bswap || s_bits == 0) {
+        tcg_out_bundle(s, miB,
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       tcg_opc_b5 (TCG_REG_P7, OPC_BR_CALL_SPTK_MANY_B5,
+                                   TCG_REG_B0, TCG_REG_B6));
+    } else {
+        tcg_out_bundle(s, miB,
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_i3 (TCG_REG_P6, OPC_MUX1_I3,
+                                   TCG_REG_R8, TCG_REG_R8, 0xb),
+                       tcg_opc_b5 (TCG_REG_P7, OPC_BR_CALL_SPTK_MANY_B5,
+                                   TCG_REG_B0, TCG_REG_B6));
+    }
+
+    /* Move/extend the result from R8 into data_reg.  */
+    if (opc == 3) {
+        tcg_out_bundle(s, miI,
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       tcg_opc_a4 (TCG_REG_P0, OPC_ADDS_A4,
+                                   data_reg, 0, TCG_REG_R8));
+    } else {
+        tcg_out_bundle(s, miI,
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       tcg_opc_i29(TCG_REG_P0, opc_ext_i29[opc],
+                                   data_reg, TCG_REG_R8));
+    }
+}
+
+/* Slow-path store helpers, indexed by log2 of the access size.  */
+static void *qemu_st_helpers[4] = {
+    __stb_mmu,
+    __stw_mmu,
+    __stl_mmu,
+    __stq_mmu,
+};
+
+/* Emit a guest store (softmmu): TLB lookup, then a p6-predicated
+   direct store on the fast path and a p7-predicated call to
+   __st*_mmu on the slow path.  opc is the store size (0-3, log2
+   bytes) and doubles as s_bits for the TLB lookup.  */
+static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
+{
+    int addr_reg, data_reg, mem_index, bswap;
+    uint64_t opc_st_m4[4] = { OPC_ST1_M4, OPC_ST2_M4, OPC_ST4_M4, OPC_ST8_M4 };
+
+    data_reg = *args++;
+    addr_reg = *args++;
+    mem_index = *args;
+
+#ifdef TARGET_WORDS_BIGENDIAN
+    bswap = 1;
+#else
+    bswap = 0;
+#endif
+
+    tcg_out_qemu_tlb(s, addr_reg, opc,
+                     offsetof(CPUState, tlb_table[mem_index][0].addr_write),
+                     offsetof(CPUState, tlb_table[mem_index][0].addend));
+
+    /* P6 is the fast path, and P7 the slow path */
+    /* Slow path: R57 = value to store (2nd helper arg), R2 = address
+       of the helper's function descriptor.  */
+    tcg_out_bundle(s, mLX,
+                   tcg_opc_a4(TCG_REG_P7, OPC_ADDS_A4, TCG_REG_R57,
+                              0, data_reg),
+                   tcg_opc_l2 ((tcg_target_long) qemu_st_helpers[opc]),
+                   tcg_opc_x2 (TCG_REG_P7, OPC_MOVL_X2, TCG_REG_R2,
+                               (tcg_target_long) qemu_st_helpers[opc]));
+    /* Load the entry point (R2 post-incremented to the gp slot);
+       fast path: R3 = addend + guest address; slow path: B6 = entry.  */
+    tcg_out_bundle(s, MmI,
+                   tcg_opc_m3 (TCG_REG_P0, OPC_LD8_M3, TCG_REG_R3,
+                               TCG_REG_R2, 8),
+                   tcg_opc_a1 (TCG_REG_P6, OPC_ADD_A1, TCG_REG_R3,
+                               TCG_REG_R3, TCG_REG_R56),
+                   tcg_opc_i21(TCG_REG_P7, OPC_MOV_I21, TCG_REG_B6,
+                               TCG_REG_R3, 0));
+
+    /* Load the helper's gp into R1 (slow path) and, for big-endian
+       targets, byte-swap the value into R2 on the fast path (the
+       unswapped original in data_reg is what the helper receives).  */
+    if (!bswap || opc == 0) {
+        tcg_out_bundle(s, mII,
+                       tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1,
+                                   TCG_REG_R1, TCG_REG_R2),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+    } else if (opc == 1) {
+        tcg_out_bundle(s, mII,
+                       tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1,
+                                   TCG_REG_R1, TCG_REG_R2),
+                       tcg_opc_i12(TCG_REG_P6, OPC_DEP_Z_I12,
+                                   TCG_REG_R2, data_reg, 15, 15),
+                       tcg_opc_i3 (TCG_REG_P6, OPC_MUX1_I3,
+                                   TCG_REG_R2, TCG_REG_R2, 0xb));
+        data_reg = TCG_REG_R2;
+    } else if (opc == 2) {
+        tcg_out_bundle(s, mII,
+                       tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1,
+                                   TCG_REG_R1, TCG_REG_R2),
+                       tcg_opc_i12(TCG_REG_P6, OPC_DEP_Z_I12,
+                                   TCG_REG_R2, data_reg, 31, 31),
+                       tcg_opc_i3 (TCG_REG_P6, OPC_MUX1_I3,
+                                   TCG_REG_R2, TCG_REG_R2, 0xb));
+        data_reg = TCG_REG_R2;
+    } else if (opc == 3) {
+        tcg_out_bundle(s, miI,
+                       tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1,
+                                   TCG_REG_R1, TCG_REG_R2),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       tcg_opc_i3 (TCG_REG_P6, OPC_MUX1_I3,
+                                   TCG_REG_R2, data_reg, 0xb));
+        data_reg = TCG_REG_R2;
+    }
+
+    /* Fast path: store through the translated address.  Slow path:
+       R58 = mem_index (3rd helper arg) and call the helper.  */
+    tcg_out_bundle(s, miB,
+                   tcg_opc_m4 (TCG_REG_P6, opc_st_m4[opc],
+                               data_reg, TCG_REG_R3),
+                   tcg_opc_a5 (TCG_REG_P7, OPC_ADDL_A5, TCG_REG_R58,
+                               mem_index, TCG_REG_R0),
+                   tcg_opc_b5 (TCG_REG_P7, OPC_BR_CALL_SPTK_MANY_B5,
+                               TCG_REG_B0, TCG_REG_B6));
+}
+
+#else /* !CONFIG_SOFTMMU */
+
+/* Emit a guest load (user mode, no softmmu): add GUEST_BASE to the
+   guest address and load directly, then byte-swap and/or sign-extend
+   as required by opc (size in bits 0-1, signedness in bit 2).  */
+static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
+{
+    int addr_reg, data_reg, mem_index, s_bits, bswap;
+    uint64_t opc_ld_m1[4] = { OPC_LD1_M1, OPC_LD2_M1, OPC_LD4_M1, OPC_LD8_M1 };
+    uint64_t opc_sxt_i29[8] = { OPC_SXT1_I29, OPC_SXT2_I29, OPC_SXT4_I29, 0 };
+
+    data_reg = *args++;
+    addr_reg = *args++;
+    /* NOTE(review): mem_index is parsed but never used in this
+       user-mode path.  */
+    mem_index = *args;
+    s_bits = opc & 3;
+
+#ifdef TARGET_WORDS_BIGENDIAN
+    bswap = 1;
+#else
+    bswap = 0;
+#endif
+
+    /* R2 = GUEST_BASE.  */
+    tcg_out_bundle(s, mLX,
+                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   tcg_opc_l2 ((tcg_target_long) GUEST_BASE),
+                   tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2, TCG_REG_R2,
+                               GUEST_BASE));
+
+#if TARGET_LONG_BITS == 32
+    /* Zero-extend the 32-bit guest address before adding the base.  */
+    tcg_out_bundle(s, mII,
+                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29,
+                               TCG_REG_R3, addr_reg),
+                   tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
+                               TCG_REG_R2, TCG_REG_R3));
+
+    /* Load, then byte-swap (dep.z + mux1 @rev) and/or sign-extend:
+       opc == s_bits means an unsigned load, opc != s_bits a signed
+       one (bit 2 set).  */
+    if (!bswap || s_bits == 0) {
+        if (s_bits == opc) {
+            tcg_out_bundle(s, miI,
+                           tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
+                                       data_reg, TCG_REG_R2),
+                           tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                           tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+        } else {
+            tcg_out_bundle(s, mII,
+                           tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
+                                       data_reg, TCG_REG_R2),
+                           tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                           tcg_opc_i29(TCG_REG_P0, opc_sxt_i29[s_bits],
+                                       data_reg, data_reg));
+        }
+    } else if (s_bits == 3) {
+            tcg_out_bundle(s, mII,
+                           tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
+                                       data_reg, TCG_REG_R2),
+                           tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                           tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
+                                       data_reg, data_reg, 0xb));
+    } else {
+        if (s_bits == 1) {
+            tcg_out_bundle(s, mII,
+                           tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
+                                       data_reg, TCG_REG_R2),
+                           tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                           tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
+                                      data_reg, data_reg, 15, 15));
+        } else {
+            tcg_out_bundle(s, mII,
+                           tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
+                                       data_reg, TCG_REG_R2),
+                           tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                           tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
+                                      data_reg, data_reg, 31, 31));
+        }
+        if (opc == s_bits) {
+            tcg_out_bundle(s, miI,
+                           tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                           tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                           tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
+                                       data_reg, data_reg, 0xb));
+        } else {
+            tcg_out_bundle(s, mII,
+                           tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                           tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
+                                       data_reg, data_reg, 0xb),
+                           tcg_opc_i29(TCG_REG_P0, opc_sxt_i29[s_bits],
+                                       data_reg, data_reg));
+        }
+    }
+#else
+    /* 64-bit guest address: add the base and load directly.  */
+    tcg_out_bundle(s, MmI,
+                   tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
+                               TCG_REG_R2, addr_reg),
+                   tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
+                               data_reg, TCG_REG_R2),
+                   tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+
+    /* Byte-swap first, then sign-extend if bit 2 of opc is set.  */
+    if (bswap && s_bits == 1) {
+        tcg_out_bundle(s, mII,
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
+                                   data_reg, data_reg, 15, 15),
+                       tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
+                                   data_reg, data_reg, 0xb));
+    } else if (bswap && s_bits == 2) {
+        tcg_out_bundle(s, mII,
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
+                                   data_reg, data_reg, 31, 31),
+                       tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
+                                   data_reg, data_reg, 0xb));
+    } else if (bswap && s_bits == 3) {
+        tcg_out_bundle(s, miI,
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
+                                   data_reg, data_reg, 0xb));
+    }
+    if (s_bits != opc) {
+        tcg_out_bundle(s, miI,
+                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       tcg_opc_i29(TCG_REG_P0, opc_sxt_i29[s_bits],
+                                   data_reg, data_reg));
+    }
+#endif
+}
+
+static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
+{
+    int addr_reg, data_reg, bswap;
+    uint64_t opc_st_m4[4] = { OPC_ST1_M4, OPC_ST2_M4, OPC_ST4_M4, OPC_ST8_M4 };
+
+    data_reg = *args++;
+    addr_reg = *args++;
+
+#ifdef TARGET_WORDS_BIGENDIAN
+    bswap = 1;
+#else
+    bswap = 0;
+#endif
+
+    tcg_out_bundle(s, mLX,
+                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   tcg_opc_l2 ((tcg_target_long) GUEST_BASE),
+                   tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2, TCG_REG_R2,
+                               GUEST_BASE));
+
+#if TARGET_LONG_BITS == 32
+    tcg_out_bundle(s, mII,
+                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29,
+                               TCG_REG_R3, addr_reg),
+                   tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
+                               TCG_REG_R2, TCG_REG_R3));
+    if (bswap) {
+        if (opc == 1) {
+            tcg_out_bundle(s, mII,
+                           tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                           tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
+                                       TCG_REG_R3, data_reg, 15, 15),
+                           tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
+                                       TCG_REG_R3, TCG_REG_R3, 0xb));
+            data_reg = TCG_REG_R3;
+        } else if (opc == 2) {
+            tcg_out_bundle(s, mII,
+                           tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                           tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
+                                       TCG_REG_R3, data_reg, 31, 31),
+                           tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
+                                       TCG_REG_R3, TCG_REG_R3, 0xb));
+            data_reg = TCG_REG_R3;
+        } else if (opc == 3) {
+            tcg_out_bundle(s, miI,
+                           tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                           tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                           tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
+                                       TCG_REG_R3, data_reg, 0xb));
+            data_reg = TCG_REG_R3;
+        }
+    }
+    tcg_out_bundle(s, mmI,
+                   tcg_opc_m4 (TCG_REG_P0, opc_st_m4[opc],
+                               data_reg, TCG_REG_R2),
+                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+#else
+    if (!bswap || opc == 0) {
+        tcg_out_bundle(s, MmI,
+                       tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
+                                   TCG_REG_R2, addr_reg),
+                       tcg_opc_m4 (TCG_REG_P0, opc_st_m4[opc],
+                                   data_reg, TCG_REG_R2),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+    } else {
+        if (opc == 1) {
+            tcg_out_bundle(s, mII,
+                           tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
+                                       TCG_REG_R2, addr_reg),
+                           tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
+                                       TCG_REG_R3, data_reg, 15, 15),
+                           tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
+                                       TCG_REG_R3, TCG_REG_R3, 0xb));
+            data_reg = TCG_REG_R3;
+        } else if (opc == 2) {
+            tcg_out_bundle(s, mII,
+                           tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
+                                       TCG_REG_R2, addr_reg),
+                           tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
+                                       TCG_REG_R3, data_reg, 31, 31),
+                           tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
+                                       TCG_REG_R3, TCG_REG_R3, 0xb));
+            data_reg = TCG_REG_R3;
+        } else if (opc == 3) {
+            tcg_out_bundle(s, miI,
+                           tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
+                                       TCG_REG_R2, addr_reg),
+                           tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                           tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
+                                       TCG_REG_R3, data_reg, 0xb));
+            data_reg = TCG_REG_R3;
+        }
+        tcg_out_bundle(s, miI,
+                       tcg_opc_m4 (TCG_REG_P0, opc_st_m4[opc],
+                                   data_reg, TCG_REG_R2),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+    }
+#endif
+}
+
+#endif
+
+static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
+                              const TCGArg *args, const int *const_args)
+{
+    switch(opc) {
+    case INDEX_op_exit_tb:
+        tcg_out_exit_tb(s, args[0]);
+        break;
+    case INDEX_op_br:
+        tcg_out_br(s, args[0]);
+        break;
+    case INDEX_op_call:
+        tcg_out_call(s, args[0]);
+        break;
+    case INDEX_op_goto_tb:
+        tcg_out_goto_tb(s, args[0]);
+        break;
+    case INDEX_op_jmp:
+        tcg_out_jmp(s, args[0]);
+        break;
+
+    case INDEX_op_movi_i32:
+        tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
+        break;
+    case INDEX_op_movi_i64:
+        tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
+        break;
+
+    case INDEX_op_ld8u_i32:
+    case INDEX_op_ld8u_i64:
+        tcg_out_ld_rel(s, OPC_LD1_M1, args[0], args[1], args[2]);
+        break;
+    case INDEX_op_ld8s_i32:
+    case INDEX_op_ld8s_i64:
+        tcg_out_ld_rel(s, OPC_LD1_M1, args[0], args[1], args[2]);
+        tcg_out_ext(s, OPC_SXT1_I29, args[0], args[0]);
+        break;
+    case INDEX_op_ld16u_i32:
+    case INDEX_op_ld16u_i64:
+        tcg_out_ld_rel(s, OPC_LD2_M1, args[0], args[1], args[2]);
+        break;
+    case INDEX_op_ld16s_i32:
+    case INDEX_op_ld16s_i64:
+        tcg_out_ld_rel(s, OPC_LD2_M1, args[0], args[1], args[2]);
+        tcg_out_ext(s, OPC_SXT2_I29, args[0], args[0]);
+        break;
+    case INDEX_op_ld_i32:
+    case INDEX_op_ld32u_i64:
+        tcg_out_ld_rel(s, OPC_LD4_M1, args[0], args[1], args[2]);
+        break;
+    case INDEX_op_ld32s_i64:
+        tcg_out_ld_rel(s, OPC_LD4_M1, args[0], args[1], args[2]);
+        tcg_out_ext(s, OPC_SXT4_I29, args[0], args[0]);
+        break;
+    case INDEX_op_ld_i64:
+        tcg_out_ld_rel(s, OPC_LD8_M1, args[0], args[1], args[2]);
+        break;
+    case INDEX_op_st8_i32:
+    case INDEX_op_st8_i64:
+        tcg_out_st_rel(s, OPC_ST1_M4, args[0], args[1], args[2]);
+        break;
+    case INDEX_op_st16_i32:
+    case INDEX_op_st16_i64:
+        tcg_out_st_rel(s, OPC_ST2_M4, args[0], args[1], args[2]);
+        break;
+    case INDEX_op_st_i32:
+    case INDEX_op_st32_i64:
+        tcg_out_st_rel(s, OPC_ST4_M4, args[0], args[1], args[2]);
+        break;
+    case INDEX_op_st_i64:
+        tcg_out_st_rel(s, OPC_ST8_M4, args[0], args[1], args[2]);
+        break;
+
+    case INDEX_op_add_i32:
+    case INDEX_op_add_i64:
+        tcg_out_alu(s, OPC_ADD_A1, args[0], args[1], const_args[1],
+                    args[2], const_args[2]);
+        break;
+    case INDEX_op_sub_i32:
+    case INDEX_op_sub_i64:
+        tcg_out_alu(s, OPC_SUB_A1, args[0], args[1], const_args[1],
+                    args[2], const_args[2]);
+        break;
+
+    case INDEX_op_and_i32:
+    case INDEX_op_and_i64:
+        tcg_out_alu(s, OPC_AND_A1, args[0], args[1], const_args[1],
+                    args[2], const_args[2]);
+        break;
+    case INDEX_op_andc_i32:
+    case INDEX_op_andc_i64:
+        tcg_out_alu(s, OPC_ANDCM_A1, args[0], args[1], const_args[1],
+                    args[2], const_args[2]);
+        break;
+    case INDEX_op_eqv_i32:
+    case INDEX_op_eqv_i64:
+        tcg_out_eqv(s, args[0], args[1], const_args[1],
+                    args[2], const_args[2]);
+        break;
+    case INDEX_op_nand_i32:
+    case INDEX_op_nand_i64:
+        tcg_out_nand(s, args[0], args[1], const_args[1],
+                     args[2], const_args[2]);
+        break;
+    case INDEX_op_nor_i32:
+    case INDEX_op_nor_i64:
+        tcg_out_nor(s, args[0], args[1], const_args[1],
+                    args[2], const_args[2]);
+        break;
+    case INDEX_op_or_i32:
+    case INDEX_op_or_i64:
+        tcg_out_alu(s, OPC_OR_A1, args[0], args[1], const_args[1],
+                    args[2], const_args[2]);
+        break;
+    case INDEX_op_orc_i32:
+    case INDEX_op_orc_i64:
+        tcg_out_orc(s, args[0], args[1], const_args[1],
+                    args[2], const_args[2]);
+        break;
+    case INDEX_op_xor_i32:
+    case INDEX_op_xor_i64:
+        tcg_out_alu(s, OPC_XOR_A1, args[0], args[1], const_args[1],
+                    args[2], const_args[2]);
+        break;
+
+    case INDEX_op_mul_i32:
+    case INDEX_op_mul_i64:
+        tcg_out_mul(s, args[0], args[1], args[2]);
+        break;
+
+    case INDEX_op_sar_i32:
+        tcg_out_sar_i32(s, args[0], args[1], args[2], const_args[2]);
+        break;
+    case INDEX_op_sar_i64:
+        tcg_out_sar_i64(s, args[0], args[1], args[2], const_args[2]);
+        break;
+    case INDEX_op_shl_i32:
+        tcg_out_shl_i32(s, args[0], args[1], args[2], const_args[2]);
+        break;
+    case INDEX_op_shl_i64:
+        tcg_out_shl_i64(s, args[0], args[1], args[2], const_args[2]);
+        break;
+    case INDEX_op_shr_i32:
+        tcg_out_shr_i32(s, args[0], args[1], args[2], const_args[2]);
+        break;
+    case INDEX_op_shr_i64:
+        tcg_out_shr_i64(s, args[0], args[1], args[2], const_args[2]);
+        break;
+    case INDEX_op_rotl_i32:
+        tcg_out_rotl_i32(s, args[0], args[1], args[2], const_args[2]);
+        break;
+    case INDEX_op_rotl_i64:
+        tcg_out_rotl_i64(s, args[0], args[1], args[2], const_args[2]);
+        break;
+    case INDEX_op_rotr_i32:
+        tcg_out_rotr_i32(s, args[0], args[1], args[2], const_args[2]);
+        break;
+    case INDEX_op_rotr_i64:
+        tcg_out_rotr_i64(s, args[0], args[1], args[2], const_args[2]);
+        break;
+
+    case INDEX_op_ext8s_i32:
+    case INDEX_op_ext8s_i64:
+        tcg_out_ext(s, OPC_SXT1_I29, args[0], args[1]);
+        break;
+    case INDEX_op_ext8u_i32:
+    case INDEX_op_ext8u_i64:
+        tcg_out_ext(s, OPC_ZXT1_I29, args[0], args[1]);
+        break;
+    case INDEX_op_ext16s_i32:
+    case INDEX_op_ext16s_i64:
+        tcg_out_ext(s, OPC_SXT2_I29, args[0], args[1]);
+        break;
+    case INDEX_op_ext16u_i32:
+    case INDEX_op_ext16u_i64:
+        tcg_out_ext(s, OPC_ZXT2_I29, args[0], args[1]);
+        break;
+    case INDEX_op_ext32s_i64:
+        tcg_out_ext(s, OPC_SXT4_I29, args[0], args[1]);
+        break;
+    case INDEX_op_ext32u_i64:
+        tcg_out_ext(s, OPC_ZXT4_I29, args[0], args[1]);
+        break;
+
+    case INDEX_op_bswap16_i32:
+    case INDEX_op_bswap16_i64:
+        tcg_out_bswap16(s, args[0], args[1]);
+        break;
+    case INDEX_op_bswap32_i32:
+    case INDEX_op_bswap32_i64:
+        tcg_out_bswap32(s, args[0], args[1]);
+        break;
+    case INDEX_op_bswap64_i64:
+        tcg_out_bswap64(s, args[0], args[1]);
+        break;
+
+    case INDEX_op_brcond_i32:
+        tcg_out_brcond(s, args[2], args[0], const_args[0],
+                       args[1], const_args[1], args[3], 1);
+        break;
+    case INDEX_op_brcond_i64:
+        tcg_out_brcond(s, args[2], args[0], const_args[0],
+                       args[1], const_args[1], args[3], 0);
+        break;
+    case INDEX_op_setcond_i32:
+        tcg_out_setcond(s, args[3], args[0], args[1], args[2], 1);
+        break;
+    case INDEX_op_setcond_i64:
+        tcg_out_setcond(s, args[3], args[0], args[1], args[2], 0);
+        break;
+
+    case INDEX_op_qemu_ld8u:
+        tcg_out_qemu_ld(s, args, 0);
+        break;
+    case INDEX_op_qemu_ld8s:
+        tcg_out_qemu_ld(s, args, 0 | 4);
+        break;
+    case INDEX_op_qemu_ld16u:
+        tcg_out_qemu_ld(s, args, 1);
+        break;
+    case INDEX_op_qemu_ld16s:
+        tcg_out_qemu_ld(s, args, 1 | 4);
+        break;
+    case INDEX_op_qemu_ld32u:
+        tcg_out_qemu_ld(s, args, 2);
+        break;
+    case INDEX_op_qemu_ld32s:
+        tcg_out_qemu_ld(s, args, 2 | 4);
+        break;
+    case INDEX_op_qemu_ld64:
+        tcg_out_qemu_ld(s, args, 3);
+        break;
+
+    case INDEX_op_qemu_st8:
+        tcg_out_qemu_st(s, args, 0);
+        break;
+    case INDEX_op_qemu_st16:
+        tcg_out_qemu_st(s, args, 1);
+        break;
+    case INDEX_op_qemu_st32:
+        tcg_out_qemu_st(s, args, 2);
+        break;
+    case INDEX_op_qemu_st64:
+        tcg_out_qemu_st(s, args, 3);
+        break;
+
+    default:
+        tcg_abort();
+    }
+}
+
+static const TCGTargetOpDef ia64_op_defs[] = {
+    { INDEX_op_br, { } },
+    { INDEX_op_call, { "r" } },
+    { INDEX_op_exit_tb, { } },
+    { INDEX_op_goto_tb, { } },
+    { INDEX_op_jmp, { "r" } },
+
+    { INDEX_op_mov_i32, { "r", "r" } },
+    { INDEX_op_movi_i32, { "r" } },
+
+    { INDEX_op_ld8u_i32, { "r", "r" } },
+    { INDEX_op_ld8s_i32, { "r", "r" } },
+    { INDEX_op_ld16u_i32, { "r", "r" } },
+    { INDEX_op_ld16s_i32, { "r", "r" } },
+    { INDEX_op_ld_i32, { "r", "r" } },
+    { INDEX_op_st8_i32, { "rZ", "r" } },
+    { INDEX_op_st16_i32, { "rZ", "r" } },
+    { INDEX_op_st_i32, { "rZ", "r" } },
+
+    { INDEX_op_add_i32, { "r", "rI", "rI" } },
+    { INDEX_op_sub_i32, { "r", "rI", "rI" } },
+
+    { INDEX_op_and_i32, { "r", "rI", "rI" } },
+    { INDEX_op_andc_i32, { "r", "rI", "rI" } },
+    { INDEX_op_eqv_i32, { "r", "rZ", "rZ" } },
+    { INDEX_op_nand_i32, { "r", "rZ", "rZ" } },
+    { INDEX_op_nor_i32, { "r", "rZ", "rZ" } },
+    { INDEX_op_or_i32, { "r", "rI", "rI" } },
+    { INDEX_op_orc_i32, { "r", "rZ", "rZ" } },
+    { INDEX_op_xor_i32, { "r", "rI", "rI" } },
+
+    { INDEX_op_mul_i32, { "r", "rZ", "rZ" } },
+
+    { INDEX_op_sar_i32, { "r", "rZ", "ri" } },
+    { INDEX_op_shl_i32, { "r", "rZ", "ri" } },
+    { INDEX_op_shr_i32, { "r", "rZ", "ri" } },
+    { INDEX_op_rotl_i32, { "r", "rZ", "ri" } },
+    { INDEX_op_rotr_i32, { "r", "rZ", "ri" } },
+
+    { INDEX_op_ext8s_i32, { "r", "rZ"} },
+    { INDEX_op_ext8u_i32, { "r", "rZ"} },
+    { INDEX_op_ext16s_i32, { "r", "rZ"} },
+    { INDEX_op_ext16u_i32, { "r", "rZ"} },
+
+    { INDEX_op_bswap16_i32, { "r", "rZ" } },
+    { INDEX_op_bswap32_i32, { "r", "rZ" } },
+
+    { INDEX_op_brcond_i32, { "rI", "rI" } },
+    { INDEX_op_setcond_i32, { "r", "rZ", "rZ" } },
+
+    { INDEX_op_mov_i64, { "r", "r" } },
+    { INDEX_op_movi_i64, { "r" } },
+
+    { INDEX_op_ld8u_i64, { "r", "r" } },
+    { INDEX_op_ld8s_i64, { "r", "r" } },
+    { INDEX_op_ld16u_i64, { "r", "r" } },
+    { INDEX_op_ld16s_i64, { "r", "r" } },
+    { INDEX_op_ld32u_i64, { "r", "r" } },
+    { INDEX_op_ld32s_i64, { "r", "r" } },
+    { INDEX_op_ld_i64, { "r", "r" } },
+    { INDEX_op_st8_i64, { "rZ", "r" } },
+    { INDEX_op_st16_i64, { "rZ", "r" } },
+    { INDEX_op_st32_i64, { "rZ", "r" } },
+    { INDEX_op_st_i64, { "rZ", "r" } },
+
+    { INDEX_op_add_i64, { "r", "rI", "rI" } },
+    { INDEX_op_sub_i64, { "r", "rI", "rI" } },
+
+    { INDEX_op_and_i64, { "r", "rI", "rI" } },
+    { INDEX_op_andc_i64, { "r", "rI", "rI" } },
+    { INDEX_op_eqv_i64, { "r", "rZ", "rZ" } },
+    { INDEX_op_nand_i64, { "r", "rZ", "rZ" } },
+    { INDEX_op_nor_i64, { "r", "rZ", "rZ" } },
+    { INDEX_op_or_i64, { "r", "rI", "rI" } },
+    { INDEX_op_orc_i64, { "r", "rZ", "rZ" } },
+    { INDEX_op_xor_i64, { "r", "rI", "rI" } },
+
+    { INDEX_op_mul_i64, { "r", "rZ", "rZ" } },
+
+    { INDEX_op_sar_i64, { "r", "rZ", "ri" } },
+    { INDEX_op_shl_i64, { "r", "rZ", "ri" } },
+    { INDEX_op_shr_i64, { "r", "rZ", "ri" } },
+    { INDEX_op_rotl_i64, { "r", "rZ", "ri" } },
+    { INDEX_op_rotr_i64, { "r", "rZ", "ri" } },
+
+    { INDEX_op_ext8s_i64, { "r", "rZ"} },
+    { INDEX_op_ext8u_i64, { "r", "rZ"} },
+    { INDEX_op_ext16s_i64, { "r", "rZ"} },
+    { INDEX_op_ext16u_i64, { "r", "rZ"} },
+    { INDEX_op_ext32s_i64, { "r", "rZ"} },
+    { INDEX_op_ext32u_i64, { "r", "rZ"} },
+
+    { INDEX_op_bswap16_i64, { "r", "rZ" } },
+    { INDEX_op_bswap32_i64, { "r", "rZ" } },
+    { INDEX_op_bswap64_i64, { "r", "rZ" } },
+
+    { INDEX_op_brcond_i64, { "rI", "rI" } },
+    { INDEX_op_setcond_i64, { "r", "rZ", "rZ" } },
+
+    { INDEX_op_qemu_ld8u, { "r", "r" } },
+    { INDEX_op_qemu_ld8s, { "r", "r" } },
+    { INDEX_op_qemu_ld16u, { "r", "r" } },
+    { INDEX_op_qemu_ld16s, { "r", "r" } },
+    { INDEX_op_qemu_ld32, { "r", "r" } },
+    { INDEX_op_qemu_ld32u, { "r", "r" } },
+    { INDEX_op_qemu_ld32s, { "r", "r" } },
+    { INDEX_op_qemu_ld64, { "r", "r" } },
+
+    { INDEX_op_qemu_st8, { "SZ", "r" } },
+    { INDEX_op_qemu_st16, { "SZ", "r" } },
+    { INDEX_op_qemu_st32, { "SZ", "r" } },
+    { INDEX_op_qemu_st64, { "SZ", "r" } },
+
+    { -1 },
+};
+
+/* Generate global QEMU prologue and epilogue code */
+void tcg_target_qemu_prologue(TCGContext *s)
+{
+    int frame_size;
+
+    /* reserve some stack space */
+    frame_size = TCG_STATIC_CALL_ARGS_SIZE;
+    frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
+                 ~(TCG_TARGET_STACK_ALIGN - 1);
+
+    /* First emit adhoc function descriptor */
+    *(uint64_t *)(s->code_ptr) = (uint64_t)s->code_ptr + 16; /* entry point */
+    s->code_ptr += 16; /* skip GP */
+
+    /* prologue */
+    tcg_out_bundle(s, mII,
+                   tcg_opc_m34(TCG_REG_P0, OPC_ALLOC_M34,
+                               TCG_REG_R33, 32, 24, 0),
+                   tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21,
+                               TCG_REG_B6, TCG_REG_R32, 0),
+                   tcg_opc_i22(TCG_REG_P0, OPC_MOV_I22,
+                               TCG_REG_R32, TCG_REG_B0));
+    tcg_out_bundle(s, miB,
+                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   tcg_opc_a4 (TCG_REG_P0, OPC_ADDS_A4,
+                               TCG_REG_R12, -frame_size, TCG_REG_R12),
+                   tcg_opc_b4 (TCG_REG_P0, OPC_BR_SPTK_MANY_B4, TCG_REG_B6));
+
+    /* epilogue */
+    tb_ret_addr = s->code_ptr;
+    tcg_out_bundle(s, miI,
+                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21,
+                               TCG_REG_B0, TCG_REG_R32, 0),
+                   tcg_opc_a4 (TCG_REG_P0, OPC_ADDS_A4,
+                               TCG_REG_R12, frame_size, TCG_REG_R12));
+    tcg_out_bundle(s, miB,
+                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   tcg_opc_i26(TCG_REG_P0, OPC_MOV_I_I26,
+                               TCG_REG_PFS, TCG_REG_R33),
+                   tcg_opc_b4 (TCG_REG_P0, OPC_BR_RET_SPTK_MANY_B4,
+                               TCG_REG_B0));
+}
+
+void tcg_target_init(TCGContext *s)
+{
+    tcg_regset_set(tcg_target_available_regs[TCG_TYPE_I32],
+                   0xffffffffffffffffull);
+    tcg_regset_set(tcg_target_available_regs[TCG_TYPE_I64],
+                   0xffffffffffffffffull);
+    tcg_regset_set(tcg_target_call_clobber_regs,
+                   (1 << TCG_REG_R8)  |
+                   (1 << TCG_REG_R9)  |
+                   (1 << TCG_REG_R10) |
+                   (1 << TCG_REG_R11) |
+                   (1 << TCG_REG_R13) |
+                   (1 << TCG_REG_R14) |
+                   (1 << TCG_REG_R15) |
+                   (1 << TCG_REG_R16) |
+                   (1 << TCG_REG_R17) |
+                   (1 << TCG_REG_R18) |
+                   (1 << TCG_REG_R19) |
+                   (1 << TCG_REG_R20) |
+                   (1 << TCG_REG_R21) |
+                   (1 << TCG_REG_R22) |
+                   (1 << TCG_REG_R23) |
+                   (1 << TCG_REG_R24) |
+                   (1 << TCG_REG_R25) |
+                   (1 << TCG_REG_R26) |
+                   (1 << TCG_REG_R27) |
+                   (1 << TCG_REG_R28) |
+                   (1 << TCG_REG_R29) |
+                   (1 << TCG_REG_R30) |
+                   (1 << TCG_REG_R31));
+    tcg_regset_clear(s->reserved_regs);
+
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);   /* zero register */
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);   /* global pointer */
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2);   /* internal use */
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3);   /* internal use */
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R12);  /* stack pointer */
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R32);  /* return address */
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R33);  /* PFS */
+
+    tcg_add_target_add_op_defs(ia64_op_defs);
+}
diff --git a/tcg/ia64/tcg-target.h b/tcg/ia64/tcg-target.h
new file mode 100644
index 0000000..e56e88f
--- /dev/null
+++ b/tcg/ia64/tcg-target.h
@@ -0,0 +1,156 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2009-2010 Aurelien Jarno <aurelien@aurel32.net>
+ * Based on i386/tcg-target.c - Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#define TCG_TARGET_IA64 1
+
+#define TCG_TARGET_REG_BITS 64
+
+/* We only map the first 64 registers */
+#define TCG_TARGET_NB_REGS 64
+enum {
+    TCG_REG_R0 = 0,
+    TCG_REG_R1,
+    TCG_REG_R2,
+    TCG_REG_R3,
+    TCG_REG_R4,
+    TCG_REG_R5,
+    TCG_REG_R6,
+    TCG_REG_R7,
+    TCG_REG_R8,
+    TCG_REG_R9,
+    TCG_REG_R10,
+    TCG_REG_R11,
+    TCG_REG_R12,
+    TCG_REG_R13,
+    TCG_REG_R14,
+    TCG_REG_R15,
+    TCG_REG_R16,
+    TCG_REG_R17,
+    TCG_REG_R18,
+    TCG_REG_R19,
+    TCG_REG_R20,
+    TCG_REG_R21,
+    TCG_REG_R22,
+    TCG_REG_R23,
+    TCG_REG_R24,
+    TCG_REG_R25,
+    TCG_REG_R26,
+    TCG_REG_R27,
+    TCG_REG_R28,
+    TCG_REG_R29,
+    TCG_REG_R30,
+    TCG_REG_R31,
+    TCG_REG_R32,
+    TCG_REG_R33,
+    TCG_REG_R34,
+    TCG_REG_R35,
+    TCG_REG_R36,
+    TCG_REG_R37,
+    TCG_REG_R38,
+    TCG_REG_R39,
+    TCG_REG_R40,
+    TCG_REG_R41,
+    TCG_REG_R42,
+    TCG_REG_R43,
+    TCG_REG_R44,
+    TCG_REG_R45,
+    TCG_REG_R46,
+    TCG_REG_R47,
+    TCG_REG_R48,
+    TCG_REG_R49,
+    TCG_REG_R50,
+    TCG_REG_R51,
+    TCG_REG_R52,
+    TCG_REG_R53,
+    TCG_REG_R54,
+    TCG_REG_R55,
+    TCG_REG_R56,
+    TCG_REG_R57,
+    TCG_REG_R58,
+    TCG_REG_R59,
+    TCG_REG_R60,
+    TCG_REG_R61,
+    TCG_REG_R62,
+    TCG_REG_R63,
+};
+
+#define TCG_CT_CONST_ZERO 0x100
+#define TCG_CT_CONST_S22 0x200
+
+/* used for function call generation */
+#define TCG_REG_CALL_STACK TCG_REG_R12
+#define TCG_TARGET_STACK_ALIGN 16
+#define TCG_TARGET_CALL_STACK_OFFSET 16
+
+/* optional instructions */
+#define TCG_TARGET_HAS_andc_i32
+#define TCG_TARGET_HAS_andc_i64
+#define TCG_TARGET_HAS_bswap16_i32
+#define TCG_TARGET_HAS_bswap16_i64
+#define TCG_TARGET_HAS_bswap32_i32
+#define TCG_TARGET_HAS_bswap32_i64
+#define TCG_TARGET_HAS_bswap64_i64
+#define TCG_TARGET_HAS_eqv_i32
+#define TCG_TARGET_HAS_eqv_i64
+#define TCG_TARGET_HAS_ext8s_i32
+#define TCG_TARGET_HAS_ext16s_i32
+#define TCG_TARGET_HAS_ext8s_i64
+#define TCG_TARGET_HAS_ext16s_i64
+#define TCG_TARGET_HAS_ext32s_i64
+#define TCG_TARGET_HAS_ext8u_i32
+#define TCG_TARGET_HAS_ext16u_i32
+#define TCG_TARGET_HAS_ext8u_i64
+#define TCG_TARGET_HAS_ext16u_i64
+#define TCG_TARGET_HAS_ext32u_i64
+#define TCG_TARGET_HAS_nand_i32
+#define TCG_TARGET_HAS_nand_i64
+#define TCG_TARGET_HAS_nor_i32
+#define TCG_TARGET_HAS_nor_i64
+#define TCG_TARGET_HAS_orc_i32
+#define TCG_TARGET_HAS_orc_i64
+#define TCG_TARGET_HAS_rot_i32
+#define TCG_TARGET_HAS_rot_i64
+
+/* optional instructions automatically implemented */
+#undef TCG_TARGET_HAS_neg_i32   /* sub r1, r0, r3 */
+#undef TCG_TARGET_HAS_neg_i64   /* sub r1, r0, r3 */
+#undef TCG_TARGET_HAS_not_i32   /* xor r1, -1, r3 */
+#undef TCG_TARGET_HAS_not_i64   /* xor r1, -1, r3 */
+
+/* Note: must be synced with dyngen-exec.h */
+#define TCG_AREG0 TCG_REG_R7
+
+/* Guest base is supported */
+#define TCG_TARGET_HAS_GUEST_BASE
+
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+    start = start & ~(32UL - 1UL);
+    stop = (stop + (32UL - 1UL)) & ~(32UL - 1UL);
+
+    for (; start < stop; start += 32UL) {
+        asm volatile ("fc.i %0" :: "r" (start));
+    }
+    asm volatile (";;sync.i;;srlz.i;;");
+}
-- 
1.7.0.2

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [Qemu-devel] [PATCH 3/5] ia64 disas support
  2010-03-29  0:25 [Qemu-devel] ia64 support Aurelien Jarno
                   ` (3 preceding siblings ...)
  2010-03-29  0:25 ` [Qemu-devel] [PATCH 5/5] tcg: initial ia64 support Aurelien Jarno
@ 2010-03-29  0:35 ` Aurelien Jarno
  4 siblings, 0 replies; 10+ messages in thread
From: Aurelien Jarno @ 2010-03-29  0:35 UTC (permalink / raw)
  To: qemu-devel; +Cc: Aurelien Jarno

[-- Attachment #1: Type: text/plain, Size: 480 bytes --]

Taken from binutils SVN, using last GPLv2 version.

Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
---
 Makefile.objs |    1 +
 configure     |    4 +
 dis-asm.h     |    5 +
 disas.c       |   17 +
 ia64-dis.c    |10598 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 10625 insertions(+), 0 deletions(-)
 create mode 100644 ia64-dis.c

See attached gzipped file. Sorry for the inconvenience, but the message
size on the mailing list is limited.

[-- Attachment #2: 0003-ia64-disas-support.patch.gz --]
[-- Type: application/octet-stream, Size: 65642 bytes --]

^ permalink raw reply	[flat|nested] 10+ messages in thread

* [Qemu-devel] Re: [PATCH 1/5] linux-user/ia64: workaround ia64 strangenesses
  2010-03-29  0:25 ` [Qemu-devel] [PATCH 1/5] linux-user/ia64: workaround ia64 strangenesses Aurelien Jarno
@ 2010-03-29  9:36   ` Paolo Bonzini
  2010-03-29 16:25     ` Aurelien Jarno
  0 siblings, 1 reply; 10+ messages in thread
From: Paolo Bonzini @ 2010-03-29  9:36 UTC (permalink / raw)
  To: Aurelien Jarno; +Cc: qemu-devel


> +#ifdef __ia64
> +        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
> +#else
>           sigprocmask(SIG_SETMASK,&uc->uc_sigmask, NULL);
> +#endif

Any reason for the ifdef?

Paolo

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [Qemu-devel] Re: [PATCH 1/5] linux-user/ia64: workaround ia64 strangenesses
  2010-03-29  9:36   ` [Qemu-devel] " Paolo Bonzini
@ 2010-03-29 16:25     ` Aurelien Jarno
  2010-03-30  0:00       ` Jamie Lokier
  0 siblings, 1 reply; 10+ messages in thread
From: Aurelien Jarno @ 2010-03-29 16:25 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: qemu-devel

On Mon, Mar 29, 2010 at 11:36:50AM +0200, Paolo Bonzini wrote:
>
>> +#ifdef __ia64
>> +        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
>> +#else
>>           sigprocmask(SIG_SETMASK,&uc->uc_sigmask, NULL);
>> +#endif
>
> Any reason for the ifdef?
>

It is not strictly needed, as all architectures can cope with the ia64
version. I have added it to make sure that a new architecture triggers a
warning if uc->uc_sigmask is not of type sigset_t, so that a human can
verify the cast is correct.

That said, I am fine removing the #ifdef if we consider this is unlikely
to happen.

-- 
Aurelien Jarno	                        GPG: 1024D/F1BCDB73
aurelien@aurel32.net                 http://www.aurel32.net

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [Qemu-devel] Re: [PATCH 1/5] linux-user/ia64: workaround ia64 strangenesses
  2010-03-29 16:25     ` Aurelien Jarno
@ 2010-03-30  0:00       ` Jamie Lokier
  2010-03-30  5:04         ` Aurelien Jarno
  0 siblings, 1 reply; 10+ messages in thread
From: Jamie Lokier @ 2010-03-30  0:00 UTC (permalink / raw)
  To: Aurelien Jarno; +Cc: Paolo Bonzini, qemu-devel

Aurelien Jarno wrote:
> On Mon, Mar 29, 2010 at 11:36:50AM +0200, Paolo Bonzini wrote:
> >
> >> +#ifdef __ia64
> >> +        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
> >> +#else
> >>           sigprocmask(SIG_SETMASK,&uc->uc_sigmask, NULL);
> >> +#endif
> >
> > Any reason for the ifdef?
> >
> 
> It is not strictly needed, as all architectures can cope with the ia64
> version. I have added it to make sure that a new architecture triggers a
> warning if uc->uc_sigmask is not of type sigset_t, so that a human can
> verify the cast is correct.

What type is the ia64 uc_sigmask, if not sigset_t?
A git grep of the kernel found only:

  arch/ia64/include/asm/ucontext.h:#define uc_sigmask uc_mcontext.sc_sigmask

with sc_sigmask not defined anywhere.  The uc_mcontext.sc_mask field,
though, is a sigset_t.

-- Jamie

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [Qemu-devel] Re: [PATCH 1/5] linux-user/ia64: workaround ia64 strangenesses
  2010-03-30  0:00       ` Jamie Lokier
@ 2010-03-30  5:04         ` Aurelien Jarno
  0 siblings, 0 replies; 10+ messages in thread
From: Aurelien Jarno @ 2010-03-30  5:04 UTC (permalink / raw)
  To: Jamie Lokier; +Cc: Paolo Bonzini, qemu-devel

On Tue, Mar 30, 2010 at 01:00:39AM +0100, Jamie Lokier wrote:
> Aurelien Jarno wrote:
> > On Mon, Mar 29, 2010 at 11:36:50AM +0200, Paolo Bonzini wrote:
> > >
> > >> +#ifdef __ia64
> > >> +        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
> > >> +#else
> > >>           sigprocmask(SIG_SETMASK,&uc->uc_sigmask, NULL);
> > >> +#endif
> > >
> > > Any reason for the ifdef?
> > >
> > 
> > It is not strictly needed, as all architectures can cope with the ia64
> > version. I have added it to make sure that a new architecture triggers a
> > warning if uc->uc_sigmask is not of type sigset_t, so that a human can
> > verify the cast is correct.
> 
> What type is the ia64 uc_sigmask, if not sigset_t?

It is defined as an unsigned long int. From <bits/sigcontext.h>:

|  /* sc_mask is actually an sigset_t but we don't want to
|   * include the kernel headers here. */
|  unsigned long int sc_mask;    /* signal mask to restore after handler returns */


-- 
Aurelien Jarno	                        GPG: 1024D/F1BCDB73
aurelien@aurel32.net                 http://www.aurel32.net

^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2010-03-30  5:04 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2010-03-29  0:25 [Qemu-devel] ia64 support Aurelien Jarno
2010-03-29  0:25 ` [Qemu-devel] [PATCH 1/5] linux-user/ia64: workaround ia64 strangenesses Aurelien Jarno
2010-03-29  9:36   ` [Qemu-devel] " Paolo Bonzini
2010-03-29 16:25     ` Aurelien Jarno
2010-03-30  0:00       ` Jamie Lokier
2010-03-30  5:04         ` Aurelien Jarno
2010-03-29  0:25 ` [Qemu-devel] [PATCH 2/5] linux-user: fix page_unprotect when host page size > target page size Aurelien Jarno
2010-03-29  0:25 ` [Qemu-devel] [PATCH 4/5] tcg: align static_code_gen_buffer to CODE_GEN_ALIGN Aurelien Jarno
2010-03-29  0:25 ` [Qemu-devel] [PATCH 5/5] tcg: initial ia64 support Aurelien Jarno
2010-03-29  0:35 ` [Qemu-devel] [PATCH 3/5] ia64 disas support Aurelien Jarno

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).