From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Subject: [PULL 06/85] target/hppa: Populate an interval tree with valid tlb entries
Date: Mon, 6 Nov 2023 19:02:48 -0800
Message-ID: <20231107030407.8979-7-richard.henderson@linaro.org>
In-Reply-To: <20231107030407.8979-1-richard.henderson@linaro.org>
Complete the data structure conversion started earlier: keep all valid
tlb entries in an interval tree, so that lookup is a tree walk instead
of a linear scan over the whole array. This reduces the perf overhead
of hppa_get_physical_address from ~5% to ~0.25%.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
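For reviewers unfamiliar with the interval tree API: the lookup side of
the conversion uses QEMU's interval tree from
include/qemu/interval-tree.h. A minimal sketch of the pattern the hunks
below adopt (Entry is a hypothetical stand-in for HPPATLBEntry; this is
illustration, not part of the patch):

#include "qemu/osdep.h"
#include "qemu/interval-tree.h"

typedef struct Entry {
    IntervalTreeNode itree;     /* keyed on [itree.start, itree.last] */
    uint64_t pa;
} Entry;

/* Stabbing query: find an entry whose range contains addr, O(log n). */
static Entry *entry_find(IntervalTreeRoot *root, uint64_t addr)
{
    IntervalTreeNode *i = interval_tree_iter_first(root, addr, addr);
    return i ? container_of(i, Entry, itree) : NULL;
}

/* Set the range before linking the node into the tree. */
static void entry_insert(IntervalTreeRoot *root, Entry *e,
                         uint64_t start, uint64_t last)
{
    e->itree.start = start;
    e->itree.last = last;
    interval_tree_insert(&e->itree, root);
}

interval_tree_remove() undoes the insert; all three calls appear in the
mem_helper.c hunks below.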
target/hppa/cpu.h | 24 +++++-
target/hppa/cpu.c | 2 +
target/hppa/machine.c | 51 ++++++++++++-
target/hppa/mem_helper.c | 161 +++++++++++++++++++++++----------------
4 files changed, 167 insertions(+), 71 deletions(-)
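The other half of the conversion is the free list threaded through the
new union in HPPATLBEntry: while an entry is invalid it is out of the
tree, so its itree storage doubles as a singly linked next pointer. A
sketch of the push/pop pair under that assumption (the names
push_unused/pop_unused are illustrative; in the patch,
hppa_flush_tlb_ent pushes and hppa_alloc_tlb_ent pops):

static void push_unused(HPPATLBEntry **head, HPPATLBEntry *ent)
{
    /* ent must already be removed from the tree and zeroed. */
    ent->unused_next = *head;
    *head = ent;
}

static HPPATLBEntry *pop_unused(HPPATLBEntry **head)
{
    HPPATLBEntry *ent = *head;
    if (ent) {
        *head = ent->unused_next;
    }
    return ent;
}

Only when the list is empty does the allocator fall back to the
round-robin eviction index kept in tlb_last.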
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index 1480d0237a..08de894393 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -176,7 +176,10 @@ typedef int64_t target_sreg;
#endif
typedef struct HPPATLBEntry {
- IntervalTreeNode itree;
+ union {
+ IntervalTreeNode itree;
+ struct HPPATLBEntry *unused_next;
+ };
target_ureg pa;
@@ -234,10 +237,22 @@ typedef struct CPUArchState {
#define HPPA_TLB_ENTRIES 256
#define HPPA_BTLB_ENTRIES (HPPA_BTLB_FIXED + HPPA_BTLB_VARIABLE)
- /* ??? Implement a unified itlb/dtlb for the moment. */
- /* ??? We should use a more intelligent data structure. */
- HPPATLBEntry tlb[HPPA_TLB_ENTRIES];
+ /* Index for round-robin tlb eviction. */
uint32_t tlb_last;
+
+ /*
+ * For pa1.x, the partially initialized, still invalid tlb entry
+ * which has had ITLBA performed, but not yet ITLBP.
+ */
+ HPPATLBEntry *tlb_partial;
+
+ /* Linked list of all invalid (unused) tlb entries. */
+ HPPATLBEntry *tlb_unused;
+
+ /* Root of the search tree for all valid tlb entries. */
+ IntervalTreeRoot tlb_root;
+
+ HPPATLBEntry tlb[HPPA_TLB_ENTRIES];
} CPUHPPAState;
/**
@@ -356,6 +371,7 @@ int hppa_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int hppa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void hppa_cpu_dump_state(CPUState *cs, FILE *f, int);
#ifndef CONFIG_USER_ONLY
+void hppa_ptlbe(CPUHPPAState *env);
hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr);
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index 1644297bf8..5e1240c631 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -137,8 +137,10 @@ static void hppa_cpu_realizefn(DeviceState *dev, Error **errp)
#ifndef CONFIG_USER_ONLY
{
HPPACPU *cpu = HPPA_CPU(cs);
+
cpu->alarm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
hppa_cpu_alarm_timer, cpu);
+ hppa_ptlbe(&cpu->env);
}
#endif
}
diff --git a/target/hppa/machine.c b/target/hppa/machine.c
index 4535195ca2..ab3e8c81fa 100644
--- a/target/hppa/machine.c
+++ b/target/hppa/machine.c
@@ -72,8 +72,6 @@ static int get_tlb(QEMUFile *f, void *opaque, size_t size,
HPPATLBEntry *ent = opaque;
uint32_t val;
- memset(ent, 0, sizeof(*ent));
-
ent->itree.start = qemu_get_be64(f);
ent->pa = qemu_get_betr(f);
val = qemu_get_be32(f);
@@ -122,6 +120,53 @@ static const VMStateInfo vmstate_tlb = {
.put = put_tlb,
};
+static int tlb_pre_load(void *opaque)
+{
+ CPUHPPAState *env = opaque;
+
+ /*
+ * Zap the entire tlb, on-the-side data structures and all.
+ * Each tlb entry will have data re-filled by get_tlb.
+ */
+ memset(env->tlb, 0, sizeof(env->tlb));
+ memset(&env->tlb_root, 0, sizeof(env->tlb_root));
+ env->tlb_unused = NULL;
+ env->tlb_partial = NULL;
+
+ return 0;
+}
+
+static int tlb_post_load(void *opaque, int version_id)
+{
+ CPUHPPAState *env = opaque;
+ HPPATLBEntry **unused = &env->tlb_unused;
+ HPPATLBEntry *partial = NULL;
+
+ /*
+ * Re-create the interval tree from the valid entries.
+ * Truly invalid entries should have start == last == 0.
+ * Otherwise it should be the in-flight tlb_partial entry.
+ */
+ for (uint32_t i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
+ HPPATLBEntry *e = &env->tlb[i];
+
+ if (e->entry_valid) {
+ interval_tree_insert(&e->itree, &env->tlb_root);
+ } else if (i < HPPA_BTLB_ENTRIES) {
+ /* btlb not in unused list */
+ } else if (partial == NULL && e->itree.start < e->itree.last) {
+ partial = e;
+ } else {
+ *unused = e;
+ unused = &e->unused_next;
+ }
+ }
+ env->tlb_partial = partial;
+ *unused = NULL;
+
+ return 0;
+}
+
static VMStateField vmstate_env_fields[] = {
VMSTATE_UINTTR_ARRAY(gr, CPUHPPAState, 32),
VMSTATE_UINT64_ARRAY(fr, CPUHPPAState, 32),
@@ -164,6 +209,8 @@ static const VMStateDescription vmstate_env = {
.version_id = 1,
.minimum_version_id = 1,
.fields = vmstate_env_fields,
+ .pre_load = tlb_pre_load,
+ .post_load = tlb_post_load,
};
static VMStateField vmstate_cpu_fields[] = {
diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c
index 828cceb29c..b1773ece61 100644
--- a/target/hppa/mem_helper.c
+++ b/target/hppa/mem_helper.c
@@ -27,16 +27,13 @@
static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
- int i;
+ IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr);
- for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
- HPPATLBEntry *ent = &env->tlb[i];
- if (ent->itree.start <= addr && addr <= ent->itree.last) {
- trace_hppa_tlb_find_entry(env, ent + i, ent->entry_valid,
- ent->itree.start, ent->itree.last,
- ent->pa);
- return ent;
- }
+ if (i) {
+ HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
+ trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
+ ent->itree.start, ent->itree.last, ent->pa);
+ return ent;
}
trace_hppa_tlb_find_entry_not_found(env, addr);
return NULL;
@@ -46,6 +43,7 @@ static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
bool force_flush_btlb)
{
CPUState *cs = env_cpu(env);
+ bool is_btlb;
if (!ent->entry_valid) {
return;
@@ -58,50 +56,55 @@ static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
ent->itree.last - ent->itree.start + 1,
HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);
- /* never clear BTLBs, unless forced to do so. */
- if (ent < &env->tlb[HPPA_BTLB_ENTRIES] && !force_flush_btlb) {
+ /* Never clear BTLBs, unless forced to do so. */
+ is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES];
+ if (is_btlb && !force_flush_btlb) {
return;
}
+ interval_tree_remove(&ent->itree, &env->tlb_root);
memset(ent, 0, sizeof(*ent));
- ent->itree.start = -1;
+
+ if (!is_btlb) {
+ ent->unused_next = env->tlb_unused;
+ env->tlb_unused = ent;
+ }
}
-static HPPATLBEntry *hppa_flush_tlb_range(CPUHPPAState *env,
- vaddr va_b, vaddr va_e)
+static void hppa_flush_tlb_range(CPUHPPAState *env, vaddr va_b, vaddr va_e)
{
- HPPATLBEntry *empty = NULL;
+ IntervalTreeNode *i, *n;
- /* Zap any old entries covering ADDR; notice empty entries on the way. */
- for (int i = HPPA_BTLB_ENTRIES; i < ARRAY_SIZE(env->tlb); ++i) {
- HPPATLBEntry *ent = &env->tlb[i];
+ i = interval_tree_iter_first(&env->tlb_root, va_b, va_e);
+ for (; i ; i = n) {
+ HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
- if (!ent->entry_valid) {
- empty = ent;
- } else if (va_e >= ent->itree.start && va_b <= ent->itree.last) {
- hppa_flush_tlb_ent(env, ent, false);
- empty = ent;
- }
+ /*
+ * Find the next entry now: in the normal case the current entry
+ * will be removed, but in the BTLB case it will remain.
+ */
+ n = interval_tree_iter_next(i, va_b, va_e);
+ hppa_flush_tlb_ent(env, ent, false);
}
- return empty;
}
static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
- HPPATLBEntry *ent;
- uint32_t i;
+ HPPATLBEntry *ent = env->tlb_unused;
- if (env->tlb_last < HPPA_BTLB_ENTRIES || env->tlb_last >= ARRAY_SIZE(env->tlb)) {
- i = HPPA_BTLB_ENTRIES;
- env->tlb_last = HPPA_BTLB_ENTRIES + 1;
- } else {
- i = env->tlb_last;
- env->tlb_last++;
+ if (ent == NULL) {
+ uint32_t i = env->tlb_last;
+
+ if (i < HPPA_BTLB_ENTRIES || i >= ARRAY_SIZE(env->tlb)) {
+ i = HPPA_BTLB_ENTRIES;
+ }
+ env->tlb_last = i + 1;
+
+ ent = &env->tlb[i];
+ hppa_flush_tlb_ent(env, ent, false);
}
- ent = &env->tlb[i];
-
- hppa_flush_tlb_ent(env, ent, false);
+ env->tlb_unused = ent->unused_next;
return ent;
}
@@ -127,7 +130,7 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
/* Find a valid tlb entry that matches the virtual address. */
ent = hppa_find_tlb(env, addr);
- if (ent == NULL || !ent->entry_valid) {
+ if (ent == NULL) {
phys = 0;
prot = 0;
ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
@@ -303,23 +306,23 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
/* Insert (Insn/Data) TLB Address. Note this is PA 1.1 only. */
void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
- HPPATLBEntry *empty;
+ HPPATLBEntry *ent;
- /* Zap any old entries covering ADDR; notice empty entries on the way. */
+ /* Zap any old entries covering ADDR. */
addr &= TARGET_PAGE_MASK;
- empty = hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);
+ hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);
- /* If we didn't see an empty entry, evict one. */
- if (empty == NULL) {
- empty = hppa_alloc_tlb_ent(env);
+ ent = env->tlb_partial;
+ if (ent == NULL) {
+ ent = hppa_alloc_tlb_ent(env);
+ env->tlb_partial = ent;
}
- /* Note that empty->entry_valid == 0 already. */
- empty->itree.start = addr;
- empty->itree.last = addr + TARGET_PAGE_SIZE - 1;
- empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
- trace_hppa_tlb_itlba(env, empty, empty->itree.start,
- empty->itree.last, empty->pa);
+ /* Note that ent->entry_valid == 0 already. */
+ ent->itree.start = addr;
+ ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
+ ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
+ trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
}
static void set_access_bits(CPUHPPAState *env, HPPATLBEntry *ent, target_ureg reg)
@@ -333,6 +336,8 @@ static void set_access_bits(CPUHPPAState *env, HPPATLBEntry *ent, target_ureg re
ent->d = extract32(reg, 28, 1);
ent->t = extract32(reg, 29, 1);
ent->entry_valid = 1;
+
+ interval_tree_insert(&ent->itree, &env->tlb_root);
trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}
@@ -340,14 +345,16 @@ static void set_access_bits(CPUHPPAState *env, HPPATLBEntry *ent, target_ureg re
/* Insert (Insn/Data) TLB Protection. Note this is PA 1.1 only. */
void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
- HPPATLBEntry *ent = hppa_find_tlb(env, addr);
+ HPPATLBEntry *ent = env->tlb_partial;
- if (unlikely(ent == NULL)) {
- qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
- return;
+ if (ent) {
+ env->tlb_partial = NULL;
+ if (ent->itree.start <= addr && addr <= ent->itree.last) {
+ set_access_bits(env, ent, reg);
+ return;
+ }
}
-
- set_access_bits(env, ent, reg);
+ qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
}
/* Purge (Insn/Data) TLB. This is explicitly page-based, and is
@@ -356,17 +363,15 @@ static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
CPUHPPAState *env = cpu_env(cpu);
target_ulong addr = (target_ulong) data.target_ptr;
- HPPATLBEntry *ent = hppa_find_tlb(env, addr);
- if (ent && ent->entry_valid) {
- hppa_flush_tlb_ent(env, ent, false);
- }
+ hppa_flush_tlb_range(env, addr, addr);
}
void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
CPUState *src = env_cpu(env);
CPUState *cpu;
+
trace_hppa_tlb_ptlb(env);
run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);
@@ -378,16 +383,40 @@ void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
async_safe_run_on_cpu(src, ptlb_work, data);
}
+void hppa_ptlbe(CPUHPPAState *env)
+{
+ uint32_t i;
+
+ /* Zap the (non-btlb) tlb entries themselves. */
+ memset(&env->tlb[HPPA_BTLB_ENTRIES], 0,
+ sizeof(env->tlb) - HPPA_BTLB_ENTRIES * sizeof(env->tlb[0]));
+ env->tlb_last = HPPA_BTLB_ENTRIES;
+ env->tlb_partial = NULL;
+
+ /* Put them all onto the unused list. */
+ env->tlb_unused = &env->tlb[HPPA_BTLB_ENTRIES];
+ for (i = HPPA_BTLB_ENTRIES; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
+ env->tlb[i].unused_next = &env->tlb[i + 1];
+ }
+
+ /* Re-initialize the interval tree with only the btlb entries. */
+ memset(&env->tlb_root, 0, sizeof(env->tlb_root));
+ for (i = 0; i < HPPA_BTLB_ENTRIES; ++i) {
+ if (env->tlb[i].entry_valid) {
+ interval_tree_insert(&env->tlb[i].itree, &env->tlb_root);
+ }
+ }
+
+ tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
+}
+
/* Purge (Insn/Data) TLB entry. This affects an implementation-defined
number of pages/entries (we choose all), and is local to the cpu. */
void HELPER(ptlbe)(CPUHPPAState *env)
{
trace_hppa_tlb_ptlbe(env);
qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
- memset(&env->tlb[HPPA_BTLB_ENTRIES], 0,
- sizeof(env->tlb) - HPPA_BTLB_ENTRIES * sizeof(env->tlb[0]));
- env->tlb_last = HPPA_BTLB_ENTRIES;
- tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
+ hppa_ptlbe(env);
}
void cpu_hppa_change_prot_id(CPUHPPAState *env)
@@ -483,9 +512,11 @@ void HELPER(diag_btlb)(CPUHPPAState *env)
(long long) virt_page, phys_page, len, slot);
if (slot < HPPA_BTLB_ENTRIES) {
btlb = &env->tlb[slot];
- /* force flush of possibly existing BTLB entry */
+
+ /* Force flush of possibly existing BTLB entry. */
hppa_flush_tlb_ent(env, btlb, true);
- /* create new BTLB entry */
+
+ /* Create new BTLB entry. */
btlb->itree.start = virt_page << TARGET_PAGE_BITS;
btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
btlb->pa = phys_page << TARGET_PAGE_BITS;
--
2.34.1