From: "Philippe Mathieu-Daudé" <philmd@linaro.org>
To: qemu-devel@nongnu.org
Cc: "Alistair Francis" <alistair.francis@wdc.com>,
qemu-riscv@nongnu.org, "Max Filippov" <jcmvbkbc@gmail.com>,
"Liu Zhiwei" <zhiwei_liu@linux.alibaba.com>,
"Weiwei Li" <liwei1518@gmail.com>,
"Zhao Liu" <zhao1.liu@intel.com>,
"Paolo Bonzini" <pbonzini@redhat.com>,
"Artyom Tarasenko" <atar4qemu@gmail.com>,
"Daniel Henrique Barboza" <dbarboza@ventanamicro.com>,
"Palmer Dabbelt" <palmer@dabbelt.com>,
"Mark Cave-Ayland" <mark.cave-ayland@ilande.co.uk>,
"Philippe Mathieu-Daudé" <philmd@linaro.org>
Subject: [PATCH 2/6] target/i386/monitor: Replace legacy cpu_physical_memory_read() calls
Date: Thu, 2 Oct 2025 16:57:37 +0200
Message-ID: <20251002145742.75624-3-philmd@linaro.org>
In-Reply-To: <20251002145742.75624-1-philmd@linaro.org>

Commit b7ecba0f6f6 ("docs/devel/loads-stores.rst: Document our
various load and store APIs") mentioned that the
cpu_physical_memory_*() methods are legacy, their replacement
being the address_space_*() API.

Replace:
- cpu_physical_memory_read(len=4) -> address_space_ldl()
- cpu_physical_memory_read(len=8) -> address_space_ldq()
inlining the little-endian conversion via the '_le' suffix.

As with the previous implementation, ignore whether the memory
read succeeds or fails.

Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
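As an illustration, a minimal sketch of the mechanical transformation
applied in every hunk below (reusing the names from the 32-bit walker;
passing NULL for the MemTxResult pointer discards the transaction
status, matching the previous behavior of ignoring read failures):

    /* Before: raw buffer read followed by an explicit endian swap */
    uint32_t pde;
    cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
    pde = le32_to_cpu(pde);

    /* After: one typed load; the '_le' suffix folds in the
     * little-endian conversion, NULL ignores the MemTxResult */
    pde = address_space_ldl_le(as, pgd + l1 * 4,
                               MEMTXATTRS_UNSPECIFIED, NULL);

The 64-bit case is analogous: address_space_ldq_le() replaces an
8-byte cpu_physical_memory_read() plus le64_to_cpu().
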
target/i386/monitor.c | 96 ++++++++++++++++++++-----------------------
1 file changed, 44 insertions(+), 52 deletions(-)
diff --git a/target/i386/monitor.c b/target/i386/monitor.c
index 7e7854e6c1b..d2bb873d494 100644
--- a/target/i386/monitor.c
+++ b/target/i386/monitor.c
@@ -30,6 +30,7 @@
#include "qobject/qdict.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc.h"
+#include "system/memory.h"
/* Perform linear address sign extension */
static hwaddr addr_canonical(CPUArchState *env, hwaddr addr)
@@ -70,21 +71,21 @@ static void print_pte(Monitor *mon, CPUArchState *env, hwaddr addr,
static void tlb_info_32(Monitor *mon, CPUArchState *env, AddressSpace *as)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
unsigned int l1, l2;
uint32_t pgd, pde, pte;
pgd = env->cr[3] & ~0xfff;
for(l1 = 0; l1 < 1024; l1++) {
- cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
- pde = le32_to_cpu(pde);
+ pde = address_space_ldl_le(as, pgd + l1 * 4, attrs, NULL);
if (pde & PG_PRESENT_MASK) {
if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
/* 4M pages */
print_pte(mon, env, (l1 << 22), pde, ~((1 << 21) - 1));
} else {
for(l2 = 0; l2 < 1024; l2++) {
- cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
- pte = le32_to_cpu(pte);
+ pte = address_space_ldl_le(as, (pde & ~0xfff) + l2 * 4,
+ attrs, NULL);
if (pte & PG_PRESENT_MASK) {
print_pte(mon, env, (l1 << 22) + (l2 << 12),
pte & ~PG_PSE_MASK,
@@ -98,19 +99,18 @@ static void tlb_info_32(Monitor *mon, CPUArchState *env, AddressSpace *as)
static void tlb_info_pae32(Monitor *mon, CPUArchState *env, AddressSpace *as)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
unsigned int l1, l2, l3;
uint64_t pdpe, pde, pte;
uint64_t pdp_addr, pd_addr, pt_addr;
pdp_addr = env->cr[3] & ~0x1f;
for (l1 = 0; l1 < 4; l1++) {
- cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
- pdpe = le64_to_cpu(pdpe);
+ pdpe = address_space_ldq_le(as, pdp_addr + l1 * 8, attrs, NULL);
if (pdpe & PG_PRESENT_MASK) {
pd_addr = pdpe & 0x3fffffffff000ULL;
for (l2 = 0; l2 < 512; l2++) {
- cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
- pde = le64_to_cpu(pde);
+ pde = address_space_ldq_le(as, pd_addr + l2 * 8, attrs, NULL);
if (pde & PG_PRESENT_MASK) {
if (pde & PG_PSE_MASK) {
/* 2M pages with PAE, CR4.PSE is ignored */
@@ -119,8 +119,8 @@ static void tlb_info_pae32(Monitor *mon, CPUArchState *env, AddressSpace *as)
} else {
pt_addr = pde & 0x3fffffffff000ULL;
for (l3 = 0; l3 < 512; l3++) {
- cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
- pte = le64_to_cpu(pte);
+ pte = address_space_ldq_le(as, pt_addr + l3 * 8,
+ attrs, NULL);
if (pte & PG_PRESENT_MASK) {
print_pte(mon, env, (l1 << 30) + (l2 << 21)
+ (l3 << 12),
@@ -139,21 +139,20 @@ static void tlb_info_pae32(Monitor *mon, CPUArchState *env, AddressSpace *as)
static void tlb_info_la48(Monitor *mon, CPUArchState *env, AddressSpace *as,
uint64_t l0, uint64_t pml4_addr)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
uint64_t l1, l2, l3, l4;
uint64_t pml4e, pdpe, pde, pte;
uint64_t pdp_addr, pd_addr, pt_addr;
for (l1 = 0; l1 < 512; l1++) {
- cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
- pml4e = le64_to_cpu(pml4e);
+ pml4e = address_space_ldq_le(as, pml4_addr + l1 * 8, attrs, NULL);
if (!(pml4e & PG_PRESENT_MASK)) {
continue;
}
pdp_addr = pml4e & 0x3fffffffff000ULL;
for (l2 = 0; l2 < 512; l2++) {
- cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
- pdpe = le64_to_cpu(pdpe);
+ pdpe = address_space_ldq_le(as, pdp_addr + l2 * 8, attrs, NULL);
if (!(pdpe & PG_PRESENT_MASK)) {
continue;
}
@@ -167,8 +166,7 @@ static void tlb_info_la48(Monitor *mon, CPUArchState *env, AddressSpace *as,
pd_addr = pdpe & 0x3fffffffff000ULL;
for (l3 = 0; l3 < 512; l3++) {
- cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
- pde = le64_to_cpu(pde);
+ pde = address_space_ldq_le(as, pd_addr + l3 * 8, attrs, NULL);
if (!(pde & PG_PRESENT_MASK)) {
continue;
}
@@ -182,10 +180,8 @@ static void tlb_info_la48(Monitor *mon, CPUArchState *env, AddressSpace *as,
pt_addr = pde & 0x3fffffffff000ULL;
for (l4 = 0; l4 < 512; l4++) {
- cpu_physical_memory_read(pt_addr
- + l4 * 8,
- &pte, 8);
- pte = le64_to_cpu(pte);
+ pte = address_space_ldq_le(as, pt_addr + l4 * 8,
+ attrs, NULL);
if (pte & PG_PRESENT_MASK) {
print_pte(mon, env, (l0 << 48) + (l1 << 39) +
(l2 << 30) + (l3 << 21) + (l4 << 12),
@@ -199,14 +195,14 @@ static void tlb_info_la48(Monitor *mon, CPUArchState *env, AddressSpace *as,
static void tlb_info_la57(Monitor *mon, CPUArchState *env, AddressSpace *as)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
uint64_t l0;
uint64_t pml5e;
uint64_t pml5_addr;
pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
for (l0 = 0; l0 < 512; l0++) {
- cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
- pml5e = le64_to_cpu(pml5e);
+ pml5e = address_space_ldq_le(as, pml5_addr + l0 * 8, attrs, NULL);
if (pml5e & PG_PRESENT_MASK) {
tlb_info_la48(mon, env, as, l0, pml5e & 0x3fffffffff000ULL);
}
@@ -275,6 +271,7 @@ static void mem_print(Monitor *mon, CPUArchState *env,
static void mem_info_32(Monitor *mon, CPUArchState *env, AddressSpace *as)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
unsigned int l1, l2;
int prot, last_prot;
uint32_t pgd, pde, pte;
@@ -284,8 +281,7 @@ static void mem_info_32(Monitor *mon, CPUArchState *env, AddressSpace *as)
last_prot = 0;
start = -1;
for(l1 = 0; l1 < 1024; l1++) {
- cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
- pde = le32_to_cpu(pde);
+ pde = address_space_ldl_le(as, pgd + l1 * 4, attrs, NULL);
end = l1 << 22;
if (pde & PG_PRESENT_MASK) {
if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
@@ -293,8 +289,8 @@ static void mem_info_32(Monitor *mon, CPUArchState *env, AddressSpace *as)
mem_print(mon, env, &start, &last_prot, end, prot);
} else {
for(l2 = 0; l2 < 1024; l2++) {
- cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
- pte = le32_to_cpu(pte);
+ pte = address_space_ldl_le(as, (pde & ~0xfff) + l2 * 4,
+ attrs, NULL);
end = (l1 << 22) + (l2 << 12);
if (pte & PG_PRESENT_MASK) {
prot = pte & pde &
@@ -316,6 +312,7 @@ static void mem_info_32(Monitor *mon, CPUArchState *env, AddressSpace *as)
static void mem_info_pae32(Monitor *mon, CPUArchState *env, AddressSpace *as)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
unsigned int l1, l2, l3;
int prot, last_prot;
uint64_t pdpe, pde, pte;
@@ -326,14 +323,12 @@ static void mem_info_pae32(Monitor *mon, CPUArchState *env, AddressSpace *as)
last_prot = 0;
start = -1;
for (l1 = 0; l1 < 4; l1++) {
- cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
- pdpe = le64_to_cpu(pdpe);
+ pdpe = address_space_ldq_le(as, pdp_addr + l1 * 8, attrs, NULL);
end = l1 << 30;
if (pdpe & PG_PRESENT_MASK) {
pd_addr = pdpe & 0x3fffffffff000ULL;
for (l2 = 0; l2 < 512; l2++) {
- cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
- pde = le64_to_cpu(pde);
+ pde = address_space_ldq_le(as, pd_addr + l2 * 8, attrs, NULL);
end = (l1 << 30) + (l2 << 21);
if (pde & PG_PRESENT_MASK) {
if (pde & PG_PSE_MASK) {
@@ -343,8 +338,8 @@ static void mem_info_pae32(Monitor *mon, CPUArchState *env, AddressSpace *as)
} else {
pt_addr = pde & 0x3fffffffff000ULL;
for (l3 = 0; l3 < 512; l3++) {
- cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
- pte = le64_to_cpu(pte);
+ pte = address_space_ldq_le(as, pt_addr + l3 * 8,
+ attrs, NULL);
end = (l1 << 30) + (l2 << 21) + (l3 << 12);
if (pte & PG_PRESENT_MASK) {
prot = pte & pde & (PG_USER_MASK | PG_RW_MASK |
@@ -373,6 +368,7 @@ static void mem_info_pae32(Monitor *mon, CPUArchState *env, AddressSpace *as)
#ifdef TARGET_X86_64
static void mem_info_la48(Monitor *mon, CPUArchState *env, AddressSpace *as)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
int prot, last_prot;
uint64_t l1, l2, l3, l4;
uint64_t pml4e, pdpe, pde, pte;
@@ -382,14 +378,12 @@ static void mem_info_la48(Monitor *mon, CPUArchState *env, AddressSpace *as)
last_prot = 0;
start = -1;
for (l1 = 0; l1 < 512; l1++) {
- cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
- pml4e = le64_to_cpu(pml4e);
+ pml4e = address_space_ldq_le(as, pml4_addr + l1 * 8, attrs, NULL);
end = l1 << 39;
if (pml4e & PG_PRESENT_MASK) {
pdp_addr = pml4e & 0x3fffffffff000ULL;
for (l2 = 0; l2 < 512; l2++) {
- cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
- pdpe = le64_to_cpu(pdpe);
+ pdpe = address_space_ldq_le(as, pdp_addr + l2 * 8, attrs, NULL);
end = (l1 << 39) + (l2 << 30);
if (pdpe & PG_PRESENT_MASK) {
if (pdpe & PG_PSE_MASK) {
@@ -400,8 +394,8 @@ static void mem_info_la48(Monitor *mon, CPUArchState *env, AddressSpace *as)
} else {
pd_addr = pdpe & 0x3fffffffff000ULL;
for (l3 = 0; l3 < 512; l3++) {
- cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
- pde = le64_to_cpu(pde);
+ pde = address_space_ldq_le(as, pd_addr + l3 * 8,
+ attrs, NULL);
end = (l1 << 39) + (l2 << 30) + (l3 << 21);
if (pde & PG_PRESENT_MASK) {
if (pde & PG_PSE_MASK) {
@@ -413,10 +407,10 @@ static void mem_info_la48(Monitor *mon, CPUArchState *env, AddressSpace *as)
} else {
pt_addr = pde & 0x3fffffffff000ULL;
for (l4 = 0; l4 < 512; l4++) {
- cpu_physical_memory_read(pt_addr
- + l4 * 8,
- &pte, 8);
- pte = le64_to_cpu(pte);
+ pte = address_space_ldq_le(as,
+ pt_addr
+ + l4 * 8,
+ attrs, NULL);
end = (l1 << 39) + (l2 << 30) +
(l3 << 21) + (l4 << 12);
if (pte & PG_PRESENT_MASK) {
@@ -453,6 +447,7 @@ static void mem_info_la48(Monitor *mon, CPUArchState *env, AddressSpace *as)
static void mem_info_la57(Monitor *mon, CPUArchState *env, AddressSpace *as)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
int prot, last_prot;
uint64_t l0, l1, l2, l3, l4;
uint64_t pml5e, pml4e, pdpe, pde, pte;
@@ -462,8 +457,7 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env, AddressSpace *as)
last_prot = 0;
start = -1;
for (l0 = 0; l0 < 512; l0++) {
- cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
- pml5e = le64_to_cpu(pml5e);
+ pml5e = address_space_ldq_le(as, pml5_addr + l0 * 8, attrs, NULL);
end = l0 << 48;
if (!(pml5e & PG_PRESENT_MASK)) {
prot = 0;
@@ -473,8 +467,7 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env, AddressSpace *as)
pml4_addr = pml5e & 0x3fffffffff000ULL;
for (l1 = 0; l1 < 512; l1++) {
- cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
- pml4e = le64_to_cpu(pml4e);
+ pml4e = address_space_ldq_le(as, pml4_addr + l1 * 8, attrs, NULL);
end = (l0 << 48) + (l1 << 39);
if (!(pml4e & PG_PRESENT_MASK)) {
prot = 0;
@@ -484,8 +477,7 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env, AddressSpace *as)
pdp_addr = pml4e & 0x3fffffffff000ULL;
for (l2 = 0; l2 < 512; l2++) {
- cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
- pdpe = le64_to_cpu(pdpe);
+ pdpe = address_space_ldq_le(as, pdp_addr + l2 * 8, attrs, NULL);
end = (l0 << 48) + (l1 << 39) + (l2 << 30);
if (pdpe & PG_PRESENT_MASK) {
prot = 0;
@@ -503,8 +495,8 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env, AddressSpace *as)
pd_addr = pdpe & 0x3fffffffff000ULL;
for (l3 = 0; l3 < 512; l3++) {
- cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
- pde = le64_to_cpu(pde);
+ pde = address_space_ldq_le(as, pd_addr + l3 * 8,
+ attrs, NULL);
end = (l0 << 48) + (l1 << 39) + (l2 << 30) + (l3 << 21);
if (pde & PG_PRESENT_MASK) {
prot = 0;
@@ -522,8 +514,8 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env, AddressSpace *as)
pt_addr = pde & 0x3fffffffff000ULL;
for (l4 = 0; l4 < 512; l4++) {
- cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
- pte = le64_to_cpu(pte);
+ pte = address_space_ldq_le(as, pt_addr + l4 * 8,
+ attrs, NULL);
end = (l0 << 48) + (l1 << 39) + (l2 << 30) +
(l3 << 21) + (l4 << 12);
if (pte & PG_PRESENT_MASK) {
--
2.51.0
Thread overview: 14+ messages
2025-10-02 14:57 [PATCH 0/6] target: Remove remnant legacy cpu_physical_memory_*() calls Philippe Mathieu-Daudé
2025-10-02 14:57 ` [PATCH 1/6] target/i386/monitor: Propagate CPU address space to 'info mem' handlers Philippe Mathieu-Daudé
2025-10-08 14:04 ` Manos Pitsidianakis
2025-10-02 14:57 ` Philippe Mathieu-Daudé [this message]
2025-10-08 14:03 ` [PATCH 2/6] target/i386/monitor: Replace legacy cpu_physical_memory_read() calls Manos Pitsidianakis
2025-10-02 14:57 ` [PATCH 3/6] target/riscv/kvm: Replace legacy cpu_physical_memory_read/write() calls Philippe Mathieu-Daudé
2025-10-08 14:01 ` Manos Pitsidianakis
2025-10-02 14:57 ` [PATCH 4/6] target/riscv/monitor: Replace legacy cpu_physical_memory_read() call Philippe Mathieu-Daudé
2025-10-08 14:00 ` Manos Pitsidianakis
2025-10-02 14:57 ` [PATCH 5/6] target/xtensa: Replace legacy cpu_physical_memory_[un]map() calls Philippe Mathieu-Daudé
2025-10-08 13:57 ` Manos Pitsidianakis
2025-10-02 14:57 ` [PATCH 6/6] target/sparc: Reduce inclusions of 'exec/cpu-common.h' Philippe Mathieu-Daudé
2025-10-08 13:58 ` Manos Pitsidianakis
2025-10-16 15:08 ` [PATCH 0/6] target: Remove remnant legacy cpu_physical_memory_*() calls Philippe Mathieu-Daudé