From: Mukesh Rathor <mrathor@linux.microsoft.com>
To: linux-hyperv@vger.kernel.org
Cc: wei.liu@kernel.org, nunodasneves@linux.microsoft.com
Subject: [PATCH] mshv: make certain field names descriptive in a header struct
Date: Fri, 9 Jan 2026 12:06:11 -0800 [thread overview]
Message-ID: <20260109200611.1422390-1-mrathor@linux.microsoft.com> (raw)
There is no functional change. Just make a couple of field names in
struct mshv_mem_region, a struct defined in a header that can be used
in many places, a little more descriptive to make the code easier to
read and to allow better support for grep, cscope, etc.
Signed-off-by: Mukesh Rathor <mrathor@linux.microsoft.com>
---
drivers/hv/mshv_regions.c | 44 ++++++++++++++++++-------------------
drivers/hv/mshv_root.h | 6 ++---
drivers/hv/mshv_root_main.c | 10 ++++-----
3 files changed, 30 insertions(+), 30 deletions(-)
diff --git a/drivers/hv/mshv_regions.c b/drivers/hv/mshv_regions.c
index 202b9d551e39..af81405f859b 100644
--- a/drivers/hv/mshv_regions.c
+++ b/drivers/hv/mshv_regions.c
@@ -52,7 +52,7 @@ static long mshv_region_process_chunk(struct mshv_mem_region *region,
struct page *page;
int ret;
- page = region->pages[page_offset];
+ page = region->mreg_pages[page_offset];
if (!page)
return -EINVAL;
@@ -65,7 +65,7 @@ static long mshv_region_process_chunk(struct mshv_mem_region *region,
/* Start at stride since the first page is validated */
for (count = stride; count < page_count; count += stride) {
- page = region->pages[page_offset + count];
+ page = region->mreg_pages[page_offset + count];
/* Break if current page is not present */
if (!page)
@@ -117,7 +117,7 @@ static int mshv_region_process_range(struct mshv_mem_region *region,
while (page_count) {
/* Skip non-present pages */
- if (!region->pages[page_offset]) {
+ if (!region->mreg_pages[page_offset]) {
page_offset++;
page_count--;
continue;
@@ -164,13 +164,13 @@ static int mshv_region_chunk_share(struct mshv_mem_region *region,
u32 flags,
u64 page_offset, u64 page_count)
{
- struct page *page = region->pages[page_offset];
+ struct page *page = region->mreg_pages[page_offset];
if (PageHuge(page) || PageTransCompound(page))
flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
return hv_call_modify_spa_host_access(region->partition->pt_id,
- region->pages + page_offset,
+ region->mreg_pages + page_offset,
page_count,
HV_MAP_GPA_READABLE |
HV_MAP_GPA_WRITABLE,
@@ -190,13 +190,13 @@ static int mshv_region_chunk_unshare(struct mshv_mem_region *region,
u32 flags,
u64 page_offset, u64 page_count)
{
- struct page *page = region->pages[page_offset];
+ struct page *page = region->mreg_pages[page_offset];
if (PageHuge(page) || PageTransCompound(page))
flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
return hv_call_modify_spa_host_access(region->partition->pt_id,
- region->pages + page_offset,
+ region->mreg_pages + page_offset,
page_count, 0,
flags, false);
}
@@ -214,7 +214,7 @@ static int mshv_region_chunk_remap(struct mshv_mem_region *region,
u32 flags,
u64 page_offset, u64 page_count)
{
- struct page *page = region->pages[page_offset];
+ struct page *page = region->mreg_pages[page_offset];
if (PageHuge(page) || PageTransCompound(page))
flags |= HV_MAP_GPA_LARGE_PAGE;
@@ -222,7 +222,7 @@ static int mshv_region_chunk_remap(struct mshv_mem_region *region,
return hv_call_map_gpa_pages(region->partition->pt_id,
region->start_gfn + page_offset,
page_count, flags,
- region->pages + page_offset);
+ region->mreg_pages + page_offset);
}
static int mshv_region_remap_pages(struct mshv_mem_region *region,
@@ -245,10 +245,10 @@ int mshv_region_map(struct mshv_mem_region *region)
static void mshv_region_invalidate_pages(struct mshv_mem_region *region,
u64 page_offset, u64 page_count)
{
- if (region->type == MSHV_REGION_TYPE_MEM_PINNED)
- unpin_user_pages(region->pages + page_offset, page_count);
+ if (region->mreg_type == MSHV_REGION_TYPE_MEM_PINNED)
+ unpin_user_pages(region->mreg_pages + page_offset, page_count);
- memset(region->pages + page_offset, 0,
+ memset(region->mreg_pages + page_offset, 0,
page_count * sizeof(struct page *));
}
@@ -265,7 +265,7 @@ int mshv_region_pin(struct mshv_mem_region *region)
int ret;
for (done_count = 0; done_count < region->nr_pages; done_count += ret) {
- pages = region->pages + done_count;
+ pages = region->mreg_pages + done_count;
userspace_addr = region->start_uaddr +
done_count * HV_HYP_PAGE_SIZE;
nr_pages = min(region->nr_pages - done_count,
@@ -297,7 +297,7 @@ static int mshv_region_chunk_unmap(struct mshv_mem_region *region,
u32 flags,
u64 page_offset, u64 page_count)
{
- struct page *page = region->pages[page_offset];
+ struct page *page = region->mreg_pages[page_offset];
if (PageHuge(page) || PageTransCompound(page))
flags |= HV_UNMAP_GPA_LARGE_PAGE;
@@ -321,7 +321,7 @@ static void mshv_region_destroy(struct kref *ref)
struct mshv_partition *partition = region->partition;
int ret;
- if (region->type == MSHV_REGION_TYPE_MEM_MOVABLE)
+ if (region->mreg_type == MSHV_REGION_TYPE_MEM_MOVABLE)
mshv_region_movable_fini(region);
if (mshv_partition_encrypted(partition)) {
@@ -374,9 +374,9 @@ static int mshv_region_hmm_fault_and_lock(struct mshv_mem_region *region,
int ret;
range->notifier_seq = mmu_interval_read_begin(range->notifier);
- mmap_read_lock(region->mni.mm);
+ mmap_read_lock(region->mreg_mni.mm);
ret = hmm_range_fault(range);
- mmap_read_unlock(region->mni.mm);
+ mmap_read_unlock(region->mreg_mni.mm);
if (ret)
return ret;
@@ -407,7 +407,7 @@ static int mshv_region_range_fault(struct mshv_mem_region *region,
u64 page_offset, u64 page_count)
{
struct hmm_range range = {
- .notifier = &region->mni,
+ .notifier = &region->mreg_mni,
.default_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
};
unsigned long *pfns;
@@ -430,7 +430,7 @@ static int mshv_region_range_fault(struct mshv_mem_region *region,
goto out;
for (i = 0; i < page_count; i++)
- region->pages[page_offset + i] = hmm_pfn_to_page(pfns[i]);
+ region->mreg_pages[page_offset + i] = hmm_pfn_to_page(pfns[i]);
ret = mshv_region_remap_pages(region, region->hv_map_flags,
page_offset, page_count);
@@ -489,7 +489,7 @@ static bool mshv_region_interval_invalidate(struct mmu_interval_notifier *mni,
{
struct mshv_mem_region *region = container_of(mni,
struct mshv_mem_region,
- mni);
+ mreg_mni);
u64 page_offset, page_count;
unsigned long mstart, mend;
int ret = -EPERM;
@@ -535,14 +535,14 @@ static const struct mmu_interval_notifier_ops mshv_region_mni_ops = {
void mshv_region_movable_fini(struct mshv_mem_region *region)
{
- mmu_interval_notifier_remove(&region->mni);
+ mmu_interval_notifier_remove(&region->mreg_mni);
}
bool mshv_region_movable_init(struct mshv_mem_region *region)
{
int ret;
- ret = mmu_interval_notifier_insert(&region->mni, current->mm,
+ ret = mmu_interval_notifier_insert(&region->mreg_mni, current->mm,
region->start_uaddr,
region->nr_pages << HV_HYP_PAGE_SHIFT,
&mshv_region_mni_ops);
diff --git a/drivers/hv/mshv_root.h b/drivers/hv/mshv_root.h
index 3c1d88b36741..f5b6d3979e5a 100644
--- a/drivers/hv/mshv_root.h
+++ b/drivers/hv/mshv_root.h
@@ -85,10 +85,10 @@ struct mshv_mem_region {
u64 start_uaddr;
u32 hv_map_flags;
struct mshv_partition *partition;
- enum mshv_region_type type;
- struct mmu_interval_notifier mni;
+ enum mshv_region_type mreg_type;
+ struct mmu_interval_notifier mreg_mni;
struct mutex mutex; /* protects region pages remapping */
- struct page *pages[];
+ struct page *mreg_pages[];
};
struct mshv_irq_ack_notifier {
diff --git a/drivers/hv/mshv_root_main.c b/drivers/hv/mshv_root_main.c
index 1134a82c7881..eff1b21461dc 100644
--- a/drivers/hv/mshv_root_main.c
+++ b/drivers/hv/mshv_root_main.c
@@ -657,7 +657,7 @@ static bool mshv_handle_gpa_intercept(struct mshv_vp *vp)
return false;
/* Only movable memory ranges are supported for GPA intercepts */
- if (region->type == MSHV_REGION_TYPE_MEM_MOVABLE)
+ if (region->mreg_type == MSHV_REGION_TYPE_MEM_MOVABLE)
ret = mshv_region_handle_gfn_fault(region, gfn);
else
ret = false;
@@ -1175,12 +1175,12 @@ static int mshv_partition_create_region(struct mshv_partition *partition,
return PTR_ERR(rg);
if (is_mmio)
- rg->type = MSHV_REGION_TYPE_MMIO;
+ rg->mreg_type = MSHV_REGION_TYPE_MMIO;
else if (mshv_partition_encrypted(partition) ||
!mshv_region_movable_init(rg))
- rg->type = MSHV_REGION_TYPE_MEM_PINNED;
+ rg->mreg_type = MSHV_REGION_TYPE_MEM_PINNED;
else
- rg->type = MSHV_REGION_TYPE_MEM_MOVABLE;
+ rg->mreg_type = MSHV_REGION_TYPE_MEM_MOVABLE;
rg->partition = partition;
@@ -1297,7 +1297,7 @@ mshv_map_user_memory(struct mshv_partition *partition,
if (ret)
return ret;
- switch (region->type) {
+ switch (region->mreg_type) {
case MSHV_REGION_TYPE_MEM_PINNED:
ret = mshv_prepare_pinned_region(region);
break;
--
2.51.2.vfs.0.1
next reply other threads:[~2026-01-09 20:06 UTC|newest]
Thread overview: 4+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-01-09 20:06 Mukesh Rathor [this message]
2026-01-12 16:55 ` [PATCH] mshv: make certain field names descriptive in a header struct Nuno Das Neves
2026-01-14 0:41 ` Stanislav Kinsburskii
2026-01-14 19:27 ` Mukesh R
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260109200611.1422390-1-mrathor@linux.microsoft.com \
--to=mrathor@linux.microsoft.com \
--cc=linux-hyperv@vger.kernel.org \
--cc=nunodasneves@linux.microsoft.com \
--cc=wei.liu@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox