* [LTP] [PATCH] Fixed hugeshmat05 Test Failure with 1GB Hugepages.
@ 2026-04-06 15:50 Pavithra
2026-04-08 10:09 ` Cyril Hrubis
0 siblings, 1 reply; 2+ messages in thread
From: Pavithra @ 2026-04-06 15:50 UTC (permalink / raw)
To: ltp; +Cc: pavrampu
Modified the test to detect large hugepage sizes (≥1GB) and adjust test
sizes accordingly.
Signed-off-by: Pavithra <pavrampu@linux.ibm.com>
---
.../mem/hugetlb/hugeshmat/hugeshmat05.c | 45 +++++++++++++++----
1 file changed, 36 insertions(+), 9 deletions(-)
diff --git a/testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat05.c b/testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat05.c
index 3b2ae351c..870a61ec1 100644
--- a/testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat05.c
+++ b/testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat05.c
@@ -37,9 +37,13 @@ void setup(void)
{
page_size = getpagesize();
hpage_size = SAFE_READ_MEMINFO("Hugepagesize:") * 1024;
+
+ tst_res(TINFO, "Page size: %ld bytes", page_size);
+ tst_res(TINFO, "Hugepage size: %ld bytes (%ld MB)",
+ hpage_size, hpage_size / (1024 * 1024));
}
-void shm_test(int size)
+void shm_test(long size)
{
int shmid;
char *shmaddr;
@@ -56,7 +60,7 @@ void shm_test(int size)
}
shmaddr[0] = 1;
- tst_res(TINFO, "allocated %d huge bytes", size);
+ tst_res(TINFO, "allocated %ld huge bytes", size);
if (shmdt((const void *)shmaddr) != 0) {
shmctl(shmid, IPC_RMID, NULL);
@@ -69,16 +73,39 @@ void shm_test(int size)
static void test_hugeshmat(void)
{
unsigned int i;
+ long tst_sizes[4];
+
+ /*
+ * For large hugepage sizes (e.g., 1GB), we need to ensure
+ * test sizes are within reasonable bounds and properly aligned.
+ * The original test used N*hpage_size which could be 4GB for 1GB pages.
+ *
+ * We adjust the test to use N/2 multiplier for large hugepages
+ * to avoid excessive memory requirements while still testing the
+ * alignment boundary conditions with multiple pages.
+ */
+ if (hpage_size >= 1024 * 1024 * 1024) {
+ /* For 1GB or larger hugepages, use N/2 pages (2 pages for N=4) */
+ long multiplier = N / 2;
- const int tst_sizes[] = {
- N * hpage_size - page_size,
- N * hpage_size - page_size - 1,
- hpage_size,
- hpage_size + 1
- };
+ tst_sizes[0] = multiplier * hpage_size - page_size;
+ tst_sizes[1] = multiplier * hpage_size - page_size - 1;
+ tst_sizes[2] = hpage_size;
+ tst_sizes[3] = hpage_size + 1;
+ tst_res(TINFO, "Using N/2=%ld hugepage test sizes for large hugepages", multiplier);
+ } else {
+ /* For smaller hugepages (2MB, 16MB, etc.), use original test */
+ tst_sizes[0] = N * hpage_size - page_size;
+ tst_sizes[1] = N * hpage_size - page_size - 1;
+ tst_sizes[2] = hpage_size;
+ tst_sizes[3] = hpage_size + 1;
+ tst_res(TINFO, "Using N=%d hugepage test sizes", N);
+ }
- for (i = 0; i < ARRAY_SIZE(tst_sizes); ++i)
+ for (i = 0; i < ARRAY_SIZE(tst_sizes); ++i) {
+ tst_res(TINFO, "Testing size: %ld bytes", tst_sizes[i]);
shm_test(tst_sizes[i]);
+ }
tst_res(TPASS, "No regression found.");
}
--
2.53.0
--
Mailing list info: https://lists.linux.it/listinfo/ltp
^ permalink raw reply related [flat|nested] 2+ messages in thread

* Re: [LTP] [PATCH] Fixed hugeshmat05 Test Failure with 1GB Hugepages.
2026-04-06 15:50 [LTP] [PATCH] Fixed hugeshmat05 Test Failure with 1GB Hugepages Pavithra
@ 2026-04-08 10:09 ` Cyril Hrubis
0 siblings, 0 replies; 2+ messages in thread
From: Cyril Hrubis @ 2026-04-08 10:09 UTC (permalink / raw)
To: Pavithra; +Cc: ltp
Hi!
> Modified the test to detect large hugepage sizes (≥1GB) and adjust test
> sizes accordingly.
>
> Signed-off-by: Pavithra <pavrampu@linux.ibm.com>
> ---
> .../mem/hugetlb/hugeshmat/hugeshmat05.c | 45 +++++++++++++++----
> 1 file changed, 36 insertions(+), 9 deletions(-)
>
> diff --git a/testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat05.c b/testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat05.c
> index 3b2ae351c..870a61ec1 100644
> --- a/testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat05.c
> +++ b/testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat05.c
> @@ -37,9 +37,13 @@ void setup(void)
> {
> page_size = getpagesize();
> hpage_size = SAFE_READ_MEMINFO("Hugepagesize:") * 1024;
> +
> + tst_res(TINFO, "Page size: %ld bytes", page_size);
> + tst_res(TINFO, "Hugepage size: %ld bytes (%ld MB)",
> + hpage_size, hpage_size / (1024 * 1024));
> }
>
> -void shm_test(int size)
> +void shm_test(long size)
> {
> int shmid;
> char *shmaddr;
> @@ -56,7 +60,7 @@ void shm_test(int size)
> }
>
> shmaddr[0] = 1;
> - tst_res(TINFO, "allocated %d huge bytes", size);
> + tst_res(TINFO, "allocated %ld huge bytes", size);
>
> if (shmdt((const void *)shmaddr) != 0) {
> shmctl(shmid, IPC_RMID, NULL);
> @@ -69,16 +73,39 @@ void shm_test(int size)
> static void test_hugeshmat(void)
> {
> unsigned int i;
> + long tst_sizes[4];
> +
> + /*
> + * For large hugepage sizes (e.g., 1GB), we need to ensure
> + * test sizes are within reasonable bounds and properly aligned.
> + * The original test used N*hpage_size which could be 4GB for 1GB pages.
> + *
> + * We adjust the test to use N/2 multiplier for large hugepages
> + * to avoid excessive memory requirements while still testing the
> + * alignment boundary conditions with multiple pages.
> + */
> + if (hpage_size >= 1024 * 1024 * 1024) {
> + /* For 1GB or larger hugepages, use N/2 pages (2 pages for N=4) */
> + long multiplier = N / 2;
Two obvious comments for this if? Really?
> - const int tst_sizes[] = {
> - N * hpage_size - page_size,
> - N * hpage_size - page_size - 1,
> - hpage_size,
> - hpage_size + 1
> - };
> + tst_sizes[0] = multiplier * hpage_size - page_size;
> + tst_sizes[1] = multiplier * hpage_size - page_size - 1;
> + tst_sizes[2] = hpage_size;
> + tst_sizes[3] = hpage_size + 1;
> + tst_res(TINFO, "Using N/2=%ld hugepage test sizes for large hugepages", multiplier);
> + } else {
> + /* For smaller hugepages (2MB, 16MB, etc.), use original test */
> + tst_sizes[0] = N * hpage_size - page_size;
> + tst_sizes[1] = N * hpage_size - page_size - 1;
> + tst_sizes[2] = hpage_size;
> + tst_sizes[3] = hpage_size + 1;
> + tst_res(TINFO, "Using N=%d hugepage test sizes", N);
> + }
This is ugly. Why can't we set just the multiplier and initialize the
array only once?
> - for (i = 0; i < ARRAY_SIZE(tst_sizes); ++i)
> + for (i = 0; i < ARRAY_SIZE(tst_sizes); ++i) {
> + tst_res(TINFO, "Testing size: %ld bytes", tst_sizes[i]);
This patch adds way too much debugging output. The shm_test already
prints the size.
> shm_test(tst_sizes[i]);
> + }
>
> tst_res(TPASS, "No regression found.");
> }
> --
> 2.53.0
>
>
> --
> Mailing list info: https://lists.linux.it/listinfo/ltp
--
Cyril Hrubis
chrubis@suse.cz
--
Mailing list info: https://lists.linux.it/listinfo/ltp
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2026-04-08 10:09 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-04-06 15:50 [LTP] [PATCH] Fixed hugeshmat05 Test Failure with 1GB Hugepages Pavithra
2026-04-08 10:09 ` Cyril Hrubis
This is a public inbox; see mirroring instructions
for how to clone and mirror all data and code used for this inbox.