* Re: [PATCH v2 1/2] fsx: support reads/writes from buffers backed by hugepages
2024-12-27 19:33 ` [PATCH v2 1/2] fsx: support reads/writes from buffers backed by hugepages Joanne Koong
@ 2024-12-30 5:00 ` Nirjhar Roy
2025-01-06 15:33 ` Brian Foster
2025-01-12 4:16 ` Zorro Lang
2 siblings, 0 replies; 8+ messages in thread
From: Nirjhar Roy @ 2024-12-30 5:00 UTC (permalink / raw)
To: Joanne Koong, fstests; +Cc: linux-fsdevel, bfoster, djwong, kernel-team
On 12/28/24 01:03, Joanne Koong wrote:
> Add support for reads/writes from buffers backed by hugepages.
> This can be enabled through the '-h' flag. This flag should only be used
> on systems where THP capabilities are enabled.
>
> This is motivated by a recent bug that was due to faulty handling of
> userspace buffers backed by hugepages. This patch is a mitigation
> against problems like this in the future.
>
> Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
> ---
> ltp/fsx.c | 108 ++++++++++++++++++++++++++++++++++++++++++++++++------
> 1 file changed, 97 insertions(+), 11 deletions(-)
>
> diff --git a/ltp/fsx.c b/ltp/fsx.c
> index 41933354..fb6a9b31 100644
> --- a/ltp/fsx.c
> +++ b/ltp/fsx.c
> @@ -190,6 +190,7 @@ int o_direct; /* -Z */
> int aio = 0;
> int uring = 0;
> int mark_nr = 0;
> +int hugepages = 0; /* -h flag */
>
> int page_size;
> int page_mask;
> @@ -2471,7 +2472,7 @@ void
> usage(void)
> {
> fprintf(stdout, "usage: %s",
> - "fsx [-dfknqxyzBEFHIJKLORWXZ0]\n\
> + "fsx [-dfhknqxyzBEFHIJKLORWXZ0]\n\
> [-b opnum] [-c Prob] [-g filldata] [-i logdev] [-j logid]\n\
> [-l flen] [-m start:end] [-o oplen] [-p progressinterval]\n\
> [-r readbdy] [-s style] [-t truncbdy] [-w writebdy]\n\
> @@ -2484,6 +2485,7 @@ usage(void)
> -e: pollute post-eof on size changes (default 0)\n\
> -f: flush and invalidate cache after I/O\n\
> -g X: write character X instead of random generated data\n\
> + -h hugepages: use buffers backed by hugepages for reads/writes\n\
> -i logdev: do integrity testing, logdev is the dm log writes device\n\
> -j logid: prefix debug log messsages with this id\n\
> -k: do not truncate existing file and use its size as upper bound on file size\n\
> @@ -2856,6 +2858,95 @@ keep_running(void)
> return numops-- != 0;
> }
>
> +static long
> +get_hugepage_size(void)
> +{
> + const char str[] = "Hugepagesize:";
> + size_t str_len = sizeof(str) - 1;
> + unsigned int hugepage_size = 0;
> + char buffer[64];
> + FILE *file;
> +
> + file = fopen("/proc/meminfo", "r");
> + if (!file) {
> + prterr("get_hugepage_size: fopen /proc/meminfo");
> + return -1;
> + }
> + while (fgets(buffer, sizeof(buffer), file)) {
> + if (strncmp(buffer, str, str_len) == 0) {
> + sscanf(buffer + str_len, "%u", &hugepage_size);
> + break;
> + }
> + }
> + fclose(file);
> + if (!hugepage_size) {
> + prterr("get_hugepage_size: failed to find "
> + "hugepage size in /proc/meminfo\n");
> + return -1;
> + }
> +
> + /* convert from KiB to bytes */
> + return hugepage_size << 10;
Thanks for the change.
> +}
> +
> +static void *
> +init_hugepages_buf(unsigned len, int hugepage_size, int alignment)
> +{
> + void *buf;
> + long buf_size = roundup(len, hugepage_size) + alignment;
> +
> + if (posix_memalign(&buf, hugepage_size, buf_size)) {
> + prterr("posix_memalign for buf");
> + return NULL;
> + }
> + memset(buf, '\0', buf_size);
> + if (madvise(buf, buf_size, MADV_COLLAPSE)) {
> + prterr("madvise collapse for buf");
> + free(buf);
> + return NULL;
> + }
> +
> + return buf;
> +}
> +
> +static void
> +init_buffers(void)
> +{
> + int i;
> +
> + original_buf = (char *) malloc(maxfilelen);
> + for (i = 0; i < maxfilelen; i++)
> + original_buf[i] = random() % 256;
> + if (hugepages) {
> + long hugepage_size = get_hugepage_size();
> + if (hugepage_size == -1) {
> + prterr("get_hugepage_size()");
> + exit(100);
> + }
> + good_buf = init_hugepages_buf(maxfilelen, hugepage_size, writebdy);
> + if (!good_buf) {
> + prterr("init_hugepages_buf failed for good_buf");
> + exit(101);
> + }
> +
> + temp_buf = init_hugepages_buf(maxoplen, hugepage_size, readbdy);
> + if (!temp_buf) {
> + prterr("init_hugepages_buf failed for temp_buf");
> + exit(101);
> + }
> + } else {
> + unsigned long good_buf_len = maxfilelen + writebdy;
> + unsigned long temp_buf_len = maxoplen + readbdy;
> +
> + good_buf = (char *) malloc(good_buf_len);
> + memset(good_buf, '\0', good_buf_len);
minor: maybe use good_buf = (char *)calloc(1, good_buf_len); so we
don't need the separate memset.
> + temp_buf = (char *) malloc(temp_buf_len);
> + memset(temp_buf, '\0', temp_buf_len);
> + }
> + good_buf = round_ptr_up(good_buf, writebdy, 0);
> + temp_buf = round_ptr_up(temp_buf, readbdy, 0);
> +}
> +
> static struct option longopts[] = {
> {"replay-ops", required_argument, 0, 256},
> {"record-ops", optional_argument, 0, 255},
> @@ -2883,7 +2974,7 @@ main(int argc, char **argv)
> setvbuf(stdout, (char *)0, _IOLBF, 0); /* line buffered stdout */
>
> while ((ch = getopt_long(argc, argv,
> - "0b:c:de:fg:i:j:kl:m:no:p:qr:s:t:uw:xyABD:EFJKHzCILN:OP:RS:UWXZ",
> + "0b:c:de:fg:hi:j:kl:m:no:p:qr:s:t:uw:xyABD:EFJKHzCILN:OP:RS:UWXZ",
> longopts, NULL)) != EOF)
> switch (ch) {
> case 'b':
> @@ -2916,6 +3007,9 @@ main(int argc, char **argv)
> case 'g':
> filldata = *optarg;
> break;
> + case 'h':
> + hugepages = 1;
> + break;
> case 'i':
> integrity = 1;
> logdev = strdup(optarg);
> @@ -3229,15 +3323,7 @@ main(int argc, char **argv)
> exit(95);
> }
> }
> - original_buf = (char *) malloc(maxfilelen);
> - for (i = 0; i < maxfilelen; i++)
> - original_buf[i] = random() % 256;
> - good_buf = (char *) malloc(maxfilelen + writebdy);
> - good_buf = round_ptr_up(good_buf, writebdy, 0);
> - memset(good_buf, '\0', maxfilelen);
> - temp_buf = (char *) malloc(maxoplen + readbdy);
> - temp_buf = round_ptr_up(temp_buf, readbdy, 0);
> - memset(temp_buf, '\0', maxoplen);
> + init_buffers();
> if (lite) { /* zero entire existing file */
> ssize_t written;
>
--
Nirjhar Roy
Linux Kernel Developer
IBM, Bangalore
^ permalink raw reply [flat|nested] 8+ messages in thread* Re: [PATCH v2 1/2] fsx: support reads/writes from buffers backed by hugepages
2024-12-27 19:33 ` [PATCH v2 1/2] fsx: support reads/writes from buffers backed by hugepages Joanne Koong
2024-12-30 5:00 ` Nirjhar Roy
@ 2025-01-06 15:33 ` Brian Foster
2025-01-12 4:16 ` Zorro Lang
2 siblings, 0 replies; 8+ messages in thread
From: Brian Foster @ 2025-01-06 15:33 UTC (permalink / raw)
To: Joanne Koong; +Cc: fstests, linux-fsdevel, djwong, nirjhar, kernel-team
On Fri, Dec 27, 2024 at 11:33:10AM -0800, Joanne Koong wrote:
> Add support for reads/writes from buffers backed by hugepages.
> This can be enabled through the '-h' flag. This flag should only be used
> on systems where THP capabilities are enabled.
>
> This is motivated by a recent bug that was due to faulty handling of
> userspace buffers backed by hugepages. This patch is a mitigation
> against problems like this in the future.
>
> Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
> ---
> ltp/fsx.c | 108 ++++++++++++++++++++++++++++++++++++++++++++++++------
> 1 file changed, 97 insertions(+), 11 deletions(-)
Thanks for the buffer init code cleanup. This looks much nicer to me
now. Modulo comments from others:
Reviewed-by: Brian Foster <bfoster@redhat.com>
>
> diff --git a/ltp/fsx.c b/ltp/fsx.c
> index 41933354..fb6a9b31 100644
> --- a/ltp/fsx.c
> +++ b/ltp/fsx.c
> @@ -190,6 +190,7 @@ int o_direct; /* -Z */
> int aio = 0;
> int uring = 0;
> int mark_nr = 0;
> +int hugepages = 0; /* -h flag */
>
> int page_size;
> int page_mask;
> @@ -2471,7 +2472,7 @@ void
> usage(void)
> {
> fprintf(stdout, "usage: %s",
> - "fsx [-dfknqxyzBEFHIJKLORWXZ0]\n\
> + "fsx [-dfhknqxyzBEFHIJKLORWXZ0]\n\
> [-b opnum] [-c Prob] [-g filldata] [-i logdev] [-j logid]\n\
> [-l flen] [-m start:end] [-o oplen] [-p progressinterval]\n\
> [-r readbdy] [-s style] [-t truncbdy] [-w writebdy]\n\
> @@ -2484,6 +2485,7 @@ usage(void)
> -e: pollute post-eof on size changes (default 0)\n\
> -f: flush and invalidate cache after I/O\n\
> -g X: write character X instead of random generated data\n\
> + -h hugepages: use buffers backed by hugepages for reads/writes\n\
> -i logdev: do integrity testing, logdev is the dm log writes device\n\
> -j logid: prefix debug log messsages with this id\n\
> -k: do not truncate existing file and use its size as upper bound on file size\n\
> @@ -2856,6 +2858,95 @@ keep_running(void)
> return numops-- != 0;
> }
>
> +static long
> +get_hugepage_size(void)
> +{
> + const char str[] = "Hugepagesize:";
> + size_t str_len = sizeof(str) - 1;
> + unsigned int hugepage_size = 0;
> + char buffer[64];
> + FILE *file;
> +
> + file = fopen("/proc/meminfo", "r");
> + if (!file) {
> + prterr("get_hugepage_size: fopen /proc/meminfo");
> + return -1;
> + }
> + while (fgets(buffer, sizeof(buffer), file)) {
> + if (strncmp(buffer, str, str_len) == 0) {
> + sscanf(buffer + str_len, "%u", &hugepage_size);
> + break;
> + }
> + }
> + fclose(file);
> + if (!hugepage_size) {
> + prterr("get_hugepage_size: failed to find "
> + "hugepage size in /proc/meminfo\n");
> + return -1;
> + }
> +
> + /* convert from KiB to bytes */
> + return hugepage_size << 10;
> +}
> +
> +static void *
> +init_hugepages_buf(unsigned len, int hugepage_size, int alignment)
> +{
> + void *buf;
> + long buf_size = roundup(len, hugepage_size) + alignment;
> +
> + if (posix_memalign(&buf, hugepage_size, buf_size)) {
> + prterr("posix_memalign for buf");
> + return NULL;
> + }
> + memset(buf, '\0', buf_size);
> + if (madvise(buf, buf_size, MADV_COLLAPSE)) {
> + prterr("madvise collapse for buf");
> + free(buf);
> + return NULL;
> + }
> +
> + return buf;
> +}
> +
> +static void
> +init_buffers(void)
> +{
> + int i;
> +
> + original_buf = (char *) malloc(maxfilelen);
> + for (i = 0; i < maxfilelen; i++)
> + original_buf[i] = random() % 256;
> + if (hugepages) {
> + long hugepage_size = get_hugepage_size();
> + if (hugepage_size == -1) {
> + prterr("get_hugepage_size()");
> + exit(100);
> + }
> + good_buf = init_hugepages_buf(maxfilelen, hugepage_size, writebdy);
> + if (!good_buf) {
> + prterr("init_hugepages_buf failed for good_buf");
> + exit(101);
> + }
> +
> + temp_buf = init_hugepages_buf(maxoplen, hugepage_size, readbdy);
> + if (!temp_buf) {
> + prterr("init_hugepages_buf failed for temp_buf");
> + exit(101);
> + }
> + } else {
> + unsigned long good_buf_len = maxfilelen + writebdy;
> + unsigned long temp_buf_len = maxoplen + readbdy;
> +
> + good_buf = (char *) malloc(good_buf_len);
> + memset(good_buf, '\0', good_buf_len);
> + temp_buf = (char *) malloc(temp_buf_len);
> + memset(temp_buf, '\0', temp_buf_len);
> + }
> + good_buf = round_ptr_up(good_buf, writebdy, 0);
> + temp_buf = round_ptr_up(temp_buf, readbdy, 0);
> +}
> +
> static struct option longopts[] = {
> {"replay-ops", required_argument, 0, 256},
> {"record-ops", optional_argument, 0, 255},
> @@ -2883,7 +2974,7 @@ main(int argc, char **argv)
> setvbuf(stdout, (char *)0, _IOLBF, 0); /* line buffered stdout */
>
> while ((ch = getopt_long(argc, argv,
> - "0b:c:de:fg:i:j:kl:m:no:p:qr:s:t:uw:xyABD:EFJKHzCILN:OP:RS:UWXZ",
> + "0b:c:de:fg:hi:j:kl:m:no:p:qr:s:t:uw:xyABD:EFJKHzCILN:OP:RS:UWXZ",
> longopts, NULL)) != EOF)
> switch (ch) {
> case 'b':
> @@ -2916,6 +3007,9 @@ main(int argc, char **argv)
> case 'g':
> filldata = *optarg;
> break;
> + case 'h':
> + hugepages = 1;
> + break;
> case 'i':
> integrity = 1;
> logdev = strdup(optarg);
> @@ -3229,15 +3323,7 @@ main(int argc, char **argv)
> exit(95);
> }
> }
> - original_buf = (char *) malloc(maxfilelen);
> - for (i = 0; i < maxfilelen; i++)
> - original_buf[i] = random() % 256;
> - good_buf = (char *) malloc(maxfilelen + writebdy);
> - good_buf = round_ptr_up(good_buf, writebdy, 0);
> - memset(good_buf, '\0', maxfilelen);
> - temp_buf = (char *) malloc(maxoplen + readbdy);
> - temp_buf = round_ptr_up(temp_buf, readbdy, 0);
> - memset(temp_buf, '\0', maxoplen);
> + init_buffers();
> if (lite) { /* zero entire existing file */
> ssize_t written;
>
> --
> 2.47.1
>
^ permalink raw reply [flat|nested] 8+ messages in thread* Re: [PATCH v2 1/2] fsx: support reads/writes from buffers backed by hugepages
2024-12-27 19:33 ` [PATCH v2 1/2] fsx: support reads/writes from buffers backed by hugepages Joanne Koong
2024-12-30 5:00 ` Nirjhar Roy
2025-01-06 15:33 ` Brian Foster
@ 2025-01-12 4:16 ` Zorro Lang
2025-01-15 17:53 ` Joanne Koong
2 siblings, 1 reply; 8+ messages in thread
From: Zorro Lang @ 2025-01-12 4:16 UTC (permalink / raw)
To: Joanne Koong
Cc: fstests, linux-fsdevel, bfoster, djwong, nirjhar, kernel-team
On Fri, Dec 27, 2024 at 11:33:10AM -0800, Joanne Koong wrote:
> Add support for reads/writes from buffers backed by hugepages.
> This can be enabled through the '-h' flag. This flag should only be used
> on systems where THP capabilities are enabled.
>
> This is motivated by a recent bug that was due to faulty handling of
> userspace buffers backed by hugepages. This patch is a mitigation
> against problems like this in the future.
>
> Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
> ---
> ltp/fsx.c | 108 ++++++++++++++++++++++++++++++++++++++++++++++++------
> 1 file changed, 97 insertions(+), 11 deletions(-)
>
[snip]
> +static void *
> +init_hugepages_buf(unsigned len, int hugepage_size, int alignment)
> +{
> + void *buf;
> + long buf_size = roundup(len, hugepage_size) + alignment;
> +
> + if (posix_memalign(&buf, hugepage_size, buf_size)) {
> + prterr("posix_memalign for buf");
> + return NULL;
> + }
> + memset(buf, '\0', buf_size);
> + if (madvise(buf, buf_size, MADV_COLLAPSE)) {
Hi Joanne,
Sorry, I have to drop this patchset from the "upcoming" release v2025.01.12, because
it causes a regression build error on older systems, e.g. RHEL-9:
[CC] fsx
fsx.c: In function 'init_hugepages_buf':
fsx.c:2935:36: error: 'MADV_COLLAPSE' undeclared (first use in this function); did you mean 'MADV_COLD'?
2935 | if (madvise(buf, buf_size, MADV_COLLAPSE)) {
| ^~~~~~~~~~~~~
| MADV_COLD
fsx.c:2935:36: note: each undeclared identifier is reported only once for each function it appears in
gmake[4]: *** [Makefile:51: fsx] Error 1
gmake[4]: *** Waiting for unfinished jobs....
gmake[3]: *** [include/buildrules:30: ltp] Error 2
It might make xfstests totally unusable on downstream systems, so it can't
make it into this weekend's release. Sorry about that, let's try to have it
in the next release :)
Thanks,
Zorro
> + prterr("madvise collapse for buf");
> + free(buf);
> + return NULL;
> + }
> +
> + return buf;
> +}
> +
> +static void
> +init_buffers(void)
> +{
> + int i;
> +
> + original_buf = (char *) malloc(maxfilelen);
> + for (i = 0; i < maxfilelen; i++)
> + original_buf[i] = random() % 256;
> + if (hugepages) {
> + long hugepage_size = get_hugepage_size();
> + if (hugepage_size == -1) {
> + prterr("get_hugepage_size()");
> + exit(100);
> + }
> + good_buf = init_hugepages_buf(maxfilelen, hugepage_size, writebdy);
> + if (!good_buf) {
> + prterr("init_hugepages_buf failed for good_buf");
> + exit(101);
> + }
> +
> + temp_buf = init_hugepages_buf(maxoplen, hugepage_size, readbdy);
> + if (!temp_buf) {
> + prterr("init_hugepages_buf failed for temp_buf");
> + exit(101);
> + }
> + } else {
> + unsigned long good_buf_len = maxfilelen + writebdy;
> + unsigned long temp_buf_len = maxoplen + readbdy;
> +
> + good_buf = (char *) malloc(good_buf_len);
> + memset(good_buf, '\0', good_buf_len);
> + temp_buf = (char *) malloc(temp_buf_len);
> + memset(temp_buf, '\0', temp_buf_len);
> + }
> + good_buf = round_ptr_up(good_buf, writebdy, 0);
> + temp_buf = round_ptr_up(temp_buf, readbdy, 0);
> +}
> +
> static struct option longopts[] = {
> {"replay-ops", required_argument, 0, 256},
> {"record-ops", optional_argument, 0, 255},
> @@ -2883,7 +2974,7 @@ main(int argc, char **argv)
> setvbuf(stdout, (char *)0, _IOLBF, 0); /* line buffered stdout */
>
> while ((ch = getopt_long(argc, argv,
> - "0b:c:de:fg:i:j:kl:m:no:p:qr:s:t:uw:xyABD:EFJKHzCILN:OP:RS:UWXZ",
> + "0b:c:de:fg:hi:j:kl:m:no:p:qr:s:t:uw:xyABD:EFJKHzCILN:OP:RS:UWXZ",
> longopts, NULL)) != EOF)
> switch (ch) {
> case 'b':
> @@ -2916,6 +3007,9 @@ main(int argc, char **argv)
> case 'g':
> filldata = *optarg;
> break;
> + case 'h':
> + hugepages = 1;
> + break;
> case 'i':
> integrity = 1;
> logdev = strdup(optarg);
> @@ -3229,15 +3323,7 @@ main(int argc, char **argv)
> exit(95);
> }
> }
> - original_buf = (char *) malloc(maxfilelen);
> - for (i = 0; i < maxfilelen; i++)
> - original_buf[i] = random() % 256;
> - good_buf = (char *) malloc(maxfilelen + writebdy);
> - good_buf = round_ptr_up(good_buf, writebdy, 0);
> - memset(good_buf, '\0', maxfilelen);
> - temp_buf = (char *) malloc(maxoplen + readbdy);
> - temp_buf = round_ptr_up(temp_buf, readbdy, 0);
> - memset(temp_buf, '\0', maxoplen);
> + init_buffers();
> if (lite) { /* zero entire existing file */
> ssize_t written;
>
> --
> 2.47.1
>
>
^ permalink raw reply [flat|nested] 8+ messages in thread* Re: [PATCH v2 1/2] fsx: support reads/writes from buffers backed by hugepages
2025-01-12 4:16 ` Zorro Lang
@ 2025-01-15 17:53 ` Joanne Koong
0 siblings, 0 replies; 8+ messages in thread
From: Joanne Koong @ 2025-01-15 17:53 UTC (permalink / raw)
To: Zorro Lang; +Cc: fstests, linux-fsdevel, bfoster, djwong, nirjhar, kernel-team
On Sat, Jan 11, 2025 at 8:16 PM Zorro Lang <zlang@redhat.com> wrote:
>
> On Fri, Dec 27, 2024 at 11:33:10AM -0800, Joanne Koong wrote:
> > Add support for reads/writes from buffers backed by hugepages.
> > This can be enabled through the '-h' flag. This flag should only be used
> > on systems where THP capabilities are enabled.
> >
> > This is motivated by a recent bug that was due to faulty handling of
> > userspace buffers backed by hugepages. This patch is a mitigation
> > against problems like this in the future.
> >
> > Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
> > ---
> > ltp/fsx.c | 108 ++++++++++++++++++++++++++++++++++++++++++++++++------
> > 1 file changed, 97 insertions(+), 11 deletions(-)
> >
>
> [snip]
>
> > +static void *
> > +init_hugepages_buf(unsigned len, int hugepage_size, int alignment)
> > +{
> > + void *buf;
> > + long buf_size = roundup(len, hugepage_size) + alignment;
> > +
> > + if (posix_memalign(&buf, hugepage_size, buf_size)) {
> > + prterr("posix_memalign for buf");
> > + return NULL;
> > + }
> > + memset(buf, '\0', buf_size);
> > + if (madvise(buf, buf_size, MADV_COLLAPSE)) {
>
> Hi Joanne,
>
> Sorry I have to drop this patchset from the "upcoming" release v2025.01.12. Due to
> it cause a regression build error on older system, e.g. RHEL-9:
>
> [CC] fsx
> fsx.c: In function 'init_hugepages_buf':
> fsx.c:2935:36: error: 'MADV_COLLAPSE' undeclared (first use in this function); did you mean 'MADV_COLD'?
> 2935 | if (madvise(buf, buf_size, MADV_COLLAPSE)) {
> | ^~~~~~~~~~~~~
> | MADV_COLD
> fsx.c:2935:36: note: each undeclared identifier is reported only once for each function it appears in
> gmake[4]: *** [Makefile:51: fsx] Error 1
> gmake[4]: *** Waiting for unfinished jobs....
> gmake[3]: *** [include/buildrules:30: ltp] Error 2
>
> It might cause xfstests totally can't be used on downstream systems, so it can't
> catch up the release of this weekend. Sorry about that, let's try to have it
> in next release :)
Hi Zorro,
Thanks for the update. I'll submit a v3 of this patch that gates this
function behind #ifdef MADV_COLLAPSE, and hopefully that should fix
this issue.
Thanks,
Joanne
>
> Thanks,
> Zorro
>
>
> > + prterr("madvise collapse for buf");
> > + free(buf);
> > + return NULL;
> > + }
> > +
> > + return buf;
> > +}
> > +
> > +static void
> > +init_buffers(void)
> > +{
> > + int i;
> > +
> > + original_buf = (char *) malloc(maxfilelen);
> > + for (i = 0; i < maxfilelen; i++)
> > + original_buf[i] = random() % 256;
> > + if (hugepages) {
> > + long hugepage_size = get_hugepage_size();
> > + if (hugepage_size == -1) {
> > + prterr("get_hugepage_size()");
> > + exit(100);
> > + }
> > + good_buf = init_hugepages_buf(maxfilelen, hugepage_size, writebdy);
> > + if (!good_buf) {
> > + prterr("init_hugepages_buf failed for good_buf");
> > + exit(101);
> > + }
> > +
> > + temp_buf = init_hugepages_buf(maxoplen, hugepage_size, readbdy);
> > + if (!temp_buf) {
> > + prterr("init_hugepages_buf failed for temp_buf");
> > + exit(101);
> > + }
> > + } else {
> > + unsigned long good_buf_len = maxfilelen + writebdy;
> > + unsigned long temp_buf_len = maxoplen + readbdy;
> > +
> > + good_buf = (char *) malloc(good_buf_len);
> > + memset(good_buf, '\0', good_buf_len);
> > + temp_buf = (char *) malloc(temp_buf_len);
> > + memset(temp_buf, '\0', temp_buf_len);
> > + }
> > + good_buf = round_ptr_up(good_buf, writebdy, 0);
> > + temp_buf = round_ptr_up(temp_buf, readbdy, 0);
> > +}
> > +
> > static struct option longopts[] = {
> > {"replay-ops", required_argument, 0, 256},
> > {"record-ops", optional_argument, 0, 255},
> > @@ -2883,7 +2974,7 @@ main(int argc, char **argv)
> > setvbuf(stdout, (char *)0, _IOLBF, 0); /* line buffered stdout */
> >
> > while ((ch = getopt_long(argc, argv,
> > - "0b:c:de:fg:i:j:kl:m:no:p:qr:s:t:uw:xyABD:EFJKHzCILN:OP:RS:UWXZ",
> > + "0b:c:de:fg:hi:j:kl:m:no:p:qr:s:t:uw:xyABD:EFJKHzCILN:OP:RS:UWXZ",
> > longopts, NULL)) != EOF)
> > switch (ch) {
> > case 'b':
> > @@ -2916,6 +3007,9 @@ main(int argc, char **argv)
> > case 'g':
> > filldata = *optarg;
> > break;
> > + case 'h':
> > + hugepages = 1;
> > + break;
> > case 'i':
> > integrity = 1;
> > logdev = strdup(optarg);
> > @@ -3229,15 +3323,7 @@ main(int argc, char **argv)
> > exit(95);
> > }
> > }
> > - original_buf = (char *) malloc(maxfilelen);
> > - for (i = 0; i < maxfilelen; i++)
> > - original_buf[i] = random() % 256;
> > - good_buf = (char *) malloc(maxfilelen + writebdy);
> > - good_buf = round_ptr_up(good_buf, writebdy, 0);
> > - memset(good_buf, '\0', maxfilelen);
> > - temp_buf = (char *) malloc(maxoplen + readbdy);
> > - temp_buf = round_ptr_up(temp_buf, readbdy, 0);
> > - memset(temp_buf, '\0', maxoplen);
> > + init_buffers();
> > if (lite) { /* zero entire existing file */
> > ssize_t written;
> >
> > --
> > 2.47.1
> >
> >
>
^ permalink raw reply [flat|nested] 8+ messages in thread