From: Alexander Potapenko <glider@google.com>
To: glider@google.com, catalin.marinas@arm.com, will@kernel.org,
pcc@google.com, andreyknvl@gmail.com,
andriy.shevchenko@linux.intel.com, linux@rasmusvillemoes.dk,
yury.norov@gmail.com
Cc: linux-kernel@vger.kernel.org,
linux-arm-kernel@lists.infradead.org, eugenis@google.com,
syednwaris@gmail.com, william.gray@linaro.org
Subject: [PATCH v4 2/5] lib/test_bitmap: add tests for bitmap_{set,get}_value()
Date: Thu, 20 Jul 2023 19:39:53 +0200 [thread overview]
Message-ID: <20230720173956.3674987-3-glider@google.com> (raw)
In-Reply-To: <20230720173956.3674987-1-glider@google.com>
Add basic tests ensuring that values can be added at arbitrary positions
of the bitmap, including those spanning into the adjacent unsigned
longs.
Signed-off-by: Alexander Potapenko <glider@google.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
---
This patch was previously called
"lib/test_bitmap: add tests for bitmap_{set,get}_value_unaligned"
(https://lore.kernel.org/lkml/20230713125706.2884502-3-glider@google.com/)
v4:
- Address comments by Andy Shevchenko: added Reviewed-by: and a link to
the previous discussion
- Address comments by Yury Norov:
- expand the bitmap to catch more corner cases
- add code testing that bitmap_set_value() does not touch adjacent
bits
- add code testing the nbits==0 case
- rename bitmap_{get,set}_value() to bitmap_{read,write}()
v3:
- switch to using bitmap_{set,get}_value()
- change the expected bit pattern in test_set_get_value(),
as the test was incorrectly assuming 0 is the LSB.
---
lib/test_bitmap.c | 81 +++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 81 insertions(+)
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 187f5b2db4cf1..601000c7799df 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -71,6 +71,17 @@ __check_eq_uint(const char *srcfile, unsigned int line,
return true;
}
+/*
+ * Compare the expected unsigned long @exp_ulong with the actual value @x.
+ * On mismatch, report both values together with the calling test's
+ * file and line (passed in by the __expect_eq() wrapper) via pr_err().
+ * Returns true iff the values are equal.
+ */
+static bool __init
+__check_eq_ulong(const char *srcfile, unsigned int line,
+ const unsigned long exp_ulong, unsigned long x)
+{
+ if (exp_ulong != x) {
+ pr_err("[%s:%u] expected %lu, got %lu\n",
+ srcfile, line, exp_ulong, x);
+ return false;
+ }
+ return true;
+}
static bool __init
__check_eq_bitmap(const char *srcfile, unsigned int line,
@@ -186,6 +197,7 @@ __check_eq_str(const char *srcfile, unsigned int line,
})
#define expect_eq_uint(...) __expect_eq(uint, ##__VA_ARGS__)
+#define expect_eq_ulong(...) __expect_eq(ulong, ##__VA_ARGS__)
#define expect_eq_bitmap(...) __expect_eq(bitmap, ##__VA_ARGS__)
#define expect_eq_pbl(...) __expect_eq(pbl, ##__VA_ARGS__)
#define expect_eq_u32_array(...) __expect_eq(u32_array, ##__VA_ARGS__)
@@ -1222,6 +1234,73 @@ static void __init test_bitmap_const_eval(void)
BUILD_BUG_ON(~var != ~BIT(25));
}
+/*
+ * Test bitmap should be big enough to include the cases when start is not in
+ * the first word, and start+nbits lands in the following word.
+ */
+#define TEST_BIT_LEN (BITS_PER_LONG * 3)
+#define TEST_BYTE_LEN (BITS_TO_LONGS(TEST_BIT_LEN) * sizeof(unsigned long))
+static void __init test_set_get_value(void)
+{
+ DECLARE_BITMAP(bitmap, TEST_BIT_LEN);
+ DECLARE_BITMAP(exp_bitmap, TEST_BIT_LEN);
+ /* Prevent constant folding. */
+ volatile unsigned long zero_bits = 0;
+ unsigned long val, bit;
+ int i;
+
+ /* Setting/getting zero bits should not crash the kernel, even for NULL. */
+ bitmap_write(NULL, 0, 0, zero_bits);
+ val = bitmap_read(NULL, 0, zero_bits);
+ expect_eq_ulong(0, val);
+
+ /*
+ * Ensure that bitmap_read() reads the same value that was previously
+ * written, and two consecutive values are correctly merged.
+ * The resulting bit pattern is asymmetric to rule out possible issues
+ * with bit numeration order.
+ */
+ for (i = 0; i < TEST_BIT_LEN - 7; i++) {
+ bitmap_zero(bitmap, TEST_BIT_LEN);
+ bitmap_write(bitmap, 0b10101UL, i, 5);
+ val = bitmap_read(bitmap, i, 5);
+ expect_eq_ulong(0b10101UL, val);
+ bitmap_write(bitmap, 0b101UL, i + 5, 3);
+ val = bitmap_read(bitmap, i + 5, 3);
+ expect_eq_ulong(0b101UL, val);
+ val = bitmap_read(bitmap, i, 8);
+ expect_eq_ulong(0b10110101UL, val);
+ }
+
+ /*
+ * Check that setting a single bit does not accidentally touch the
+ * adjacent bits.
+ */
+ for (i = 0; i < TEST_BIT_LEN; i++) {
+ /*
+ * A 0b10101010 pattern to catch both 0s replaced by 1s and vice
+ * versa.
+ */
+ memset(bitmap, 0xaa, TEST_BYTE_LEN);
+ memset(exp_bitmap, 0xaa, TEST_BYTE_LEN);
+ for (bit = 0; bit <= 1; bit++) {
+ bitmap_write(bitmap, bit, i, 1);
+ __assign_bit(i, exp_bitmap, bit);
+ expect_eq_bitmap(exp_bitmap, bitmap, TEST_BIT_LEN);
+ }
+ }
+
+ /* Ensure setting 0 bits does not change anything. */
+ memset(bitmap, 0xaa, TEST_BYTE_LEN);
+ memset(exp_bitmap, 0xaa, TEST_BYTE_LEN);
+ for (i = 0; i < TEST_BIT_LEN; i++) {
+ bitmap_write(bitmap, ~0UL, i, 0);
+ expect_eq_bitmap(exp_bitmap, bitmap, TEST_BIT_LEN);
+ }
+}
+#undef TEST_BYTE_LEN
+#undef TEST_BIT_LEN
+
static void __init selftest(void)
{
test_zero_clear();
@@ -1249,6 +1328,8 @@ static void __init selftest(void)
test_for_each_clear_bitrange_from();
test_for_each_set_clump8();
test_for_each_set_bit_wrap();
+
+ test_set_get_value();
}
KSTM_MODULE_LOADERS(test_bitmap);
--
2.41.0.487.g6d72f3e995-goog
next prev parent reply other threads:[~2023-07-20 17:40 UTC|newest]
Thread overview: 33+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-07-20 17:39 [PATCH v4 0/5] Implement MTE tag compression for swapped pages Alexander Potapenko
2023-07-20 17:39 ` [PATCH v4 1/5] lib/bitmap: add bitmap_{set,get}_value() Alexander Potapenko
2023-07-23 1:57 ` Yury Norov
2023-07-23 15:38 ` Yury Norov
2023-09-22 7:48 ` Alexander Potapenko
2023-07-24 8:36 ` Andy Shevchenko
2023-07-25 5:04 ` Yury Norov
2023-07-25 9:00 ` Andy Shevchenko
2023-07-26 8:08 ` Alexander Potapenko
2023-07-27 0:14 ` Yury Norov
2023-08-04 16:07 ` Alexander Potapenko
2023-08-04 19:55 ` Yury Norov
2023-08-04 20:05 ` Andy Shevchenko
2023-09-22 7:49 ` Alexander Potapenko
2023-09-22 7:47 ` Alexander Potapenko
2023-07-20 17:39 ` Alexander Potapenko [this message]
2023-07-23 2:29 ` [PATCH v4 2/5] lib/test_bitmap: add tests for bitmap_{set,get}_value() Yury Norov
2023-09-22 7:57 ` Alexander Potapenko
2023-09-22 13:28 ` Yury Norov
2023-09-27 12:33 ` Alexander Potapenko
2023-07-20 17:39 ` [PATCH v4 3/5] arm64: mte: implement CONFIG_ARM64_MTE_COMP Alexander Potapenko
2023-07-21 11:22 ` Andy Shevchenko
2023-09-22 8:03 ` Alexander Potapenko
2023-08-18 17:57 ` Catalin Marinas
2023-09-22 8:04 ` Alexander Potapenko
2023-07-20 17:39 ` [PATCH v4 4/5] arm64: mte: add a test for MTE tags compression Alexander Potapenko
2023-07-21 11:25 ` Andy Shevchenko
2023-09-22 8:05 ` Alexander Potapenko
2023-07-20 17:39 ` [PATCH v4 5/5] arm64: mte: add compression support to mteswap.c Alexander Potapenko
2023-08-18 18:18 ` Catalin Marinas
2023-09-20 13:26 ` Alexander Potapenko
2023-09-20 16:18 ` Alexander Potapenko
2023-09-20 14:22 ` Alexander Potapenko
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230720173956.3674987-3-glider@google.com \
--to=glider@google.com \
--cc=andreyknvl@gmail.com \
--cc=andriy.shevchenko@linux.intel.com \
--cc=catalin.marinas@arm.com \
--cc=eugenis@google.com \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux@rasmusvillemoes.dk \
--cc=pcc@google.com \
--cc=syednwaris@gmail.com \
--cc=will@kernel.org \
--cc=william.gray@linaro.org \
--cc=yury.norov@gmail.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox