* [kvm-unit-tests PATCH] s390x: Test effect of storage keys on some instructions
@ 2022-02-24 11:09 Janis Schoetterl-Glausch
2022-02-24 14:30 ` Claudio Imbrenda
0 siblings, 1 reply; 3+ messages in thread
From: Janis Schoetterl-Glausch @ 2022-02-24 11:09 UTC (permalink / raw)
To: Thomas Huth, Janosch Frank, Claudio Imbrenda
Cc: Janis Schoetterl-Glausch, David Hildenbrand, kvm, linux-s390
Some instructions are emulated by KVM. Test that KVM correctly emulates
storage key checking for two of those instructions (STORE CPU ADDRESS,
SET PREFIX).
Test success and error conditions, including coverage of storage and
fetch protection override.
Also add test for TEST PROTECTION, even if that instruction will not be
emulated by KVM under normal conditions.
Signed-off-by: Janis Schoetterl-Glausch <scgl@linux.ibm.com>
---
*entry_0_p = entry_pagebuf;
I'm wondering if we need a barrier here, or would if set_prefix_key_1
wasn't made up of an asm volatile. But the mmu code seems to not have a
barrier in the equivalent code, so maybe it's never needed.
set_prefix_key_1(0);
lib/s390x/asm/arch_def.h | 20 ++---
s390x/skey.c | 169 +++++++++++++++++++++++++++++++++++++++
2 files changed, 180 insertions(+), 9 deletions(-)
diff --git a/lib/s390x/asm/arch_def.h b/lib/s390x/asm/arch_def.h
index 40626d72..e443a9cd 100644
--- a/lib/s390x/asm/arch_def.h
+++ b/lib/s390x/asm/arch_def.h
@@ -55,15 +55,17 @@ struct psw {
#define PSW_MASK_BA 0x0000000080000000UL
#define PSW_MASK_64 (PSW_MASK_BA | PSW_MASK_EA)
-#define CTL0_LOW_ADDR_PROT (63 - 35)
-#define CTL0_EDAT (63 - 40)
-#define CTL0_IEP (63 - 43)
-#define CTL0_AFP (63 - 45)
-#define CTL0_VECTOR (63 - 46)
-#define CTL0_EMERGENCY_SIGNAL (63 - 49)
-#define CTL0_EXTERNAL_CALL (63 - 50)
-#define CTL0_CLOCK_COMPARATOR (63 - 52)
-#define CTL0_SERVICE_SIGNAL (63 - 54)
+#define CTL0_LOW_ADDR_PROT (63 - 35)
+#define CTL0_EDAT (63 - 40)
+#define CTL0_FETCH_PROTECTION_OVERRIDE (63 - 38)
+#define CTL0_STORAGE_PROTECTION_OVERRIDE (63 - 39)
+#define CTL0_IEP (63 - 43)
+#define CTL0_AFP (63 - 45)
+#define CTL0_VECTOR (63 - 46)
+#define CTL0_EMERGENCY_SIGNAL (63 - 49)
+#define CTL0_EXTERNAL_CALL (63 - 50)
+#define CTL0_CLOCK_COMPARATOR (63 - 52)
+#define CTL0_SERVICE_SIGNAL (63 - 54)
#define CR0_EXTM_MASK 0x0000000000006200UL /* Combined external masks */
#define CTL2_GUARDED_STORAGE (63 - 59)
diff --git a/s390x/skey.c b/s390x/skey.c
index 58a55436..6ae2d026 100644
--- a/s390x/skey.c
+++ b/s390x/skey.c
@@ -10,7 +10,10 @@
#include <libcflat.h>
#include <asm/asm-offsets.h>
#include <asm/interrupt.h>
+#include <vmalloc.h>
+#include <mmu.h>
#include <asm/page.h>
+#include <asm/pgtable.h>
#include <asm/facility.h>
#include <asm/mem.h>
@@ -147,6 +150,167 @@ static void test_invalid_address(void)
report_prefix_pop();
}
+static void test_test_protection(void)
+{
+ unsigned long addr = (unsigned long)pagebuf;
+
+ report_prefix_push("TPROT");
+ set_storage_key(pagebuf, 0x10, 0);
+ report(tprot(addr, 0) == 0, "access key 0 -> no protection");
+ report(tprot(addr, 1) == 0, "access key matches -> no protection");
+ report(tprot(addr, 2) == 1, "access key mismatches, no fetch protection -> store protection");
+ set_storage_key(pagebuf, 0x18, 0);
+ report(tprot(addr, 2) == 2, "access key mismatches, fetch protection -> fetch & store protection");
+ report_prefix_pop();
+}
+
+static void store_cpu_address_key_1(uint16_t *out)
+{
+ asm volatile (
+ "spka 0x10(0)\n\t"
+ "stap %0\n\t"
+ "spka 0(0)\n"
+ : "=Q" (*out)
+ );
+}
+
+static void test_store_cpu_address(void)
+{
+ uint16_t *out = (uint16_t *)pagebuf;
+ uint16_t cpu_addr;
+
+ asm ("stap %0" : "=Q" (cpu_addr));
+
+ report_prefix_push("STORE CPU ADDRESS, zero key");
+ set_storage_key(pagebuf, 0x20, 0);
+ *out = 0xbeef;
+ asm ("stap %0" : "=Q" (*out));
+ report(*out == cpu_addr, "store occurred");
+ report_prefix_pop();
+
+ report_prefix_push("STORE CPU ADDRESS, matching key");
+ set_storage_key(pagebuf, 0x10, 0);
+ *out = 0xbeef;
+ store_cpu_address_key_1(out);
+ report(*out == cpu_addr, "store occurred");
+ report_prefix_pop();
+
+ report_prefix_push("STORE CPU ADDRESS, mismatching key");
+ set_storage_key(pagebuf, 0x20, 0);
+ expect_pgm_int();
+ store_cpu_address_key_1(out);
+ check_pgm_int_code(PGM_INT_CODE_PROTECTION);
+ report_prefix_pop();
+
+ ctl_set_bit(0, CTL0_STORAGE_PROTECTION_OVERRIDE);
+
+ report_prefix_push("STORE CPU ADDRESS, storage-protection override, invalid key");
+ set_storage_key(pagebuf, 0x20, 0);
+ expect_pgm_int();
+ store_cpu_address_key_1(out);
+ check_pgm_int_code(PGM_INT_CODE_PROTECTION);
+ report_prefix_pop();
+
+ report_prefix_push("STORE CPU ADDRESS, storage-protection override, override key");
+ set_storage_key(pagebuf, 0x90, 0);
+ *out = 0xbeef;
+ store_cpu_address_key_1(out);
+ report(*out == cpu_addr, "override occurred");
+ report_prefix_pop();
+
+ ctl_clear_bit(0, CTL0_STORAGE_PROTECTION_OVERRIDE);
+}
+
+static void set_prefix_key_1(uint32_t *out)
+{
+ asm volatile (
+ "spka 0x10(0)\n\t"
+ "spx %0\n\t"
+ "spka 0(0)\n"
+ : "=Q" (*out)
+ );
+}
+
+/*
+ * We remapped page 0, making the lowcore inaccessible, which breaks the normal
+ * handler and breaks skipping the faulting instruction.
+ * Just disable dynamic address translation to make things work.
+ */
+static void dat_fixup_pgm_int(void)
+{
+ uint64_t psw_mask = extract_psw_mask();
+
+ psw_mask &= ~PSW_MASK_DAT;
+ load_psw_mask(psw_mask);
+}
+
+static void test_set_prefix(void)
+{
+ uint32_t *out = (uint32_t *)pagebuf;
+ pgd_t *root;
+ pte_t *entry_0_p;
+ pte_t entry_lowcore, entry_pagebuf;
+
+ root = (pgd_t *)(stctg(1) & PAGE_MASK);
+ entry_0_p = get_dat_entry(root, 0, pgtable_level_pte);
+ entry_lowcore = *entry_0_p;
+ entry_pagebuf = __pte((virt_to_pte_phys(root, out) & PAGE_MASK));
+
+ asm volatile("stpx %0" : "=Q"(*out));
+
+ report_prefix_push("SET PREFIX, zero key");
+ set_storage_key(pagebuf, 0x20, 0);
+ asm volatile("spx %0" : "=Q" (*out));
+ report_pass("no exception");
+ report_prefix_pop();
+
+ report_prefix_push("SET PREFIX, matching key");
+ set_storage_key(pagebuf, 0x10, 0);
+ set_prefix_key_1(out);
+ report_pass("no exception");
+ report_prefix_pop();
+
+ report_prefix_push("SET PREFIX, mismatching key, no fetch protection");
+ set_storage_key(pagebuf, 0x20, 0);
+ set_prefix_key_1(out);
+ report_pass("no exception");
+ report_prefix_pop();
+
+ report_prefix_push("SET PREFIX, mismatching key, fetch protection");
+ set_storage_key(pagebuf, 0x28, 0);
+ expect_pgm_int();
+ set_prefix_key_1(out);
+ check_pgm_int_code(PGM_INT_CODE_PROTECTION);
+ report_prefix_pop();
+
+ register_pgm_cleanup_func(dat_fixup_pgm_int);
+ ctl_set_bit(0, CTL0_FETCH_PROTECTION_OVERRIDE);
+
+ report_prefix_push("SET PREFIX, mismatching key, fetch protection override applies");
+ set_storage_key(pagebuf, 0x28, 0);
+ ipte(0, &pte_val(*entry_0_p));
+ *entry_0_p = entry_pagebuf;
+ set_prefix_key_1(0);
+ ipte(0, &pte_val(*entry_0_p));
+ *entry_0_p = entry_lowcore;
+ report_pass("no exception");
+ report_prefix_pop();
+
+ report_prefix_push("SET PREFIX, mismatching key, fetch protection override does not apply");
+ set_storage_key(pagebuf, 0x28, 0);
+ expect_pgm_int();
+ ipte(0, &pte_val(*entry_0_p));
+ *entry_0_p = entry_pagebuf;
+ set_prefix_key_1((uint32_t *)2048);
+ ipte(0, &pte_val(*entry_0_p));
+ *entry_0_p = entry_lowcore;
+ check_pgm_int_code(PGM_INT_CODE_PROTECTION);
+ report_prefix_pop();
+
+ ctl_clear_bit(0, CTL0_FETCH_PROTECTION_OVERRIDE);
+ register_pgm_cleanup_func(NULL);
+}
+
int main(void)
{
report_prefix_push("skey");
@@ -159,6 +323,11 @@ int main(void)
test_set();
test_set_mb();
test_chg();
+ test_test_protection();
+ test_store_cpu_address();
+
+ setup_vm();
+ test_set_prefix();
done:
report_prefix_pop();
return report_summary();
base-commit: 257c962f3d1b2d0534af59de4ad18764d734903a
--
2.33.1
^ permalink raw reply related [flat|nested] 3+ messages in thread
* Re: [kvm-unit-tests PATCH] s390x: Test effect of storage keys on some instructions
2022-02-24 11:09 [kvm-unit-tests PATCH] s390x: Test effect of storage keys on some instructions Janis Schoetterl-Glausch
@ 2022-02-24 14:30 ` Claudio Imbrenda
2022-02-24 15:55 ` Janis Schoetterl-Glausch
0 siblings, 1 reply; 3+ messages in thread
From: Claudio Imbrenda @ 2022-02-24 14:30 UTC (permalink / raw)
To: Janis Schoetterl-Glausch
Cc: Thomas Huth, Janosch Frank, David Hildenbrand, kvm, linux-s390
On Thu, 24 Feb 2022 12:09:50 +0100
Janis Schoetterl-Glausch <scgl@linux.ibm.com> wrote:
> Some instructions are emulated by KVM. Test that KVM correctly emulates
> storage key checking for two of those instructions (STORE CPU ADDRESS,
> SET PREFIX).
> Test success and error conditions, including coverage of storage and
> fetch protection override.
> Also add test for TEST PROTECTION, even if that instruction will not be
> emulated by KVM under normal conditions.
>
> Signed-off-by: Janis Schoetterl-Glausch <scgl@linux.ibm.com>
> ---
>
> *entry_0_p = entry_pagebuf;
>
> I'm wondering if we need a barrier here, or would if set_prefix_key_1
> wasn't made up of an asm volatile. But the mmu code seems to not have a
> barrier in the equivalent code, so maybe it's never needed.
>
> set_prefix_key_1(0);
>
> lib/s390x/asm/arch_def.h | 20 ++---
> s390x/skey.c | 169 +++++++++++++++++++++++++++++++++++++++
> 2 files changed, 180 insertions(+), 9 deletions(-)
>
> diff --git a/lib/s390x/asm/arch_def.h b/lib/s390x/asm/arch_def.h
> index 40626d72..e443a9cd 100644
> --- a/lib/s390x/asm/arch_def.h
> +++ b/lib/s390x/asm/arch_def.h
> @@ -55,15 +55,17 @@ struct psw {
> #define PSW_MASK_BA 0x0000000080000000UL
> #define PSW_MASK_64 (PSW_MASK_BA | PSW_MASK_EA)
>
> -#define CTL0_LOW_ADDR_PROT (63 - 35)
> -#define CTL0_EDAT (63 - 40)
> -#define CTL0_IEP (63 - 43)
> -#define CTL0_AFP (63 - 45)
> -#define CTL0_VECTOR (63 - 46)
> -#define CTL0_EMERGENCY_SIGNAL (63 - 49)
> -#define CTL0_EXTERNAL_CALL (63 - 50)
> -#define CTL0_CLOCK_COMPARATOR (63 - 52)
> -#define CTL0_SERVICE_SIGNAL (63 - 54)
> +#define CTL0_LOW_ADDR_PROT (63 - 35)
> +#define CTL0_EDAT (63 - 40)
> +#define CTL0_FETCH_PROTECTION_OVERRIDE (63 - 38)
> +#define CTL0_STORAGE_PROTECTION_OVERRIDE (63 - 39)
> +#define CTL0_IEP (63 - 43)
> +#define CTL0_AFP (63 - 45)
> +#define CTL0_VECTOR (63 - 46)
> +#define CTL0_EMERGENCY_SIGNAL (63 - 49)
> +#define CTL0_EXTERNAL_CALL (63 - 50)
> +#define CTL0_CLOCK_COMPARATOR (63 - 52)
> +#define CTL0_SERVICE_SIGNAL (63 - 54)
> #define CR0_EXTM_MASK 0x0000000000006200UL /* Combined external masks */
>
> #define CTL2_GUARDED_STORAGE (63 - 59)
> diff --git a/s390x/skey.c b/s390x/skey.c
> index 58a55436..6ae2d026 100644
> --- a/s390x/skey.c
> +++ b/s390x/skey.c
> @@ -10,7 +10,10 @@
> #include <libcflat.h>
> #include <asm/asm-offsets.h>
> #include <asm/interrupt.h>
> +#include <vmalloc.h>
> +#include <mmu.h>
> #include <asm/page.h>
> +#include <asm/pgtable.h>
> #include <asm/facility.h>
> #include <asm/mem.h>
>
> @@ -147,6 +150,167 @@ static void test_invalid_address(void)
> report_prefix_pop();
> }
>
> +static void test_test_protection(void)
> +{
> + unsigned long addr = (unsigned long)pagebuf;
> +
> + report_prefix_push("TPROT");
> + set_storage_key(pagebuf, 0x10, 0);
> + report(tprot(addr, 0) == 0, "access key 0 -> no protection");
> + report(tprot(addr, 1) == 0, "access key matches -> no protection");
> + report(tprot(addr, 2) == 1, "access key mismatches, no fetch protection -> store protection");
> + set_storage_key(pagebuf, 0x18, 0);
> + report(tprot(addr, 2) == 2, "access key mismatches, fetch protection -> fetch & store protection");
> + report_prefix_pop();
is there a reason why you don't set the storage key back to 0 once
you're done?
> +}
> +
> +static void store_cpu_address_key_1(uint16_t *out)
> +{
> + asm volatile (
> + "spka 0x10(0)\n\t"
> + "stap %0\n\t"
> + "spka 0(0)\n"
> + : "=Q" (*out)
> + );
> +}
> +
> +static void test_store_cpu_address(void)
> +{
> + uint16_t *out = (uint16_t *)pagebuf;
> + uint16_t cpu_addr;
> +
> + asm ("stap %0" : "=Q" (cpu_addr));
> +
> + report_prefix_push("STORE CPU ADDRESS, zero key");
> + set_storage_key(pagebuf, 0x20, 0);
> + *out = 0xbeef;
> + asm ("stap %0" : "=Q" (*out));
> + report(*out == cpu_addr, "store occurred");
> + report_prefix_pop();
> +
> + report_prefix_push("STORE CPU ADDRESS, matching key");
> + set_storage_key(pagebuf, 0x10, 0);
> + *out = 0xbeef;
> + store_cpu_address_key_1(out);
> + report(*out == cpu_addr, "store occurred");
> + report_prefix_pop();
> +
> + report_prefix_push("STORE CPU ADDRESS, mismatching key");
> + set_storage_key(pagebuf, 0x20, 0);
> + expect_pgm_int();
> + store_cpu_address_key_1(out);
> + check_pgm_int_code(PGM_INT_CODE_PROTECTION);
for completeness, maybe also check that nothing gets stored?
> + report_prefix_pop();
> +
> + ctl_set_bit(0, CTL0_STORAGE_PROTECTION_OVERRIDE);
> +
> + report_prefix_push("STORE CPU ADDRESS, storage-protection override, invalid key");
> + set_storage_key(pagebuf, 0x20, 0);
> + expect_pgm_int();
> + store_cpu_address_key_1(out);
> + check_pgm_int_code(PGM_INT_CODE_PROTECTION);
same here
> + report_prefix_pop();
> +
> + report_prefix_push("STORE CPU ADDRESS, storage-protection override, override key");
> + set_storage_key(pagebuf, 0x90, 0);
> + *out = 0xbeef;
> + store_cpu_address_key_1(out);
> + report(*out == cpu_addr, "override occurred");
> + report_prefix_pop();
> +
> + ctl_clear_bit(0, CTL0_STORAGE_PROTECTION_OVERRIDE);
> +}
> +
> +static void set_prefix_key_1(uint32_t *out)
> +{
> + asm volatile (
> + "spka 0x10(0)\n\t"
> + "spx %0\n\t"
> + "spka 0(0)\n"
> + : "=Q" (*out)
> + );
> +}
> +
> +/*
> + * We remapped page 0, making the lowcore inaccessible, which breaks the normal
> + * handler and breaks skipping the faulting instruction.
> + * Just disable dynamic address translation to make things work.
> + */
> +static void dat_fixup_pgm_int(void)
> +{
> + uint64_t psw_mask = extract_psw_mask();
> +
> + psw_mask &= ~PSW_MASK_DAT;
> + load_psw_mask(psw_mask);
> +}
> +
> +static void test_set_prefix(void)
> +{
> + uint32_t *out = (uint32_t *)pagebuf;
> + pgd_t *root;
> + pte_t *entry_0_p;
> + pte_t entry_lowcore, entry_pagebuf;
> +
> + root = (pgd_t *)(stctg(1) & PAGE_MASK);
> + entry_0_p = get_dat_entry(root, 0, pgtable_level_pte);
> + entry_lowcore = *entry_0_p;
> + entry_pagebuf = __pte((virt_to_pte_phys(root, out) & PAGE_MASK));
> +
> + asm volatile("stpx %0" : "=Q"(*out));
> +
> + report_prefix_push("SET PREFIX, zero key");
> + set_storage_key(pagebuf, 0x20, 0);
> + asm volatile("spx %0" : "=Q" (*out));
> + report_pass("no exception");
> + report_prefix_pop();
> +
> + report_prefix_push("SET PREFIX, matching key");
> + set_storage_key(pagebuf, 0x10, 0);
> + set_prefix_key_1(out);
> + report_pass("no exception");
> + report_prefix_pop();
> +
> + report_prefix_push("SET PREFIX, mismatching key, no fetch protection");
> + set_storage_key(pagebuf, 0x20, 0);
> + set_prefix_key_1(out);
> + report_pass("no exception");
> + report_prefix_pop();
> +
> + report_prefix_push("SET PREFIX, mismatching key, fetch protection");
> + set_storage_key(pagebuf, 0x28, 0);
> + expect_pgm_int();
> + set_prefix_key_1(out);
> + check_pgm_int_code(PGM_INT_CODE_PROTECTION);
> + report_prefix_pop();
> +
> + register_pgm_cleanup_func(dat_fixup_pgm_int);
> + ctl_set_bit(0, CTL0_FETCH_PROTECTION_OVERRIDE);
> +
> + report_prefix_push("SET PREFIX, mismatching key, fetch protection override applies");
> + set_storage_key(pagebuf, 0x28, 0);
> + ipte(0, &pte_val(*entry_0_p));
> + *entry_0_p = entry_pagebuf;
> + set_prefix_key_1(0);
> + ipte(0, &pte_val(*entry_0_p));
> + *entry_0_p = entry_lowcore;
> + report_pass("no exception");
> + report_prefix_pop();
> +
> + report_prefix_push("SET PREFIX, mismatching key, fetch protection override does not apply");
> + set_storage_key(pagebuf, 0x28, 0);
> + expect_pgm_int();
> + ipte(0, &pte_val(*entry_0_p));
> + *entry_0_p = entry_pagebuf;
> + set_prefix_key_1((uint32_t *)2048);
> + ipte(0, &pte_val(*entry_0_p));
> + *entry_0_p = entry_lowcore;
> + check_pgm_int_code(PGM_INT_CODE_PROTECTION);
> + report_prefix_pop();
> +
> + ctl_clear_bit(0, CTL0_FETCH_PROTECTION_OVERRIDE);
> + register_pgm_cleanup_func(NULL);
> +}
> +
> int main(void)
> {
> report_prefix_push("skey");
> @@ -159,6 +323,11 @@ int main(void)
> test_set();
> test_set_mb();
> test_chg();
> + test_test_protection();
> + test_store_cpu_address();
> +
> + setup_vm();
> + test_set_prefix();
> done:
> report_prefix_pop();
> return report_summary();
>
> base-commit: 257c962f3d1b2d0534af59de4ad18764d734903a
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [kvm-unit-tests PATCH] s390x: Test effect of storage keys on some instructions
2022-02-24 14:30 ` Claudio Imbrenda
@ 2022-02-24 15:55 ` Janis Schoetterl-Glausch
0 siblings, 0 replies; 3+ messages in thread
From: Janis Schoetterl-Glausch @ 2022-02-24 15:55 UTC (permalink / raw)
To: Claudio Imbrenda
Cc: Thomas Huth, Janosch Frank, David Hildenbrand, kvm, linux-s390
On 2/24/22 15:30, Claudio Imbrenda wrote:
> On Thu, 24 Feb 2022 12:09:50 +0100
> Janis Schoetterl-Glausch <scgl@linux.ibm.com> wrote:
>
>> Some instructions are emulated by KVM. Test that KVM correctly emulates
>> storage key checking for two of those instructions (STORE CPU ADDRESS,
>> SET PREFIX).
>> Test success and error conditions, including coverage of storage and
>> fetch protection override.
>> Also add test for TEST PROTECTION, even if that instruction will not be
>> emulated by KVM under normal conditions.
>>
>> Signed-off-by: Janis Schoetterl-Glausch <scgl@linux.ibm.com>
>> ---
>>
>> *entry_0_p = entry_pagebuf;
>>
>> I'm wondering if we need a barrier here, or would if set_prefix_key_1
>> wasn't made up of an asm volatile. But the mmu code seems to not have a
>> barrier in the equivalent code, so maybe it's never needed.
>>
>> set_prefix_key_1(0);
>>
>> lib/s390x/asm/arch_def.h | 20 ++---
>> s390x/skey.c | 169 +++++++++++++++++++++++++++++++++++++++
>> 2 files changed, 180 insertions(+), 9 deletions(-)
>>
[...]
>> diff --git a/s390x/skey.c b/s390x/skey.c
>> index 58a55436..6ae2d026 100644
>> --- a/s390x/skey.c
>> +++ b/s390x/skey.c
>> @@ -10,7 +10,10 @@
>> #include <libcflat.h>
>> #include <asm/asm-offsets.h>
>> #include <asm/interrupt.h>
>> +#include <vmalloc.h>
>> +#include <mmu.h>
>> #include <asm/page.h>
>> +#include <asm/pgtable.h>
>> #include <asm/facility.h>
>> #include <asm/mem.h>
>>
>> @@ -147,6 +150,167 @@ static void test_invalid_address(void)
>> report_prefix_pop();
>> }
>>
>> +static void test_test_protection(void)
>> +{
>> + unsigned long addr = (unsigned long)pagebuf;
>> +
>> + report_prefix_push("TPROT");
>> + set_storage_key(pagebuf, 0x10, 0);
>> + report(tprot(addr, 0) == 0, "access key 0 -> no protection");
>> + report(tprot(addr, 1) == 0, "access key matches -> no protection");
>> + report(tprot(addr, 2) == 1, "access key mismatches, no fetch protection -> store protection");
>> + set_storage_key(pagebuf, 0x18, 0);
>> + report(tprot(addr, 2) == 2, "access key mismatches, fetch protection -> fetch & store protection");
>> + report_prefix_pop();
>
> is there a reason why you don't set the storage key back to 0 once
> you're done?
None, other than it not being necessary, but I like the idea.
>
>> +}
>> +
>> +static void store_cpu_address_key_1(uint16_t *out)
>> +{
>> + asm volatile (
>> + "spka 0x10(0)\n\t"
>> + "stap %0\n\t"
>> + "spka 0(0)\n"
>> + : "=Q" (*out)
>> + );
>> +}
>> +
>> +static void test_store_cpu_address(void)
>> +{
>> + uint16_t *out = (uint16_t *)pagebuf;
>> + uint16_t cpu_addr;
>> +
>> + asm ("stap %0" : "=Q" (cpu_addr));
>> +
>> + report_prefix_push("STORE CPU ADDRESS, zero key");
>> + set_storage_key(pagebuf, 0x20, 0);
>> + *out = 0xbeef;
>> + asm ("stap %0" : "=Q" (*out));
>> + report(*out == cpu_addr, "store occurred");
>> + report_prefix_pop();
>> +
>> + report_prefix_push("STORE CPU ADDRESS, matching key");
>> + set_storage_key(pagebuf, 0x10, 0);
>> + *out = 0xbeef;
>> + store_cpu_address_key_1(out);
>> + report(*out == cpu_addr, "store occurred");
>> + report_prefix_pop();
>> +
>> + report_prefix_push("STORE CPU ADDRESS, mismatching key");
>> + set_storage_key(pagebuf, 0x20, 0);
>> + expect_pgm_int();
>> + store_cpu_address_key_1(out);
>> + check_pgm_int_code(PGM_INT_CODE_PROTECTION);
>
> for completeness, maybe also check that nothing gets stored?
Can do.
>
>> + report_prefix_pop();
>> +
>> + ctl_set_bit(0, CTL0_STORAGE_PROTECTION_OVERRIDE);
>> +
>> + report_prefix_push("STORE CPU ADDRESS, storage-protection override, invalid key");
>> + set_storage_key(pagebuf, 0x20, 0);
>> + expect_pgm_int();
>> + store_cpu_address_key_1(out);
>> + check_pgm_int_code(PGM_INT_CODE_PROTECTION);
>
> same here
>
[...]
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2022-02-24 15:55 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2022-02-24 11:09 [kvm-unit-tests PATCH] s390x: Test effect of storage keys on some instructions Janis Schoetterl-Glausch
2022-02-24 14:30 ` Claudio Imbrenda
2022-02-24 15:55 ` Janis Schoetterl-Glausch
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox