* [PATCH 1/3] powerpc: Add __hard_irqs_disabled()
@ 2017-05-16 9:26 Aneesh Kumar K.V
2017-05-16 9:26 ` [PATCH 2/3] powerpc/mm: Rename find_linux_pte_or_hugepte Aneesh Kumar K.V
` (3 more replies)
0 siblings, 4 replies; 11+ messages in thread
From: Aneesh Kumar K.V @ 2017-05-16 9:26 UTC (permalink / raw)
To: benh, paulus, mpe, Frederic Barrat; +Cc: linuxppc-dev, Aneesh Kumar K.V
Add __hard_irqs_disabled() similar to arch_irqs_disabled to check whether irqs
are hard disabled.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
arch/powerpc/include/asm/hw_irq.h | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index eba60416536e..541bd42f902f 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -88,6 +88,12 @@ static inline bool arch_irqs_disabled(void)
return arch_irqs_disabled_flags(arch_local_save_flags());
}
+static inline bool __hard_irqs_disabled(void)
+{
+ unsigned long flags = mfmsr();
+ return (flags & MSR_EE) == 0;
+}
+
#ifdef CONFIG_PPC_BOOK3E
#define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory")
#define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory")
@@ -197,6 +203,7 @@ static inline bool arch_irqs_disabled(void)
}
#define hard_irq_disable() arch_local_irq_disable()
+#define __hard_irqs_disabled() arch_irqs_disabled()
static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
--
2.7.4
^ permalink raw reply related [flat|nested] 11+ messages in thread
* [PATCH 2/3] powerpc/mm: Rename find_linux_pte_or_hugepte
2017-05-16 9:26 [PATCH 1/3] powerpc: Add __hard_irqs_disabled() Aneesh Kumar K.V
@ 2017-05-16 9:26 ` Aneesh Kumar K.V
2017-05-16 11:22 ` Benjamin Herrenschmidt
2017-05-16 9:26 ` [PATCH 3/3] powerpc/mm: Don't send IPI to all cpus on THP updates Aneesh Kumar K.V
` (2 subsequent siblings)
3 siblings, 1 reply; 11+ messages in thread
From: Aneesh Kumar K.V @ 2017-05-16 9:26 UTC (permalink / raw)
To: benh, paulus, mpe, Frederic Barrat; +Cc: linuxppc-dev, Aneesh Kumar K.V
No functional change. Add newer helpers with additional warnings and use those.
---
arch/powerpc/include/asm/pgtable.h | 10 +--------
arch/powerpc/include/asm/pte-walk.h | 38 ++++++++++++++++++++++++++++++++++
arch/powerpc/kernel/eeh.c | 4 ++--
arch/powerpc/kernel/io-workarounds.c | 5 +++--
arch/powerpc/kvm/book3s_64_mmu_hv.c | 5 +++--
arch/powerpc/kvm/book3s_64_mmu_radix.c | 33 ++++++++++++++++-------------
arch/powerpc/kvm/book3s_64_vio_hv.c | 3 ++-
arch/powerpc/kvm/book3s_hv_rm_mmu.c | 12 ++++-------
arch/powerpc/kvm/e500_mmu_host.c | 3 ++-
arch/powerpc/mm/hash_utils_64.c | 5 +++--
arch/powerpc/mm/hugetlbpage.c | 24 ++++++++++++---------
arch/powerpc/mm/tlb_hash64.c | 6 ++++--
arch/powerpc/perf/callchain.c | 3 ++-
13 files changed, 97 insertions(+), 54 deletions(-)
create mode 100644 arch/powerpc/include/asm/pte-walk.h
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index dd01212935ac..9fa263ad7cb3 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -66,16 +66,8 @@ extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd) 0
#endif
-pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
- bool *is_thp, unsigned *shift);
-static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
- bool *is_thp, unsigned *shift)
-{
- VM_WARN(!arch_irqs_disabled(),
- "%s called with irq enabled\n", __func__);
- return __find_linux_pte_or_hugepte(pgdir, ea, is_thp, shift);
-}
+/* can we use this in kvm */
unsigned long vmalloc_to_phys(void *vmalloc_addr);
void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
diff --git a/arch/powerpc/include/asm/pte-walk.h b/arch/powerpc/include/asm/pte-walk.h
new file mode 100644
index 000000000000..ea30c4ddd211
--- /dev/null
+++ b/arch/powerpc/include/asm/pte-walk.h
@@ -0,0 +1,38 @@
+#ifndef _ASM_POWERPC_PTE_WALK_H
+#define _ASM_POWERPC_PTE_WALK_H
+
+#ifndef __ASSEMBLY__
+#include <linux/sched.h>
+
+/* Don't use this directly */
+extern pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
+ bool *is_thp, unsigned *hshift);
+
+static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea,
+ bool *is_thp, unsigned *hshift)
+{
+ VM_WARN((!arch_irqs_disabled() && !__hard_irqs_disabled()) ,
+ "%s called with irq enabled\n", __func__);
+ return __find_linux_pte(pgdir, ea, is_thp, hshift);
+}
+
+static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
+{
+ pgd_t *pgdir = init_mm.pgd;
+ return __find_linux_pte(pgdir, ea, NULL, hshift);
+}
+/*
+ * This is what we should always use. Any other lockless page table lookup needs
+ * careful audit against THP split.
+ */
+static inline pte_t *find_current_mm_pte(pgd_t *pgdir, unsigned long ea,
+ bool *is_thp, unsigned *hshift)
+{
+ VM_WARN((!arch_irqs_disabled() && !__hard_irqs_disabled()) ,
+ "%s called with irq enabled\n", __func__);
+ VM_WARN(pgdir != current->mm->pgd,
+ "%s lock less page table lookup called on wrong mm\n", __func__);
+ return __find_linux_pte(pgdir, ea, is_thp, hshift);
+}
+#endif
+#endif
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 63992b2d8e15..5e6887c40528 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -44,6 +44,7 @@
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/rtas.h>
+#include <asm/pte-walk.h>
/** Overview:
@@ -352,8 +353,7 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
* worried about _PAGE_SPLITTING/collapse. Also we will not hit
* page table free, because of init_mm.
*/
- ptep = __find_linux_pte_or_hugepte(init_mm.pgd, token,
- NULL, &hugepage_shift);
+ ptep = find_init_mm_pte(token, &hugepage_shift);
if (!ptep)
return token;
WARN_ON(hugepage_shift);
diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c
index a582e0d42525..bbe85f5aea71 100644
--- a/arch/powerpc/kernel/io-workarounds.c
+++ b/arch/powerpc/kernel/io-workarounds.c
@@ -19,6 +19,8 @@
#include <asm/pgtable.h>
#include <asm/ppc-pci.h>
#include <asm/io-workarounds.h>
+#include <asm/pte-walk.h>
+
#define IOWA_MAX_BUS 8
@@ -75,8 +77,7 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
* We won't find huge pages here (iomem). Also can't hit
* a page table free due to init_mm
*/
- ptep = __find_linux_pte_or_hugepte(init_mm.pgd, vaddr,
- NULL, &hugepage_shift);
+ ptep = find_init_mm_pte(vaddr, &hugepage_shift);
if (ptep == NULL)
paddr = 0;
else {
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 8c68145ba1bd..f8f60f5e3aca 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -37,6 +37,7 @@
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
+#include <asm/pte-walk.h>
#include "trace_hv.h"
@@ -597,8 +598,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
* hugepage split and collapse.
*/
local_irq_save(flags);
- ptep = find_linux_pte_or_hugepte(current->mm->pgd,
- hva, NULL, NULL);
+ ptep = find_current_mm_pte(current->mm->pgd,
+ hva, NULL, NULL);
if (ptep) {
pte = kvmppc_read_update_linux_pte(ptep, 1);
if (__pte_write(pte))
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index f6b3e67c5762..dcd9e975c3d3 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -17,6 +17,7 @@
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
+#include <asm/pte-walk.h>
/*
* Supported radix tree geometry.
@@ -359,8 +360,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (writing)
pgflags |= _PAGE_DIRTY;
local_irq_save(flags);
- ptep = __find_linux_pte_or_hugepte(current->mm->pgd, hva,
- NULL, NULL);
+ ptep = find_current_mm_pte(current->mm->pgd, hva, NULL, NULL);
if (ptep) {
pte = READ_ONCE(*ptep);
if (pte_present(pte) &&
@@ -374,8 +374,16 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
spin_unlock(&kvm->mmu_lock);
return RESUME_GUEST;
}
- ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable,
- gpa, NULL, &shift);
+ /*
+ * it is ok to do the lookup with arch.pgtable here, because
+ * we are doing this on secondary cpus and current task there
+ * is not the hypervisor. Also this is safe against THP in the
+ * host, because an IPI to primary thread will wait for the secondary
+ * to exit which will again result in the below page table walk
+ * to finish.
+ */
+ ptep = find_linux_pte(kvm->arch.pgtable,
+ gpa, NULL, &shift);
if (ptep && pte_present(*ptep)) {
kvmppc_radix_update_pte(kvm, ptep, 0, pgflags,
gpa, shift);
@@ -427,8 +435,8 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
pgflags |= _PAGE_WRITE;
} else {
local_irq_save(flags);
- ptep = __find_linux_pte_or_hugepte(current->mm->pgd,
- hva, NULL, NULL);
+ ptep = find_current_mm_pte(current->mm->pgd,
+ hva, NULL, NULL);
if (ptep && pte_write(*ptep) && pte_dirty(*ptep))
pgflags |= _PAGE_WRITE;
local_irq_restore(flags);
@@ -499,8 +507,8 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned int shift;
unsigned long old;
- ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable, gpa,
- NULL, &shift);
+ /* is that safe ? */
+ ptep = find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep)) {
old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT, 0,
gpa, shift);
@@ -525,8 +533,7 @@ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned int shift;
int ref = 0;
- ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable, gpa,
- NULL, &shift);
+ ptep = find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
gpa, shift);
@@ -545,8 +552,7 @@ int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned int shift;
int ref = 0;
- ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable, gpa,
- NULL, &shift);
+ ptep = find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep) && pte_young(*ptep))
ref = 1;
return ref;
@@ -562,8 +568,7 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
unsigned int shift;
int ret = 0;
- ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable, gpa,
- NULL, &shift);
+ ptep = find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
ret = 1;
if (shift)
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index e4c4ea973e57..2ac2c0daddff 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -39,6 +39,7 @@
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
+#include <asm/pte-walk.h>
#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64))
@@ -210,7 +211,7 @@ static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
pte_t *ptep, pte;
unsigned shift = 0;
- ptep = __find_linux_pte_or_hugepte(vcpu->arch.pgdir, ua, NULL, &shift);
+ ptep = find_linux_pte(vcpu->arch.pgdir, ua, NULL, &shift);
if (!ptep || !pte_present(*ptep))
return -ENXIO;
pte = *ptep;
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index ce6f2121fffe..f8bfd947fe90 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -21,6 +21,7 @@
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
+#include <asm/pte-walk.h>
/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
@@ -32,7 +33,7 @@ static void *real_vmalloc_addr(void *x)
* So don't worry about THP collapse/split. Called
* Only in realmode, hence won't need irq_save/restore.
*/
- p = __find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL, NULL);
+ p = find_init_mm_pte(addr, NULL);
if (!p || !pte_present(*p))
return NULL;
addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
@@ -229,14 +230,9 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
* If we had a page table table change after lookup, we would
* retry via mmu_notifier_retry.
*/
- if (realmode)
- ptep = __find_linux_pte_or_hugepte(pgdir, hva, NULL,
- &hpage_shift);
- else {
+ if (!realmode)
local_irq_save(irq_flags);
- ptep = find_linux_pte_or_hugepte(pgdir, hva, NULL,
- &hpage_shift);
- }
+ ptep = find_linux_pte(pgdir, hva, NULL, &hpage_shift);
if (ptep) {
pte_t pte;
unsigned int host_pte_size;
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 0fda4230f6c0..3284b2c2c865 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -30,6 +30,7 @@
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>
+#include <asm/pte-walk.h>
#include "e500.h"
#include "timing.h"
@@ -476,7 +477,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
* can't run hence pfn won't change.
*/
local_irq_save(flags);
- ptep = find_linux_pte_or_hugepte(pgdir, hva, NULL, NULL);
+ ptep = find_linux_pte(pgdir, hva, NULL, NULL);
if (ptep) {
pte_t pte = READ_ONCE(*ptep);
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index f2095ce9d4b0..2e5e04933350 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -61,6 +61,7 @@
#include <asm/tm.h>
#include <asm/trace.h>
#include <asm/ps3.h>
+#include <asm/pte-walk.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
@@ -1295,7 +1296,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
#endif /* CONFIG_PPC_64K_PAGES */
/* Get PTE and page size from page tables */
- ptep = __find_linux_pte_or_hugepte(pgdir, ea, &is_thp, &hugeshift);
+ ptep = find_linux_pte(pgdir, ea, &is_thp, &hugeshift);
if (ptep == NULL || !pte_present(*ptep)) {
DBG_LOW(" no PTE !\n");
rc = 1;
@@ -1524,7 +1525,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
* THP pages use update_mmu_cache_pmd. We don't do
* hash preload there. Hence can ignore THP here
*/
- ptep = find_linux_pte_or_hugepte(pgdir, ea, NULL, &hugepage_shift);
+ ptep = find_current_mm_pte(pgdir, ea, NULL, &hugepage_shift);
if (!ptep)
goto out_exit;
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 4ebaa18f2495..a2ed4084e578 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -25,6 +25,8 @@
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>
+#include <asm/pte-walk.h>
+
#ifdef CONFIG_HUGETLB_PAGE
@@ -60,8 +62,11 @@ static unsigned nr_gpages;
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
- /* Only called for hugetlbfs pages, hence can ignore THP */
- return __find_linux_pte_or_hugepte(mm->pgd, addr, NULL, NULL);
+ /*
+ * Only called for hugetlbfs pages, hence can ignore THP and the
+ * irq disabled walk.
+ */
+ return __find_linux_pte(mm->pgd, addr, NULL, NULL);
}
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
@@ -963,9 +968,8 @@ void flush_dcache_icache_hugepage(struct page *page)
* This function need to be called with interrupts disabled. We use this variant
* when we have MSR[EE] = 0 but the paca->soft_enabled = 1
*/
-
-pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
- bool *is_thp, unsigned *shift)
+pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
+ bool *is_thp, unsigned *hpage_shift)
{
pgd_t pgd, *pgdp;
pud_t pud, *pudp;
@@ -974,8 +978,8 @@ pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
hugepd_t *hpdp = NULL;
unsigned pdshift = PGDIR_SHIFT;
- if (shift)
- *shift = 0;
+ if (hpage_shift)
+ *hpage_shift = 0;
if (is_thp)
*is_thp = false;
@@ -1045,11 +1049,11 @@ pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
ret_pte = hugepte_offset(*hpdp, ea, pdshift);
pdshift = hugepd_shift(*hpdp);
out:
- if (shift)
- *shift = pdshift;
+ if (hpage_shift)
+ *hpage_shift = pdshift;
return ret_pte;
}
-EXPORT_SYMBOL_GPL(__find_linux_pte_or_hugepte);
+EXPORT_SYMBOL_GPL(__find_linux_pte);
int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 4517aa43a8b1..b3e6116b4317 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -29,6 +29,8 @@
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>
+#include <asm/pte-walk.h>
+
#include <trace/events/thp.h>
@@ -209,8 +211,8 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
local_irq_save(flags);
arch_enter_lazy_mmu_mode();
for (; start < end; start += PAGE_SIZE) {
- pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, start, &is_thp,
- &hugepage_shift);
+ pte_t *ptep = find_current_mm_pte(mm->pgd, start, &is_thp,
+ &hugepage_shift);
unsigned long pte;
if (ptep == NULL)
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index 0fc26714780a..0af051a1974e 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -22,6 +22,7 @@
#ifdef CONFIG_PPC64
#include "../kernel/ppc32.h"
#endif
+#include <asm/pte-walk.h>
/*
@@ -127,7 +128,7 @@ static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
return -EFAULT;
local_irq_save(flags);
- ptep = find_linux_pte_or_hugepte(pgdir, addr, NULL, &shift);
+ ptep = find_current_mm_pte(pgdir, addr, NULL, &shift);
if (!ptep)
goto err_out;
if (!shift)
--
2.7.4
^ permalink raw reply related [flat|nested] 11+ messages in thread
* [PATCH 3/3] powerpc/mm: Don't send IPI to all cpus on THP updates
2017-05-16 9:26 [PATCH 1/3] powerpc: Add __hard_irqs_disabled() Aneesh Kumar K.V
2017-05-16 9:26 ` [PATCH 2/3] powerpc/mm: Rename find_linux_pte_or_hugepte Aneesh Kumar K.V
@ 2017-05-16 9:26 ` Aneesh Kumar K.V
2017-05-16 11:22 ` [PATCH 1/3] powerpc: Add __hard_irqs_disabled() Benjamin Herrenschmidt
2017-05-17 11:04 ` Balbir Singh
3 siblings, 0 replies; 11+ messages in thread
From: Aneesh Kumar K.V @ 2017-05-16 9:26 UTC (permalink / raw)
To: benh, paulus, mpe, Frederic Barrat; +Cc: linuxppc-dev, Aneesh Kumar K.V
Now that we made sure that lockless walk of linux page table is mostly limited
to current task(current->mm->pgdir) we can update the THP update sequence to
only send IPI to cpus on which this task has run. This helps in reducing the IPI
overload on systems with large number of CPUs.
W.r.t kvm even though kvm is walking page table with vcpu->arch.pgdir, it is
done only on secondary cpus and in that case we have primary cpu added to
task's mm cpumask. Sending an IPI to primary will force the secondary to do
a vm exit and hence this mm cpumask usage is safe here.
W.r.t CAPI, we still end up walking linux page table with capi context MM. For
now the pte lookup serialization sends an IPI to all cpus if CAPI is in use. We
can further improve this by adding the CAPI interrupt handling cpu to task
mm cpumask. That will be done in a later patch.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
arch/powerpc/include/asm/book3s/64/pgtable.h | 1 +
arch/powerpc/mm/pgtable-book3s64.c | 32 +++++++++++++++++++++++++++-
arch/powerpc/mm/pgtable-hash64.c | 8 +++----
arch/powerpc/mm/pgtable-radix.c | 8 +++----
4 files changed, 40 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 85bc9875c3be..d8c3c18e220d 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1145,6 +1145,7 @@ static inline bool arch_needs_pgtable_deposit(void)
return false;
return true;
}
+extern void serialize_against_pte_lookup(struct mm_struct *mm);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 5fcb3dd74c13..2679f57b90e2 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/mm_types.h>
+#include <misc/cxl-base.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
@@ -64,6 +65,35 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
trace_hugepage_set_pmd(addr, pmd_val(pmd));
return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}
+
+static void do_nothing(void *unused)
+{
+
+}
+/*
+ * Serialize against find_current_mm_pte which does lock-less
+ * lookup in page tables with local interrupts disabled. For huge pages
+ * it casts pmd_t to pte_t. Since format of pte_t is different from
+ * pmd_t we want to prevent transit from pmd pointing to page table
+ * to pmd pointing to huge page (and back) while interrupts are disabled.
+ * We clear pmd to possibly replace it with page table pointer in
+ * different code paths. So make sure we wait for the parallel
+ * find_current_mm_pte to finish.
+ */
+void serialize_against_pte_lookup(struct mm_struct *mm)
+{
+ smp_mb();
+ /*
+ * Cxl fault handling requires us to do a lockless page table
+ * walk while inserting hash page table entry with mm tracked
+ * in cxl context. Hence we need to do a global flush.
+ */
+ if (cxl_ctx_in_use())
+ smp_call_function(do_nothing, NULL, 1);
+ else
+ smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
+}
+
/*
* We use this to invalidate a pmdp entry before switching from a
* hugepte to regular pmd entry.
@@ -77,7 +107,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
* This ensures that generic code that rely on IRQ disabling
* to prevent a parallel THP split work as expected.
*/
- kick_all_cpus_sync();
+ serialize_against_pte_lookup(vma->vm_mm);
}
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
diff --git a/arch/powerpc/mm/pgtable-hash64.c b/arch/powerpc/mm/pgtable-hash64.c
index 8b85a14b08ea..f6313cc29ae4 100644
--- a/arch/powerpc/mm/pgtable-hash64.c
+++ b/arch/powerpc/mm/pgtable-hash64.c
@@ -159,7 +159,7 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addres
* by sending an IPI to all the cpus and executing a dummy
* function there.
*/
- kick_all_cpus_sync();
+ serialize_against_pte_lookup(vma->vm_mm);
/*
* Now invalidate the hpte entries in the range
* covered by pmd. This make sure we take a
@@ -299,16 +299,16 @@ pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
*/
memset(pgtable, 0, PTE_FRAG_SIZE);
/*
- * Serialize against find_linux_pte_or_hugepte which does lock-less
+ * Serialize against find_current_mm_pte variants which does lock-less
* lookup in page tables with local interrupts disabled. For huge pages
* it casts pmd_t to pte_t. Since format of pte_t is different from
* pmd_t we want to prevent transit from pmd pointing to page table
* to pmd pointing to huge page (and back) while interrupts are disabled.
* We clear pmd to possibly replace it with page table pointer in
* different code paths. So make sure we wait for the parallel
- * find_linux_pte_or_hugepage to finish.
+ * find_current_mm_pte to finish.
*/
- kick_all_cpus_sync();
+ serialize_against_pte_lookup(mm);
return old_pmd;
}
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index c28165d8970b..6e3d1518eef3 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -707,7 +707,7 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre
pmd = *pmdp;
pmd_clear(pmdp);
/*FIXME!! Verify whether we need this kick below */
- kick_all_cpus_sync();
+ serialize_against_pte_lookup(vma->vm_mm);
flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
return pmd;
}
@@ -767,16 +767,16 @@ pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
old_pmd = __pmd(old);
/*
- * Serialize against find_linux_pte_or_hugepte which does lock-less
+ * Serialize against find_current_mm_pte which does lock-less
* lookup in page tables with local interrupts disabled. For huge pages
* it casts pmd_t to pte_t. Since format of pte_t is different from
* pmd_t we want to prevent transit from pmd pointing to page table
* to pmd pointing to huge page (and back) while interrupts are disabled.
* We clear pmd to possibly replace it with page table pointer in
* different code paths. So make sure we wait for the parallel
- * find_linux_pte_or_hugepage to finish.
+ * find_current_mm_pte to finish.
*/
- kick_all_cpus_sync();
+ serialize_against_pte_lookup(mm);
return old_pmd;
}
--
2.7.4
^ permalink raw reply related [flat|nested] 11+ messages in thread
* Re: [PATCH 1/3] powerpc: Add __hard_irqs_disabled()
2017-05-16 9:26 [PATCH 1/3] powerpc: Add __hard_irqs_disabled() Aneesh Kumar K.V
2017-05-16 9:26 ` [PATCH 2/3] powerpc/mm: Rename find_linux_pte_or_hugepte Aneesh Kumar K.V
2017-05-16 9:26 ` [PATCH 3/3] powerpc/mm: Don't send IPI to all cpus on THP updates Aneesh Kumar K.V
@ 2017-05-16 11:22 ` Benjamin Herrenschmidt
2017-05-17 11:04 ` Balbir Singh
3 siblings, 0 replies; 11+ messages in thread
From: Benjamin Herrenschmidt @ 2017-05-16 11:22 UTC (permalink / raw)
To: Aneesh Kumar K.V, paulus, mpe, Frederic Barrat; +Cc: linuxppc-dev
On Tue, 2017-05-16 at 14:56 +0530, Aneesh Kumar K.V wrote:
>
> +static inline bool __hard_irqs_disabled(void)
> +{
> + unsigned long flags = mfmsr();
> + return (flags & MSR_EE) == 0;
> +}
> +
Reading the MSR has a cost. Can't we rely on paca->irq_happened being
non-0 ?
(If you are paranoid, add a test of msr as well and warn if there's
a mismatch ...)
Cheers,
Ben.
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 2/3] powerpc/mm: Rename find_linux_pte_or_hugepte
2017-05-16 9:26 ` [PATCH 2/3] powerpc/mm: Rename find_linux_pte_or_hugepte Aneesh Kumar K.V
@ 2017-05-16 11:22 ` Benjamin Herrenschmidt
2017-05-17 3:27 ` Aneesh Kumar K.V
0 siblings, 1 reply; 11+ messages in thread
From: Benjamin Herrenschmidt @ 2017-05-16 11:22 UTC (permalink / raw)
To: Aneesh Kumar K.V, paulus, mpe, Frederic Barrat; +Cc: linuxppc-dev
On Tue, 2017-05-16 at 14:56 +0530, Aneesh Kumar K.V wrote:
> +static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea,
> + bool *is_thp, unsigned *hshift)
> +{
> + VM_WARN((!arch_irqs_disabled() && !__hard_irqs_disabled()) ,
> + "%s called with irq enabled\n", __func__);
> + return __find_linux_pte(pgdir, ea, is_thp, hshift);
> +}
> +
When is arch_irqs_disabled() not sufficient ?
Cheers,
Ben.
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 2/3] powerpc/mm: Rename find_linux_pte_or_hugepte
2017-05-16 11:22 ` Benjamin Herrenschmidt
@ 2017-05-17 3:27 ` Aneesh Kumar K.V
2017-05-17 4:57 ` Benjamin Herrenschmidt
0 siblings, 1 reply; 11+ messages in thread
From: Aneesh Kumar K.V @ 2017-05-17 3:27 UTC (permalink / raw)
To: Benjamin Herrenschmidt, paulus, mpe, Frederic Barrat; +Cc: linuxppc-dev
Benjamin Herrenschmidt <benh@kernel.crashing.org> writes:
> On Tue, 2017-05-16 at 14:56 +0530, Aneesh Kumar K.V wrote:
>> +static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea,
>> +				    bool *is_thp, unsigned *hshift)
>> +{
>> +	VM_WARN((!arch_irqs_disabled() && !__hard_irqs_disabled()) ,
>> +		"%s called with irq enabled\n", __func__);
>> +	return __find_linux_pte(pgdir, ea, is_thp, hshift);
>> +}
>> +
>
> When is arch_irqs_disabled() not sufficient ?
We can do lockless page table walk in interrupt handlers where we find
MSR_EE = 0. I was not sure we mark softenabled 0 there. What I wanted to
indicate in the patch is that we are safe with either softenable = 0 or MSR_EE = 0
-aneesh
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 2/3] powerpc/mm: Rename find_linux_pte_or_hugepte
2017-05-17 3:27 ` Aneesh Kumar K.V
@ 2017-05-17 4:57 ` Benjamin Herrenschmidt
2017-05-17 5:30 ` Madhavan Srinivasan
2017-05-29 14:32 ` Aneesh Kumar K.V
0 siblings, 2 replies; 11+ messages in thread
From: Benjamin Herrenschmidt @ 2017-05-17 4:57 UTC (permalink / raw)
To: Aneesh Kumar K.V, paulus, mpe, Frederic Barrat; +Cc: linuxppc-dev
On Wed, 2017-05-17 at 08:57 +0530, Aneesh Kumar K.V wrote:
> Benjamin Herrenschmidt <benh@kernel.crashing.org> writes:
>
> > On Tue, 2017-05-16 at 14:56 +0530, Aneesh Kumar K.V wrote:
> > > +static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea,
> > > + bool *is_thp, unsigned *hshift)
> > > +{
> > > + VM_WARN((!arch_irqs_disabled() && !__hard_irqs_disabled()) ,
> > > + "%s called with irq enabled\n", __func__);
> > > + return __find_linux_pte(pgdir, ea, is_thp, hshift);
> > > +}
> > > +
> >
> > When is arch_irqs_disabled() not sufficient ?
>
> We can do lockless page table walk in interrupt handlers where we find
> MSR_EE = 0.
Such as ?
> I was not sure we mark softenabled 0 there. What I wanted to
> indicate in the patch is that we are safe with either softenable = 0 or MSR_EE = 0
Reading the MSR is expensive...
Can you find a case where we are hard disabled and not soft disable in
C code ? I can't think of one off-hand ... I know we have some asm that
can do that very temporarily but I wouldn't think we have anything at
runtime.
Talking of which, we have this in irq.c:
#ifdef CONFIG_TRACE_IRQFLAGS
else {
/*
* We should already be hard disabled here. We had bugs
* where that wasn't the case so let's dbl check it and
* warn if we are wrong. Only do that when IRQ tracing
* is enabled as mfmsr() can be costly.
*/
if (WARN_ON(mfmsr() & MSR_EE))
__hard_irq_disable();
}
#endif
I think we should move that to a new CONFIG_PPC_DEBUG_LAZY_IRQ because
distros are likely to have CONFIG_TRACE_IRQFLAGS these days no ?
Also we could add additional checks, such as MSR_EE matching paca-
>irq_happened or the above you mentioned, ie, WARN if we find case
where IRQs are hard disabled but soft enabled.
If we find these, I think we should fix them.
Cheers,
Ben.
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 2/3] powerpc/mm: Rename find_linux_pte_or_hugepte
2017-05-17 4:57 ` Benjamin Herrenschmidt
@ 2017-05-17 5:30 ` Madhavan Srinivasan
2017-05-29 14:32 ` Aneesh Kumar K.V
1 sibling, 0 replies; 11+ messages in thread
From: Madhavan Srinivasan @ 2017-05-17 5:30 UTC (permalink / raw)
To: linuxppc-dev
On Wednesday 17 May 2017 10:27 AM, Benjamin Herrenschmidt wrote:
> On Wed, 2017-05-17 at 08:57 +0530, Aneesh Kumar K.V wrote:
>> Benjamin Herrenschmidt <benh@kernel.crashing.org> writes:
>>
>>> On Tue, 2017-05-16 at 14:56 +0530, Aneesh Kumar K.V wrote:
>>>> +static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea,
>>>> + bool *is_thp, unsigned *hshift)
>>>> +{
>>>> + VM_WARN((!arch_irqs_disabled() && !__hard_irqs_disabled()) ,
>>>> + "%s called with irq enabled\n", __func__);
>>>> + return __find_linux_pte(pgdir, ea, is_thp, hshift);
>>>> +}
>>>> +
>>> When is arch_irqs_disabled() not sufficient ?
>> We can do lockless page table walk in interrupt handlers where we find
>> MSR_EE = 0.
> Such as ?
>
>> I was not sure we mark softenabled 0 there. What I wanted to
>> indicate in the patch is that we are safe with either softenable = 0 or MSR_EE = 0
> Reading the MSR is expensive...
>
> Can you find a case where we are hard disabled and not soft disable in
> C code ? I can't think of one off-hand ... I know we have some asm that
> can do that very temporarily but I wouldn't think we have anything at
> runtime.
>
> Talking of which, we have this in irq.c:
>
>
> #ifdef CONFIG_TRACE_IRQFLAGS
> else {
> /*
> * We should already be hard disabled here. We had bugs
> * where that wasn't the case so let's dbl check it and
> * warn if we are wrong. Only do that when IRQ tracing
> * is enabled as mfmsr() can be costly.
> */
> if (WARN_ON(mfmsr() & MSR_EE))
> __hard_irq_disable();
> }
> #endif
>
> I think we should move that to a new CONFIG_PPC_DEBUG_LAZY_IRQ because
> distros are likely to have CONFIG_TRACE_IRQFLAGS these days no ?
Yes, CONFIG_TRACE_IRQFLAGS are enabled. So in my local_t patchset,
I have added a patch to do the same with a flag "CONFIG_IRQ_DEBUG_SUPPORT"
mpe reported boot hang with the current version of the
local_t patchset in Booke system, and have a fix for the
same and it is being tested. Will post a newer version
once the patch verified.
Maddy
>
> Also we could add additional checks, such as MSR_EE matching paca-
>> irq_happened or the above you mentioned, ie, WARN if we find case
> where IRQs are hard disabled but soft enabled.
>
> If we find these, I think we should fix them.
>
> Cheers,
> Ben.
>
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 1/3] powerpc: Add __hard_irqs_disabled()
2017-05-16 9:26 [PATCH 1/3] powerpc: Add __hard_irqs_disabled() Aneesh Kumar K.V
` (2 preceding siblings ...)
2017-05-16 11:22 ` [PATCH 1/3] powerpc: Add __hard_irqs_disabled() Benjamin Herrenschmidt
@ 2017-05-17 11:04 ` Balbir Singh
3 siblings, 0 replies; 11+ messages in thread
From: Balbir Singh @ 2017-05-17 11:04 UTC (permalink / raw)
To: Aneesh Kumar K.V, benh, paulus, mpe, Frederic Barrat; +Cc: linuxppc-dev
On Tue, 2017-05-16 at 14:56 +0530, Aneesh Kumar K.V wrote:
> Add __hard_irqs_disabled() similar to arch_irqs_disabled to check whether irqs
> are hard disabled.
>
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> ---
> arch/powerpc/include/asm/hw_irq.h | 7 +++++++
> 1 file changed, 7 insertions(+)
>
> diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
> index eba60416536e..541bd42f902f 100644
> --- a/arch/powerpc/include/asm/hw_irq.h
> +++ b/arch/powerpc/include/asm/hw_irq.h
> @@ -88,6 +88,12 @@ static inline bool arch_irqs_disabled(void)
> return arch_irqs_disabled_flags(arch_local_save_flags());
> }
>
> +static inline bool __hard_irqs_disabled(void)
> +{
> + unsigned long flags = mfmsr();
> + return (flags & MSR_EE) == 0;
I have some patches that ensure MSR_EE is never 0, what are we protecting
against - external interrupts, IPI's or something else? I suspect its IPI's
Balbir Singh.
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 2/3] powerpc/mm: Rename find_linux_pte_or_hugepte
2017-05-17 4:57 ` Benjamin Herrenschmidt
2017-05-17 5:30 ` Madhavan Srinivasan
@ 2017-05-29 14:32 ` Aneesh Kumar K.V
2017-05-30 3:21 ` Benjamin Herrenschmidt
1 sibling, 1 reply; 11+ messages in thread
From: Aneesh Kumar K.V @ 2017-05-29 14:32 UTC (permalink / raw)
To: Benjamin Herrenschmidt, paulus, mpe, Frederic Barrat; +Cc: linuxppc-dev
Benjamin Herrenschmidt <benh@kernel.crashing.org> writes:
> On Wed, 2017-05-17 at 08:57 +0530, Aneesh Kumar K.V wrote:
>> Benjamin Herrenschmidt <benh@kernel.crashing.org> writes:
>>=20
>> > On Tue, 2017-05-16 at 14:56 +0530, Aneesh Kumar K.V wrote:
>> > > +static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea,
>> > > +                                   bool *is_thp, unsigned *hshift)
>> > > +{
>> > > +       VM_WARN((!arch_irqs_disabled() && !__hard_irqs_disabled()),
>> > > +               "%s called with irq enabled\n", __func__);
>> > > +       return __find_linux_pte(pgdir, ea, is_thp, hshift);
>> > > +}
>> > > +
>> >=20
>> > When is arch_irqs_disabled() not sufficient ?
>>=20
>> We can do lockless page table walk in interrupt handlers where we find
>> MSR_EE = 0.
>
> Such as ?
>
kvmppc_do_h_enter() when it gets called in real mode.
For now I have dropped hard_irq_disabled() and switched these usages to
__find_linux_pte with an explicit comment around them stating they are
called with MSR_EE = 0.
-aneesh
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 2/3] powerpc/mm: Rename find_linux_pte_or_hugepte
2017-05-29 14:32 ` Aneesh Kumar K.V
@ 2017-05-30 3:21 ` Benjamin Herrenschmidt
0 siblings, 0 replies; 11+ messages in thread
From: Benjamin Herrenschmidt @ 2017-05-30 3:21 UTC (permalink / raw)
To: Aneesh Kumar K.V, paulus, mpe, Frederic Barrat; +Cc: linuxppc-dev
On Mon, 2017-05-29 at 20:02 +0530, Aneesh Kumar K.V wrote:
> kvmppc_do_h_enter() when get called in real mode.
>
> For now i have dropped hard_irq_disabled() and switched these usage to
> __find_linux_pte with explict comment around them stating they are
> called with MSR_EE = 0.
Shouldn't these code path also have soft disabled set ?
Cheers,
Ben.
^ permalink raw reply [flat|nested] 11+ messages in thread
end of thread, other threads:[~2017-05-30 3:21 UTC | newest]
Thread overview: 11+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2017-05-16 9:26 [PATCH 1/3] powerpc: Add __hard_irqs_disabled() Aneesh Kumar K.V
2017-05-16 9:26 ` [PATCH 2/3] powerpc/mm: Rename find_linux_pte_or_hugepte Aneesh Kumar K.V
2017-05-16 11:22 ` Benjamin Herrenschmidt
2017-05-17 3:27 ` Aneesh Kumar K.V
2017-05-17 4:57 ` Benjamin Herrenschmidt
2017-05-17 5:30 ` Madhavan Srinivasan
2017-05-29 14:32 ` Aneesh Kumar K.V
2017-05-30 3:21 ` Benjamin Herrenschmidt
2017-05-16 9:26 ` [PATCH 3/3] powerpc/mm: Don't send IPI to all cpus on THP updates Aneesh Kumar K.V
2017-05-16 11:22 ` [PATCH 1/3] powerpc: Add __hard_irqs_disabled() Benjamin Herrenschmidt
2017-05-17 11:04 ` Balbir Singh
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).