* [PATCH 1/3] powerpc/mm/hash: Remove the superfluous bitwise operation when find hpte group
From: Aneesh Kumar K.V @ 2018-06-29 8:36 UTC
To: npiggin, benh, paulus, mpe; +Cc: linuxppc-dev, Aneesh Kumar K.V
From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
When computing the starting slot number for a hash page table group we used
to do this:

	hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;

Multiplying by 8 (HPTES_PER_GROUP) implies the last three bits are already
zero, so we don't need to clear them separately.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
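A minimal userspace sketch (not kernel code; the htab_hash_mask value below
is an arbitrary example) showing that the mask is a no-op once we multiply
by 8:

#include <assert.h>
#include <stdio.h>

#define HPTES_PER_GROUP 8UL

int main(void)
{
	unsigned long hash = 0x123456789abcdefUL;
	unsigned long htab_hash_mask = 0xfffffUL;	/* arbitrary example */
	unsigned long group;

	/* Multiplying by 8 is a left shift by 3, so bits 0-2 are already 0 */
	group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	assert((group & ~0x7UL) == group);	/* the "& ~0x7UL" was a no-op */
	printf("hpte_group = 0x%lx\n", group);
	return 0;
}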
arch/powerpc/mm/dump_hashpagetable.c | 2 +-
arch/powerpc/mm/hash64_4k.c | 8 ++++----
arch/powerpc/mm/hash64_64k.c | 15 +++++++--------
arch/powerpc/mm/hash_utils_64.c | 10 ++++------
arch/powerpc/mm/hugepage-hash64.c | 9 ++++-----
5 files changed, 20 insertions(+), 24 deletions(-)
diff --git a/arch/powerpc/mm/dump_hashpagetable.c b/arch/powerpc/mm/dump_hashpagetable.c
index 14cfb11b09d0..d241cb6518da 100644
--- a/arch/powerpc/mm/dump_hashpagetable.c
+++ b/arch/powerpc/mm/dump_hashpagetable.c
@@ -260,7 +260,7 @@ static int pseries_find(unsigned long ea, int psize, bool primary, u64 *v, u64 *
/* to check in the secondary hash table, we invert the hash */
if (!primary)
hash = ~hash;
- hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
/* see if we can find an entry in the hpte with this hash */
for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {
lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
diff --git a/arch/powerpc/mm/hash64_4k.c b/arch/powerpc/mm/hash64_4k.c
index d573d7d07f25..6fa6765a10eb 100644
--- a/arch/powerpc/mm/hash64_4k.c
+++ b/arch/powerpc/mm/hash64_4k.c
@@ -80,7 +80,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
hash = hpt_hash(vpn, shift, ssize);
repeat:
- hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
/* Insert into the hash table, primary slot */
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
@@ -89,7 +89,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
* Primary is full, try the secondary
*/
if (unlikely(slot == -1)) {
- hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
rflags,
HPTE_V_SECONDARY,
@@ -97,8 +97,8 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
MMU_PAGE_4K, ssize);
if (slot == -1) {
if (mftb() & 0x1)
- hpte_group = ((hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) *
+ HPTES_PER_GROUP;
mmu_hash_ops.hpte_remove(hpte_group);
/*
* FIXME!! Should be try the group from which we removed ?
diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
index e601d95c3b20..3afa253d7f52 100644
--- a/arch/powerpc/mm/hash64_64k.c
+++ b/arch/powerpc/mm/hash64_64k.c
@@ -154,7 +154,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
}
hash = hpt_hash(vpn, shift, ssize);
repeat:
- hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
/* Insert into the hash table, primary slot */
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
@@ -165,7 +165,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
if (unlikely(slot == -1)) {
bool soft_invalid;
- hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
rflags, HPTE_V_SECONDARY,
MMU_PAGE_4K, MMU_PAGE_4K,
@@ -193,8 +193,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
* that we do not get the same soft-invalid slot.
*/
if (soft_invalid || (mftb() & 0x1))
- hpte_group = ((hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
mmu_hash_ops.hpte_remove(hpte_group);
/*
@@ -288,7 +287,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
hash = hpt_hash(vpn, shift, ssize);
repeat:
- hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
/* Insert into the hash table, primary slot */
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
@@ -298,7 +297,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
* Primary is full, try the secondary
*/
if (unlikely(slot == -1)) {
- hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
rflags,
HPTE_V_SECONDARY,
@@ -306,8 +305,8 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
MMU_PAGE_64K, ssize);
if (slot == -1) {
if (mftb() & 0x1)
- hpte_group = ((hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) *
+ HPTES_PER_GROUP;
mmu_hash_ops.hpte_remove(hpte_group);
/*
* FIXME!! Should be try the group from which we removed ?
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 8318716e5075..4ba901223000 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1783,8 +1783,7 @@ long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
long slot;
repeat:
- hpte_group = ((hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
/* Insert into the hash table, primary slot */
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, vflags,
@@ -1792,15 +1791,14 @@ long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
/* Primary is full, try the secondary */
if (unlikely(slot == -1)) {
- hpte_group = ((~hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags,
vflags | HPTE_V_SECONDARY,
psize, psize, ssize);
if (slot == -1) {
if (mftb() & 0x1)
- hpte_group = ((hash & htab_hash_mask) *
- HPTES_PER_GROUP)&~0x7UL;
+ hpte_group = (hash & htab_hash_mask) *
+ HPTES_PER_GROUP;
mmu_hash_ops.hpte_remove(hpte_group);
goto repeat;
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index f20d16f849c5..01f213d2bcb9 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -128,7 +128,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
new_pmd |= H_PAGE_HASHPTE;
repeat:
- hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
/* Insert into the hash table, primary slot */
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
@@ -137,16 +137,15 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
* Primary is full, try the secondary
*/
if (unlikely(slot == -1)) {
- hpte_group = ((~hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
rflags,
HPTE_V_SECONDARY,
psize, lpsize, ssize);
if (slot == -1) {
if (mftb() & 0x1)
- hpte_group = ((hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) *
+ HPTES_PER_GROUP;
mmu_hash_ops.hpte_remove(hpte_group);
goto repeat;
--
2.17.1
* [PATCH 2/3] powerpc/mm/hash: Add hpte_get_old_v and use that instead of opencoding
From: Aneesh Kumar K.V @ 2018-06-29 8:36 UTC
To: npiggin, benh, paulus, mpe; +Cc: linuxppc-dev, Aneesh Kumar K.V
No functional change
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
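As a sketch of what each call site gains (the function names below are
illustrative, not kernel symbols; hpte_get_old_v() itself is added by the
header hunk that follows):

/* Open-coded form this patch removes from each call site: */
static unsigned long read_old_v_open_coded(struct hash_pte *hptep)
{
	unsigned long hpte_v = be64_to_cpu(hptep->v);

	/*
	 * ISA 3.0 (POWER9) changed the HPTE layout; convert it back so
	 * the rest of the hash MMU code can stay format-agnostic.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
	return hpte_v;
}

/* With this patch, the same read is a single helper call: */
static unsigned long read_old_v_helper(struct hash_pte *hptep)
{
	return hpte_get_old_v(hptep);
}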
arch/powerpc/include/asm/book3s/64/mmu-hash.h | 10 +++++++
arch/powerpc/mm/hash_native_64.c | 27 +++++--------------
2 files changed, 17 insertions(+), 20 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 50ed64fba4ae..eee0b5b8a23f 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -364,6 +364,16 @@ static inline unsigned long hpte_new_to_old_r(unsigned long r)
return r & ~HPTE_R_3_0_SSIZE_MASK;
}
+static inline unsigned long hpte_get_old_v(struct hash_pte *hptep)
+{
+ unsigned long hpte_v;
+
+ hpte_v = be64_to_cpu(hptep->v);
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+ return hpte_v;
+}
+
/*
* This function sets the AVPN and L fields of the HPTE appropriately
* using the base page size and actual page size.
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 1d049c78c82a..68e6eaf41bb9 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -423,9 +423,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
vpn, want_v & HPTE_V_AVPN, slot, newpp);
- hpte_v = be64_to_cpu(hptep->v);
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+ hpte_v = hpte_get_old_v(hptep);
/*
* We need to invalidate the TLB always because hpte_remove doesn't do
* a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
@@ -439,9 +437,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
} else {
native_lock_hpte(hptep);
/* recheck with locks held */
- hpte_v = be64_to_cpu(hptep->v);
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+ hpte_v = hpte_get_old_v(hptep);
if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
!(hpte_v & HPTE_V_VALID))) {
ret = -1;
@@ -481,11 +477,9 @@ static long native_hpte_find(unsigned long vpn, int psize, int ssize)
/* Bolted mappings are only ever in the primary group */
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
for (i = 0; i < HPTES_PER_GROUP; i++) {
- hptep = htab_address + slot;
- hpte_v = be64_to_cpu(hptep->v);
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+ hptep = htab_address + slot;
+ hpte_v = hpte_get_old_v(hptep);
if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
/* HPTE matches */
return slot;
@@ -575,9 +569,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
want_v = hpte_encode_avpn(vpn, bpsize, ssize);
native_lock_hpte(hptep);
- hpte_v = be64_to_cpu(hptep->v);
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+ hpte_v = hpte_get_old_v(hptep);
/*
* We need to invalidate the TLB always because hpte_remove doesn't do
@@ -635,9 +627,7 @@ static void native_hugepage_invalidate(unsigned long vsid,
hptep = htab_address + slot;
want_v = hpte_encode_avpn(vpn, psize, ssize);
native_lock_hpte(hptep);
- hpte_v = be64_to_cpu(hptep->v);
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+ hpte_v = hpte_get_old_v(hptep);
/* Even if we miss, we need to invalidate the TLB */
if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
@@ -813,10 +803,7 @@ static void native_flush_hash_range(unsigned long number, int local)
hptep = htab_address + slot;
want_v = hpte_encode_avpn(vpn, psize, ssize);
native_lock_hpte(hptep);
- hpte_v = be64_to_cpu(hptep->v);
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- hpte_v = hpte_new_to_old_v(hpte_v,
- be64_to_cpu(hptep->r));
+ hpte_v = hpte_get_old_v(hptep);
if (!HPTE_V_COMPARE(hpte_v, want_v) ||
!(hpte_v & HPTE_V_VALID))
native_unlock_hpte(hptep);
--
2.17.1
* [PATCH 3/3] powerpc/mm/nv/hash: Reduce contention on hpte lock
From: Aneesh Kumar K.V @ 2018-06-29 8:36 UTC
To: npiggin, benh, paulus, mpe
Cc: linuxppc-dev, Aneesh Kumar K.V, Aneesh Kumar K . V
From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
We already do this in some paths. This patch makes sure we always search for
the HPTE without holding the lock, and redo the compare with the lock held
once a match is found.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
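The shape of the change, as a sketch (the function name is illustrative, not
a kernel symbol; the real hunks follow):

static void invalidate_if_match(struct hash_pte *hptep, unsigned long want_v)
{
	unsigned long hpte_v;

	/* Unlocked peek: a miss never touches the lock at all */
	hpte_v = hpte_get_old_v(hptep);
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
		return;

	/* Possible match: take the lock and recheck before acting */
	native_lock_hpte(hptep);
	hpte_v = hpte_get_old_v(hptep);
	if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
		hptep->v = 0;	/* invalidate; this store also unlocks */
	else
		native_unlock_hpte(hptep);
}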
arch/powerpc/mm/hash_native_64.c | 49 +++++++++++++++++++++-----------
1 file changed, 33 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 68e6eaf41bb9..ffbd5ed4e8de 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -568,9 +568,19 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);
want_v = hpte_encode_avpn(vpn, bpsize, ssize);
- native_lock_hpte(hptep);
hpte_v = hpte_get_old_v(hptep);
+ if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
+ native_lock_hpte(hptep);
+ /* recheck with locks held */
+ hpte_v = hpte_get_old_v(hptep);
+
+ if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
+ /* Invalidate the hpte. NOTE: this also unlocks it */
+ hptep->v = 0;
+ else
+ native_unlock_hpte(hptep);
+ }
/*
* We need to invalidate the TLB always because hpte_remove doesn't do
* a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
@@ -578,13 +588,6 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
* (hpte_remove) because we assume the old translation is still
* technically "valid".
*/
- if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
- native_unlock_hpte(hptep);
- else
- /* Invalidate the hpte. NOTE: this also unlocks it */
- hptep->v = 0;
-
- /* Invalidate the TLB */
tlbie(vpn, bpsize, apsize, ssize, local);
local_irq_restore(flags);
@@ -626,15 +629,23 @@ static void native_hugepage_invalidate(unsigned long vsid,
hptep = htab_address + slot;
want_v = hpte_encode_avpn(vpn, psize, ssize);
- native_lock_hpte(hptep);
hpte_v = hpte_get_old_v(hptep);
/* Even if we miss, we need to invalidate the TLB */
- if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
- native_unlock_hpte(hptep);
- else
- /* Invalidate the hpte. NOTE: this also unlocks it */
- hptep->v = 0;
+ if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
+ /* recheck with locks held */
+ native_lock_hpte(hptep);
+ hpte_v = hpte_get_old_v(hptep);
+
+ if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
+ /*
+ * Invalidate the hpte. NOTE: this also unlocks it
+ */
+
+ hptep->v = 0;
+ } else
+ native_unlock_hpte(hptep);
+ }
/*
* We need to do tlb invalidate for all the address, tlbie
* instruction compares entry_VA in tlb with the VA specified
@@ -802,13 +813,19 @@ static void native_flush_hash_range(unsigned long number, int local)
slot += hidx & _PTEIDX_GROUP_IX;
hptep = htab_address + slot;
want_v = hpte_encode_avpn(vpn, psize, ssize);
+ hpte_v = hpte_get_old_v(hptep);
+
+ if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
+ continue;
+ /* lock and try again */
native_lock_hpte(hptep);
hpte_v = hpte_get_old_v(hptep);
- if (!HPTE_V_COMPARE(hpte_v, want_v) ||
- !(hpte_v & HPTE_V_VALID))
+
+ if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
native_unlock_hpte(hptep);
else
hptep->v = 0;
+
} pte_iterate_hashed_end();
}
--
2.17.1
* Re: [1/3] powerpc/mm/hash: Remove the superfluous bitwise operation when find hpte group
From: Michael Ellerman @ 2018-07-24 13:59 UTC
To: Aneesh Kumar K.V, npiggin, benh, paulus; +Cc: linuxppc-dev, Aneesh Kumar K.V
On Fri, 2018-06-29 at 08:36:29 UTC, "Aneesh Kumar K.V" wrote:
> From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
>
> When computing the starting slot number for a hash page table group we used
> to do this:
>
>	hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
>
> Multiplying by 8 (HPTES_PER_GROUP) implies the last three bits are already
> zero, so we don't need to clear them separately.
>
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Series applied to powerpc next, thanks.
https://git.kernel.org/powerpc/c/1531cff44b5bb30c899404c044805e
cheers