* [PATCH 13/26] ppc: Convert mmu context allocation to new IDA API
From: Matthew Wilcox @ 2018-06-21 21:28 UTC
To: linux-kernel
Cc: Matthew Wilcox, Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
	Aneesh Kumar K.V, Nicholas Piggin, Thiago Jung Bauermann, Ram Pai,
	linuxppc-dev

ida_alloc_range is the perfect fit for this use case.  Eliminates
a custom spinlock, a call to ida_pre_get and a local check for the
allocated ID exceeding a maximum.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
---
 arch/powerpc/mm/mmu_context_book3s64.c | 44 +++-----------------------
 1 file changed, 4 insertions(+), 40 deletions(-)

diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index f3d4b4a0e561..5a0cf2cc8ba0 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -26,48 +26,16 @@
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
 
-static DEFINE_SPINLOCK(mmu_context_lock);
 static DEFINE_IDA(mmu_context_ida);
 
 static int alloc_context_id(int min_id, int max_id)
 {
-	int index, err;
-
-again:
-	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
-		return -ENOMEM;
-
-	spin_lock(&mmu_context_lock);
-	err = ida_get_new_above(&mmu_context_ida, min_id, &index);
-	spin_unlock(&mmu_context_lock);
-
-	if (err == -EAGAIN)
-		goto again;
-	else if (err)
-		return err;
-
-	if (index > max_id) {
-		spin_lock(&mmu_context_lock);
-		ida_remove(&mmu_context_ida, index);
-		spin_unlock(&mmu_context_lock);
-		return -ENOMEM;
-	}
-
-	return index;
+	return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);
 }
 
 void hash__reserve_context_id(int id)
 {
-	int rc, result = 0;
-
-	do {
-		if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
-			break;
-
-		spin_lock(&mmu_context_lock);
-		rc = ida_get_new_above(&mmu_context_ida, id, &result);
-		spin_unlock(&mmu_context_lock);
-	} while (rc == -EAGAIN);
+	int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL);
 
 	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
 }
@@ -172,9 +140,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 
 void __destroy_context(int context_id)
 {
-	spin_lock(&mmu_context_lock);
-	ida_remove(&mmu_context_ida, context_id);
-	spin_unlock(&mmu_context_lock);
+	ida_free(&mmu_context_ida, context_id);
 }
 EXPORT_SYMBOL_GPL(__destroy_context);
 
@@ -182,13 +148,11 @@ static void destroy_contexts(mm_context_t *ctx)
 {
 	int index, context_id;
 
-	spin_lock(&mmu_context_lock);
 	for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
 		context_id = ctx->extended_id[index];
 		if (context_id)
-			ida_remove(&mmu_context_ida, context_id);
+			ida_free(&mmu_context_ida, context_id);
 	}
-	spin_unlock(&mmu_context_lock);
 }
 
 static void pte_frag_destroy(void *pte_frag)
-- 
2.17.1
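
[For readers who have not seen the new API, here is a minimal sketch of the
calling convention the patch relies on.  The example_* names below are
illustrative only, not taken from the patch: ida_alloc_range() takes the
inclusive bounds and gfp flags, does its own locking and range checking, and
returns either the allocated ID or a negative errno; ida_free() releases an
ID with no external lock.]

	#include <linux/idr.h>
	#include <linux/gfp.h>

	static DEFINE_IDA(example_ida);

	/* Allocate an ID in [min_id, max_id]; returns the ID or a negative errno. */
	static int example_get_id(int min_id, int max_id)
	{
		return ida_alloc_range(&example_ida, min_id, max_id, GFP_KERNEL);
	}

	/* Release an ID; the IDA does its own locking, no spinlock required. */
	static void example_put_id(int id)
	{
		ida_free(&example_ida, id);
	}

[Because the retry loop, the range check and the locking all live inside
ida_alloc_range(), the open-coded ida_pre_get()/ida_get_new_above() sequence
and the external spinlock in the old code become unnecessary, which is what
the diff above removes.]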
* Re: [PATCH 13/26] ppc: Convert mmu context allocation to new IDA API
From: Nicholas Piggin @ 2018-06-22  2:15 UTC
To: Matthew Wilcox
Cc: linux-kernel, Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
	Aneesh Kumar K.V, Thiago Jung Bauermann, Ram Pai, linuxppc-dev

On Thu, 21 Jun 2018 14:28:22 -0700
Matthew Wilcox <willy@infradead.org> wrote:

> ida_alloc_range is the perfect fit for this use case.  Eliminates
> a custom spinlock, a call to ida_pre_get and a local check for the
> allocated ID exceeding a maximum.
> 
> Signed-off-by: Matthew Wilcox <willy@infradead.org>
> ---
>  arch/powerpc/mm/mmu_context_book3s64.c | 44 +++-----------------------
>  1 file changed, 4 insertions(+), 40 deletions(-)
> 
> diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
> index f3d4b4a0e561..5a0cf2cc8ba0 100644
> --- a/arch/powerpc/mm/mmu_context_book3s64.c
> +++ b/arch/powerpc/mm/mmu_context_book3s64.c
> @@ -26,48 +26,16 @@
>  #include <asm/mmu_context.h>
>  #include <asm/pgalloc.h>
>  
> -static DEFINE_SPINLOCK(mmu_context_lock);
>  static DEFINE_IDA(mmu_context_ida);
>  
>  static int alloc_context_id(int min_id, int max_id)
>  {
> -	int index, err;
> -
> -again:
> -	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
> -		return -ENOMEM;
> -
> -	spin_lock(&mmu_context_lock);
> -	err = ida_get_new_above(&mmu_context_ida, min_id, &index);
> -	spin_unlock(&mmu_context_lock);
> -
> -	if (err == -EAGAIN)
> -		goto again;
> -	else if (err)
> -		return err;
> -
> -	if (index > max_id) {
> -		spin_lock(&mmu_context_lock);
> -		ida_remove(&mmu_context_ida, index);
> -		spin_unlock(&mmu_context_lock);
> -		return -ENOMEM;
> -	}
> -
> -	return index;
> +	return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);
>  }
>  
>  void hash__reserve_context_id(int id)
>  {
> -	int rc, result = 0;
> -
> -	do {
> -		if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
> -			break;
> -
> -		spin_lock(&mmu_context_lock);
> -		rc = ida_get_new_above(&mmu_context_ida, id, &result);
> -		spin_unlock(&mmu_context_lock);
> -	} while (rc == -EAGAIN);
> +	int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL);
>  
>  	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
>  }
> @@ -172,9 +140,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
>  
>  void __destroy_context(int context_id)
>  {
> -	spin_lock(&mmu_context_lock);
> -	ida_remove(&mmu_context_ida, context_id);
> -	spin_unlock(&mmu_context_lock);
> +	ida_free(&mmu_context_ida, context_id);
>  }
>  EXPORT_SYMBOL_GPL(__destroy_context);
>  
> @@ -182,13 +148,11 @@ static void destroy_contexts(mm_context_t *ctx)
>  {
>  	int index, context_id;
>  
> -	spin_lock(&mmu_context_lock);
>  	for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
>  		context_id = ctx->extended_id[index];
>  		if (context_id)
> -			ida_remove(&mmu_context_ida, context_id);
> +			ida_free(&mmu_context_ida, context_id);
>  	}
> -	spin_unlock(&mmu_context_lock);
>  }
>  
>  static void pte_frag_destroy(void *pte_frag)

This hunk should be okay because the mmu_context_lock does not protect
the extended_id array, right Aneesh?

Reviewed-by: Nicholas Piggin <npiggin@gmail.com>

Thanks,
Nick
* Re: [PATCH 13/26] ppc: Convert mmu context allocation to new IDA API
From: Matthew Wilcox @ 2018-06-22  4:38 UTC
To: Nicholas Piggin
Cc: linux-kernel, Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
	Aneesh Kumar K.V, Thiago Jung Bauermann, Ram Pai, linuxppc-dev

On Fri, Jun 22, 2018 at 12:15:11PM +1000, Nicholas Piggin wrote:
> On Thu, 21 Jun 2018 14:28:22 -0700
> Matthew Wilcox <willy@infradead.org> wrote:
> > static int alloc_context_id(int min_id, int max_id)
...
> > -	spin_lock(&mmu_context_lock);
> > -	err = ida_get_new_above(&mmu_context_ida, min_id, &index);
> > -	spin_unlock(&mmu_context_lock);
...
> > @@ -182,13 +148,11 @@ static void destroy_contexts(mm_context_t *ctx)
> >  {
> >  	int index, context_id;
> >  
> > -	spin_lock(&mmu_context_lock);
> >  	for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
> >  		context_id = ctx->extended_id[index];
> >  		if (context_id)
> > -			ida_remove(&mmu_context_ida, context_id);
> > +			ida_free(&mmu_context_ida, context_id);
> >  	}
> > -	spin_unlock(&mmu_context_lock);
> >  }
> >  
> >  static void pte_frag_destroy(void *pte_frag)
> 
> This hunk should be okay because the mmu_context_lock does not protect
> the extended_id array, right Aneesh?

That's my understanding.  The code today does this:

static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	int context_id;

	int index = ea >> MAX_EA_BITS_PER_CONTEXT;

	context_id = hash__alloc_context_id();
	if (context_id < 0)
		return context_id;

	VM_WARN_ON(mm->context.extended_id[index]);
	mm->context.extended_id[index] = context_id;

so it's not currently protected by this lock.  I suppose we are currently
protected from destroy_contexts() being called twice simultaneously, but
you'll notice that we don't zero the array elements in destroy_contexts(),
so if we somehow had a code path which could call it concurrently, we'd
be seeing warnings when the second caller tried to remove the context
IDs from the IDA.  I deduced that something else must be preventing
this situation from occurring (like, oh i don't know, this function only
being called on process exit, so implicitly only called once per context).

> Reviewed-by: Nicholas Piggin <npiggin@gmail.com>

Thanks.
* Re: [PATCH 13/26] ppc: Convert mmu context allocation to new IDA API
From: Nicholas Piggin @ 2018-06-22  4:53 UTC
To: Matthew Wilcox
Cc: linux-kernel, Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
	Aneesh Kumar K.V, Thiago Jung Bauermann, Ram Pai, linuxppc-dev

On Thu, 21 Jun 2018 21:38:15 -0700
Matthew Wilcox <willy@infradead.org> wrote:

> On Fri, Jun 22, 2018 at 12:15:11PM +1000, Nicholas Piggin wrote:
> > On Thu, 21 Jun 2018 14:28:22 -0700
> > Matthew Wilcox <willy@infradead.org> wrote:
> > > static int alloc_context_id(int min_id, int max_id)
> ...
> > > -	spin_lock(&mmu_context_lock);
> > > -	err = ida_get_new_above(&mmu_context_ida, min_id, &index);
> > > -	spin_unlock(&mmu_context_lock);
> ...
> > > @@ -182,13 +148,11 @@ static void destroy_contexts(mm_context_t *ctx)
> > >  {
> > >  	int index, context_id;
> > >  
> > > -	spin_lock(&mmu_context_lock);
> > >  	for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
> > >  		context_id = ctx->extended_id[index];
> > >  		if (context_id)
> > > -			ida_remove(&mmu_context_ida, context_id);
> > > +			ida_free(&mmu_context_ida, context_id);
> > >  	}
> > > -	spin_unlock(&mmu_context_lock);
> > >  }
> > >  
> > >  static void pte_frag_destroy(void *pte_frag)
> > 
> > This hunk should be okay because the mmu_context_lock does not protect
> > the extended_id array, right Aneesh?
> 
> That's my understanding.  The code today does this:
> 
> static inline int alloc_extended_context(struct mm_struct *mm,
> 					 unsigned long ea)
> {
> 	int context_id;
> 
> 	int index = ea >> MAX_EA_BITS_PER_CONTEXT;
> 
> 	context_id = hash__alloc_context_id();
> 	if (context_id < 0)
> 		return context_id;
> 
> 	VM_WARN_ON(mm->context.extended_id[index]);
> 	mm->context.extended_id[index] = context_id;
> 
> so it's not currently protected by this lock.  I suppose we are currently
> protected from destroy_contexts() being called twice simultaneously, but
> you'll notice that we don't zero the array elements in destroy_contexts(),
> so if we somehow had a code path which could call it concurrently, we'd
> be seeing warnings when the second caller tried to remove the context

Yeah that'd be an existing bug.

> IDs from the IDA.  I deduced that something else must be preventing
> this situation from occurring (like, oh i don't know, this function only
> being called on process exit, so implicitly only called once per context).

I think that's exactly right.

Thanks,
Nick
* Re: [PATCH 13/26] ppc: Convert mmu context allocation to new IDA API
From: Aneesh Kumar K.V @ 2018-06-22  5:47 UTC
To: Matthew Wilcox, Nicholas Piggin
Cc: linux-kernel, Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
	Thiago Jung Bauermann, Ram Pai, linuxppc-dev

Matthew Wilcox <willy@infradead.org> writes:

> this situation from occurring (like, oh i don't know, this function only
> being called on process exit, so implicitly only called once per context).

That is correct.
* Re: [PATCH 13/26] ppc: Convert mmu context allocation to new IDA API
From: Aneesh Kumar K.V @ 2018-06-22  5:47 UTC
To: Nicholas Piggin, Matthew Wilcox
Cc: Ram Pai, linux-kernel, Paul Mackerras, Thiago Jung Bauermann, linuxppc-dev

Nicholas Piggin <npiggin@gmail.com> writes:

> On Thu, 21 Jun 2018 14:28:22 -0700
> Matthew Wilcox <willy@infradead.org> wrote:
>
>> ida_alloc_range is the perfect fit for this use case.  Eliminates
>> a custom spinlock, a call to ida_pre_get and a local check for the
>> allocated ID exceeding a maximum.
>>
>> Signed-off-by: Matthew Wilcox <willy@infradead.org>
>> ---
>>  arch/powerpc/mm/mmu_context_book3s64.c | 44 +++-----------------------
>>  1 file changed, 4 insertions(+), 40 deletions(-)
>>
>> diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
>> index f3d4b4a0e561..5a0cf2cc8ba0 100644
>> --- a/arch/powerpc/mm/mmu_context_book3s64.c
>> +++ b/arch/powerpc/mm/mmu_context_book3s64.c
>> @@ -26,48 +26,16 @@
>>  #include <asm/mmu_context.h>
>>  #include <asm/pgalloc.h>
>>  
>> -static DEFINE_SPINLOCK(mmu_context_lock);
>>  static DEFINE_IDA(mmu_context_ida);
>>  
>>  static int alloc_context_id(int min_id, int max_id)
>>  {
>> -	int index, err;
>> -
>> -again:
>> -	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
>> -		return -ENOMEM;
>> -
>> -	spin_lock(&mmu_context_lock);
>> -	err = ida_get_new_above(&mmu_context_ida, min_id, &index);
>> -	spin_unlock(&mmu_context_lock);
>> -
>> -	if (err == -EAGAIN)
>> -		goto again;
>> -	else if (err)
>> -		return err;
>> -
>> -	if (index > max_id) {
>> -		spin_lock(&mmu_context_lock);
>> -		ida_remove(&mmu_context_ida, index);
>> -		spin_unlock(&mmu_context_lock);
>> -		return -ENOMEM;
>> -	}
>> -
>> -	return index;
>> +	return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);
>>  }
>>  
>>  void hash__reserve_context_id(int id)
>>  {
>> -	int rc, result = 0;
>> -
>> -	do {
>> -		if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
>> -			break;
>> -
>> -		spin_lock(&mmu_context_lock);
>> -		rc = ida_get_new_above(&mmu_context_ida, id, &result);
>> -		spin_unlock(&mmu_context_lock);
>> -	} while (rc == -EAGAIN);
>> +	int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL);
>>  
>>  	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
>>  }
>> @@ -172,9 +140,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
>>  
>>  void __destroy_context(int context_id)
>>  {
>> -	spin_lock(&mmu_context_lock);
>> -	ida_remove(&mmu_context_ida, context_id);
>> -	spin_unlock(&mmu_context_lock);
>> +	ida_free(&mmu_context_ida, context_id);
>>  }
>>  EXPORT_SYMBOL_GPL(__destroy_context);
>>  
>> @@ -182,13 +148,11 @@ static void destroy_contexts(mm_context_t *ctx)
>>  {
>>  	int index, context_id;
>>  
>> -	spin_lock(&mmu_context_lock);
>>  	for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
>>  		context_id = ctx->extended_id[index];
>>  		if (context_id)
>> -			ida_remove(&mmu_context_ida, context_id);
>> +			ida_free(&mmu_context_ida, context_id);
>>  	}
>> -	spin_unlock(&mmu_context_lock);
>>  }
>>  
>>  static void pte_frag_destroy(void *pte_frag)
>
> This hunk should be okay because the mmu_context_lock does not protect
> the extended_id array, right Aneesh?

Yes. This is called at process exit, so we should not find parallel
calls. On the allocation side, we are protected by mmap_sem. We do
allocate extended_id when doing mmap.

-aneesh
* [PATCH 15/26] ppc: Convert vas ID allocation to new IDA API
From: Matthew Wilcox @ 2018-06-21 21:28 UTC
To: linux-kernel
Cc: Matthew Wilcox, Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
	linuxppc-dev

Removes a custom spinlock and simplifies the code.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
---
 arch/powerpc/platforms/powernv/vas-window.c | 26 ++++-----------------
 1 file changed, 4 insertions(+), 22 deletions(-)

diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c
index ff9f48812331..2a5e68a2664d 100644
--- a/arch/powerpc/platforms/powernv/vas-window.c
+++ b/arch/powerpc/platforms/powernv/vas-window.c
@@ -515,35 +515,17 @@ int init_winctx_regs(struct vas_window *window, struct vas_winctx *winctx)
 	return 0;
 }
 
-static DEFINE_SPINLOCK(vas_ida_lock);
-
 static void vas_release_window_id(struct ida *ida, int winid)
 {
-	spin_lock(&vas_ida_lock);
-	ida_remove(ida, winid);
-	spin_unlock(&vas_ida_lock);
+	ida_free(ida, winid);
 }
 
 static int vas_assign_window_id(struct ida *ida)
 {
-	int rc, winid;
-
-	do {
-		rc = ida_pre_get(ida, GFP_KERNEL);
-		if (!rc)
-			return -EAGAIN;
-
-		spin_lock(&vas_ida_lock);
-		rc = ida_get_new(ida, &winid);
-		spin_unlock(&vas_ida_lock);
-	} while (rc == -EAGAIN);
-
-	if (rc)
-		return rc;
+	int winid = ida_alloc_max(ida, VAX_WINDOWS_PER_CHIP, GFP_KERNEL);
 
-	if (winid > VAS_WINDOWS_PER_CHIP) {
-		pr_err("Too many (%d) open windows\n", winid);
-		vas_release_window_id(ida, winid);
+	if (winid == -ENOSPC) {
+		pr_err("Too many (%d) open windows\n", VAX_WINDOWS_PER_CHIP);
 		return -EAGAIN;
 	}
-- 
2.17.1
* Re: [PATCH 15/26] ppc: Convert vas ID allocation to new IDA API
From: Matthew Wilcox @ 2018-07-05 12:17 UTC
To: linux-kernel
Cc: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
	Sukadev Bhattiprolu, linuxppc-dev

On Thu, Jun 21, 2018 at 02:28:24PM -0700, Matthew Wilcox wrote:
> Removes a custom spinlock and simplifies the code.

I took a closer look at this patch as part of fixing the typo *ahem*.
The original code is buggy at the limit:

-	if (winid > VAS_WINDOWS_PER_CHIP) {
-		pr_err("Too many (%d) open windows\n", winid);
-		vas_release_window_id(ida, winid);

That permits winid to be == VAS_WINDOWS_PER_CHIP, which is 64 << 10.
Since you then go on to store:

	int id = window->winid;
	vinst->windows[id] = window;

and windows is defined as:

	struct vas_window *windows[VAS_WINDOWS_PER_CHIP];

that's a buffer overflow.  Here's the current version of my patch which
will be in linux-next tomorrow.

diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c
index ff9f48812331..e59e0e60e5b5 100644
--- a/arch/powerpc/platforms/powernv/vas-window.c
+++ b/arch/powerpc/platforms/powernv/vas-window.c
@@ -515,35 +515,17 @@ int init_winctx_regs(struct vas_window *window, struct vas_winctx *winctx)
 	return 0;
 }
 
-static DEFINE_SPINLOCK(vas_ida_lock);
-
 static void vas_release_window_id(struct ida *ida, int winid)
 {
-	spin_lock(&vas_ida_lock);
-	ida_remove(ida, winid);
-	spin_unlock(&vas_ida_lock);
+	ida_free(ida, winid);
 }
 
 static int vas_assign_window_id(struct ida *ida)
 {
-	int rc, winid;
-
-	do {
-		rc = ida_pre_get(ida, GFP_KERNEL);
-		if (!rc)
-			return -EAGAIN;
-
-		spin_lock(&vas_ida_lock);
-		rc = ida_get_new(ida, &winid);
-		spin_unlock(&vas_ida_lock);
-	} while (rc == -EAGAIN);
-
-	if (rc)
-		return rc;
+	int winid = ida_alloc_max(ida, VAS_WINDOWS_PER_CHIP - 1, GFP_KERNEL);
 
-	if (winid > VAS_WINDOWS_PER_CHIP) {
-		pr_err("Too many (%d) open windows\n", winid);
-		vas_release_window_id(ida, winid);
+	if (winid == -ENOSPC) {
+		pr_err("Too many (%d) open windows\n", VAS_WINDOWS_PER_CHIP);
 		return -EAGAIN;
 	}
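
[To make the off-by-one concrete, here is a small sketch.  The EXAMPLE_*
names are illustrative, not from the driver; the 64 << 10 value is the one
quoted in the message above.  With N table slots the valid indices are
0 through N - 1, and ida_alloc_max() treats its max argument as inclusive,
so the bound passed in must be N - 1.]

	#include <linux/idr.h>
	#include <linux/gfp.h>

	#define EXAMPLE_WINDOWS_PER_CHIP	(64 << 10)

	static void *example_windows[EXAMPLE_WINDOWS_PER_CHIP];

	static int example_assign_window_id(struct ida *ida)
	{
		/* max is inclusive: passing N - 1 keeps winid a valid array index */
		int winid = ida_alloc_max(ida, EXAMPLE_WINDOWS_PER_CHIP - 1, GFP_KERNEL);

		if (winid < 0)
			return winid;	/* -ENOSPC once every window ID is in use */

		/*
		 * winid is guaranteed to be in 0 .. EXAMPLE_WINDOWS_PER_CHIP - 1,
		 * so example_windows[winid] can be written without overflowing.
		 */
		return winid;
	}

[Passing VAS_WINDOWS_PER_CHIP itself, as the original bounds check
effectively allowed, would hand out an ID one past the end of the array,
which is the overflow described above.]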