From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
To: David Hildenbrand <dahi@linux.vnet.ibm.com>
Cc: linux-kernel@vger.kernel.org, mingo@redhat.com,
peterz@infradead.org, yang.shi@windriver.com,
bigeasy@linutronix.de, paulus@samba.org,
akpm@linux-foundation.org, heiko.carstens@de.ibm.com,
schwidefsky@de.ibm.com, borntraeger@de.ibm.com, mst@redhat.com,
tglx@linutronix.de, David.Laight@ACULAB.COM, hughd@google.com,
hocko@suse.cz, ralf@linux-mips.org, herbert@gondor.apana.org.au,
linux@arm.linux.org.uk, airlied@linux.ie,
daniel.vetter@intel.com, linux-mm@kvack.org,
linux-arch@vger.kernel.org
Subject: Re: [PATCH RFC 13/15] powerpc: enable_kernel_altivec() requires disabled preemption
Date: Thu, 07 May 2015 10:21:49 +1000 [thread overview]
Message-ID: <1430958109.3453.23.camel@kernel.crashing.org> (raw)
In-Reply-To: <1430934639-2131-14-git-send-email-dahi@linux.vnet.ibm.com>
On Wed, 2015-05-06 at 19:50 +0200, David Hildenbrand wrote:
> enable_kernel_altivec() has to be called with disabled preemption.
> Let's make this explicit, to prepare for pagefault_disable() not
> touching preemption anymore.
>
> Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
> ---
> arch/powerpc/lib/vmx-helper.c | 11 ++++++-----
> drivers/crypto/vmx/aes.c | 8 +++++++-
> drivers/crypto/vmx/aes_cbc.c | 6 ++++++
> drivers/crypto/vmx/ghash.c | 8 ++++++++
> 4 files changed, 27 insertions(+), 6 deletions(-)
>
> diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c
> index 3cf529c..ac93a3b 100644
> --- a/arch/powerpc/lib/vmx-helper.c
> +++ b/arch/powerpc/lib/vmx-helper.c
> @@ -27,11 +27,11 @@ int enter_vmx_usercopy(void)
> if (in_interrupt())
> return 0;
>
> - /* This acts as preempt_disable() as well and will make
> - * enable_kernel_altivec(). We need to disable page faults
> - * as they can call schedule and thus make us lose the VMX
> - * context. So on page faults, we just fail which will cause
> - * a fallback to the normal non-vmx copy.
> + preempt_disable();
> + /*
> + * We need to disable page faults as they can call schedule and
> + * thus make us lose the VMX context. So on page faults, we just
> + * fail which will cause a fallback to the normal non-vmx copy.
> */
> pagefault_disable();
>
> @@ -47,6 +47,7 @@ int enter_vmx_usercopy(void)
> int exit_vmx_usercopy(void)
> {
> pagefault_enable();
> + preempt_enable();
> return 0;
> }
>
> diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
> index ab300ea..a9064e3 100644
> --- a/drivers/crypto/vmx/aes.c
> +++ b/drivers/crypto/vmx/aes.c
> @@ -78,12 +78,14 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
> int ret;
> struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
>
> + preempt_disable();
> pagefault_disable();
> enable_kernel_altivec();
> ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
> ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
> pagefault_enable();
> -
> + preempt_enable();
> +
> ret += crypto_cipher_setkey(ctx->fallback, key, keylen);
> return ret;
> }
> @@ -95,10 +97,12 @@ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
> if (in_interrupt()) {
> crypto_cipher_encrypt_one(ctx->fallback, dst, src);
> } else {
> + preempt_disable();
> pagefault_disable();
> enable_kernel_altivec();
> aes_p8_encrypt(src, dst, &ctx->enc_key);
> pagefault_enable();
> + preempt_enable();
> }
> }
>
> @@ -109,10 +113,12 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
> if (in_interrupt()) {
> crypto_cipher_decrypt_one(ctx->fallback, dst, src);
> } else {
> + preempt_disable();
> pagefault_disable();
> enable_kernel_altivec();
> aes_p8_decrypt(src, dst, &ctx->dec_key);
> pagefault_enable();
> + preempt_enable();
> }
> }
>
> diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
> index 1a559b7..477284a 100644
> --- a/drivers/crypto/vmx/aes_cbc.c
> +++ b/drivers/crypto/vmx/aes_cbc.c
> @@ -79,11 +79,13 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
> int ret;
> struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
>
> + preempt_disable();
> pagefault_disable();
> enable_kernel_altivec();
> ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
> ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
> pagefault_enable();
> + preempt_enable();
>
> ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
> return ret;
> @@ -106,6 +108,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
> if (in_interrupt()) {
> ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes);
> } else {
> + preempt_disable();
> pagefault_disable();
> enable_kernel_altivec();
>
> @@ -119,6 +122,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
> }
>
> pagefault_enable();
> + preempt_enable();
> }
>
> return ret;
> @@ -141,6 +145,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
> if (in_interrupt()) {
> ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes);
> } else {
> + preempt_disable();
> pagefault_disable();
> enable_kernel_altivec();
>
> @@ -154,6 +159,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
> }
>
> pagefault_enable();
> + preempt_enable();
> }
>
> return ret;
> diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
> index d0ffe27..f255ec4 100644
> --- a/drivers/crypto/vmx/ghash.c
> +++ b/drivers/crypto/vmx/ghash.c
> @@ -114,11 +114,13 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
> if (keylen != GHASH_KEY_LEN)
> return -EINVAL;
>
> + preempt_disable();
> pagefault_disable();
> enable_kernel_altivec();
> enable_kernel_fp();
> gcm_init_p8(ctx->htable, (const u64 *) key);
> pagefault_enable();
> + preempt_enable();
> return crypto_shash_setkey(ctx->fallback, key, keylen);
> }
>
> @@ -140,23 +142,27 @@ static int p8_ghash_update(struct shash_desc *desc,
> }
> memcpy(dctx->buffer + dctx->bytes, src,
> GHASH_DIGEST_SIZE - dctx->bytes);
> + preempt_disable();
> pagefault_disable();
> enable_kernel_altivec();
> enable_kernel_fp();
> gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
> GHASH_DIGEST_SIZE);
> pagefault_enable();
> + preempt_enable();
> src += GHASH_DIGEST_SIZE - dctx->bytes;
> srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
> dctx->bytes = 0;
> }
> len = srclen & ~(GHASH_DIGEST_SIZE - 1);
> if (len) {
> + preempt_disable();
> pagefault_disable();
> enable_kernel_altivec();
> enable_kernel_fp();
> gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
> pagefault_enable();
> + preempt_enable();
> src += len;
> srclen -= len;
> }
> @@ -180,12 +186,14 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
> if (dctx->bytes) {
> for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
> dctx->buffer[i] = 0;
> + preempt_disable();
> pagefault_disable();
> enable_kernel_altivec();
> enable_kernel_fp();
> gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
> GHASH_DIGEST_SIZE);
> pagefault_enable();
> + preempt_enable();
> dctx->bytes = 0;
> }
> memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">dont@kvack.org</a>
next prev parent reply other threads:[~2015-05-07 0:24 UTC|newest]
Thread overview: 38+ messages / expand[flat|nested] mbox.gz Atom feed top
2015-05-06 17:50 [PATCH RFC 00/15] decouple pagefault_disable() from preempt_disable() David Hildenbrand
2015-05-06 17:50 ` [PATCH RFC 01/15] uaccess: count pagefault_disable() levels in pagefault_disabled David Hildenbrand
2015-05-07 10:22 ` Peter Zijlstra
2015-05-07 10:50 ` David Hildenbrand
2015-05-07 11:12 ` Peter Zijlstra
2015-05-07 11:23 ` David Hildenbrand
2015-05-07 11:25 ` Ingo Molnar
2015-05-07 11:30 ` David Hildenbrand
2015-05-07 11:42 ` Peter Zijlstra
2015-05-07 11:40 ` David Hildenbrand
2015-05-07 11:48 ` Peter Zijlstra
2015-05-07 11:51 ` Peter Zijlstra
2015-05-07 12:14 ` David Hildenbrand
2015-05-07 12:27 ` Ingo Molnar
2015-05-07 12:32 ` Peter Zijlstra
2015-05-07 15:45 ` [PATCH draft] mm: use pagefault_disable() to check for disabled pagefaults in the handler David Hildenbrand
2015-05-07 11:12 ` [PATCH RFC 01/15] uaccess: count pagefault_disable() levels in pagefault_disabled Ingo Molnar
2015-05-06 17:50 ` [PATCH RFC 02/15] mm, uaccess: trigger might_sleep() in might_fault() with disabled pagefaults David Hildenbrand
2015-05-06 17:50 ` [PATCH RFC 03/15] uaccess: clarify that uaccess may only sleep if pagefaults are enabled David Hildenbrand
2015-05-06 17:50 ` [PATCH RFC 04/15] mm: explicitly disable/enable preemption in kmap_atomic_* David Hildenbrand
2015-05-06 17:50 ` [PATCH RFC 05/15] mips: kmap_coherent relies on disabled preemption David Hildenbrand
2015-05-06 17:50 ` [PATCH RFC 06/15] mm: use pagefault_disabled() to check for disabled pagefaults David Hildenbrand
2015-05-06 17:50 ` [PATCH RFC 07/15] drm/i915: " David Hildenbrand
2015-05-06 17:50 ` [PATCH RFC 08/15] futex: UP futex_atomic_op_inuser() relies on disabled preemption David Hildenbrand
2015-05-06 17:50 ` [PATCH RFC 09/15] futex: UP futex_atomic_cmpxchg_inatomic() " David Hildenbrand
2015-05-06 17:50 ` [PATCH RFC 10/15] arm/futex: " David Hildenbrand
2015-05-06 17:50 ` [PATCH RFC 11/15] arm/futex: UP futex_atomic_op_inuser() " David Hildenbrand
2015-05-06 17:50 ` [PATCH RFC 12/15] futex: clarify that preemption doesn't have to be disabled David Hildenbrand
2015-05-06 17:50 ` [PATCH RFC 13/15] powerpc: enable_kernel_altivec() requires disabled preemption David Hildenbrand
2015-05-07 0:21 ` Benjamin Herrenschmidt [this message]
2015-05-06 17:50 ` [PATCH RFC 14/15] mips: properly lock access to the fpu David Hildenbrand
2015-05-06 17:50 ` [PATCH RFC 15/15] uaccess: decouple preemption from the pagefault logic David Hildenbrand
2015-05-06 22:01 ` [PATCH RFC 00/15] decouple pagefault_disable() from preempt_disable() Andrew Morton
2015-05-07 6:23 ` David Hildenbrand
2015-05-07 9:48 ` Ingo Molnar
2015-05-07 10:51 ` Christian Borntraeger
2015-05-07 11:08 ` Ingo Molnar
2015-05-07 11:40 ` Peter Zijlstra
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1430958109.3453.23.camel@kernel.crashing.org \
--to=benh@kernel.crashing.org \
--cc=David.Laight@ACULAB.COM \
--cc=airlied@linux.ie \
--cc=akpm@linux-foundation.org \
--cc=bigeasy@linutronix.de \
--cc=borntraeger@de.ibm.com \
--cc=dahi@linux.vnet.ibm.com \
--cc=daniel.vetter@intel.com \
--cc=heiko.carstens@de.ibm.com \
--cc=herbert@gondor.apana.org.au \
--cc=hocko@suse.cz \
--cc=hughd@google.com \
--cc=linux-arch@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux@arm.linux.org.uk \
--cc=mingo@redhat.com \
--cc=mst@redhat.com \
--cc=paulus@samba.org \
--cc=peterz@infradead.org \
--cc=ralf@linux-mips.org \
--cc=schwidefsky@de.ibm.com \
--cc=tglx@linutronix.de \
--cc=yang.shi@windriver.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).