From: <gregkh@linuxfoundation.org>
To: dwmw@amazon.co.uk, ak@linux.intel.com, arjan@linux.intel.com,
dave.hansen@intel.com, gregkh@linux-foundation.org,
gregkh@linuxfoundation.org, jikos@kernel.org,
jpoimboe@redhat.com, keescook@google.com, luto@amacapital.net,
mingo@kernel.org, peterz@infradead.org, pjt@google.com,
riel@redhat.com, tglx@linutronix.de, tim.c.chen@linux.intel.com,
torvalds@linux-foundation.org
Cc: <stable@vger.kernel.org>, <stable-commits@vger.kernel.org>
Subject: Patch "x86/retpoline/crypto: Convert crypto assembler indirect jumps" has been added to the 4.9-stable tree
Date: Mon, 15 Jan 2018 10:07:07 +0100
Message-ID: <151600722769223@kroah.com>
This is a note to let you know that I've just added the patch titled

    x86/retpoline/crypto: Convert crypto assembler indirect jumps

to the 4.9-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     x86-retpoline-crypto-convert-crypto-assembler-indirect-jumps.patch
and it can be found in the queue-4.9 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.
From 9697fa39efd3fc3692f2949d4045f393ec58450b Mon Sep 17 00:00:00 2001
From: David Woodhouse <dwmw@amazon.co.uk>
Date: Thu, 11 Jan 2018 21:46:27 +0000
Subject: x86/retpoline/crypto: Convert crypto assembler indirect jumps
From: David Woodhouse <dwmw@amazon.co.uk>
commit 9697fa39efd3fc3692f2949d4045f393ec58450b upstream.
Convert all indirect jumps in crypto assembler code to use non-speculative
sequences when CONFIG_RETPOLINE is enabled.
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Arjan van de Ven <arjan@linux.intel.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Cc: gnomes@lxorguk.ukuu.org.uk
Cc: Rik van Riel <riel@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: thomas.lendacky@amd.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Jiri Kosina <jikos@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Kees Cook <keescook@google.com>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
Cc: Paul Turner <pjt@google.com>
Link: https://lkml.kernel.org/r/1515707194-20531-6-git-send-email-dwmw@amazon.co.uk
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/x86/crypto/aesni-intel_asm.S | 5 +++--
arch/x86/crypto/camellia-aesni-avx-asm_64.S | 3 ++-
arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 3 ++-
arch/x86/crypto/crc32c-pcl-intel-asm_64.S | 3 ++-
4 files changed, 9 insertions(+), 5 deletions(-)
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -32,6 +32,7 @@
#include <linux/linkage.h>
#include <asm/inst.h>
#include <asm/frame.h>
+#include <asm/nospec-branch.h>
/*
* The following macros are used to move an (un)aligned 16 byte value to/from
@@ -2734,7 +2735,7 @@ ENTRY(aesni_xts_crypt8)
pxor INC, STATE4
movdqu IV, 0x30(OUTP)
- call *%r11
+ CALL_NOSPEC %r11
movdqu 0x00(OUTP), INC
pxor INC, STATE1
@@ -2779,7 +2780,7 @@ ENTRY(aesni_xts_crypt8)
_aesni_gf128mul_x_ble()
movups IV, (IVP)
- call *%r11
+ CALL_NOSPEC %r11
movdqu 0x40(OUTP), INC
pxor INC, STATE1
--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -17,6 +17,7 @@
#include <linux/linkage.h>
#include <asm/frame.h>
+#include <asm/nospec-branch.h>
#define CAMELLIA_TABLE_BYTE_LEN 272
@@ -1224,7 +1225,7 @@ camellia_xts_crypt_16way:
vpxor 14 * 16(%rax), %xmm15, %xmm14;
vpxor 15 * 16(%rax), %xmm15, %xmm15;
- call *%r9;
+ CALL_NOSPEC %r9;
addq $(16 * 16), %rsp;
--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
@@ -12,6 +12,7 @@
#include <linux/linkage.h>
#include <asm/frame.h>
+#include <asm/nospec-branch.h>
#define CAMELLIA_TABLE_BYTE_LEN 272
@@ -1337,7 +1338,7 @@ camellia_xts_crypt_32way:
vpxor 14 * 32(%rax), %ymm15, %ymm14;
vpxor 15 * 32(%rax), %ymm15, %ymm15;
- call *%r9;
+ CALL_NOSPEC %r9;
addq $(16 * 32), %rsp;
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -45,6 +45,7 @@
#include <asm/inst.h>
#include <linux/linkage.h>
+#include <asm/nospec-branch.h>
## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
@@ -172,7 +173,7 @@ continue_block:
movzxw (bufp, %rax, 2), len
lea crc_array(%rip), bufp
lea (bufp, len, 1), bufp
- jmp *bufp
+ JMP_NOSPEC bufp
################################################################
## 2a) PROCESS FULL BLOCKS:
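
For readers unfamiliar with the macros used above: CALL_NOSPEC and JMP_NOSPEC, defined in
<asm/nospec-branch.h>, replace an indirect branch such as "call *%r11" or "jmp *bufp" with a
"retpoline" sequence when CONFIG_RETPOLINE is enabled. The sketch below shows roughly what
that sequence looks like for a call through %r11; the local label names are illustrative,
and the real macros use the kernel's alternatives machinery to patch in either this thunk
or a plain indirect branch at boot time.

	# Call site: instead of "call *%r11" the code emits a direct call
	# to a per-register thunk.
	call	__x86_indirect_thunk_r11

	__x86_indirect_thunk_r11:
		call	.Ldo_rop	# pushes the address of .Lspec_trap as the return address
	.Lspec_trap:
		pause			# any speculative "return" lands here ...
		lfence			# ... and is stopped dead in this loop
		jmp	.Lspec_trap
	.Ldo_rop:
		mov	%r11, (%rsp)	# overwrite the pushed return address with the real target
		ret			# architecturally jumps to *%r11

The return-stack-buffer prediction for the final "ret" points back at the capture loop, so
speculation never consumes an attacker-trainable indirect-branch prediction, while the
architectural path still reaches the intended target. JMP_NOSPEC works the same way, with
the call site issuing a direct jmp to the thunk.
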
Patches currently in stable-queue which might be from dwmw@amazon.co.uk are
queue-4.9/x86-spectre-add-boot-time-option-to-select-spectre-v2-mitigation.patch
queue-4.9/x86-retpoline-irq32-convert-assembler-indirect-jumps.patch
queue-4.9/objtool-detect-jumps-to-retpoline-thunks.patch
queue-4.9/x86-cpufeatures-add-x86_bug_spectre_v.patch
queue-4.9/x86-alternatives-add-missing-n-at-end-of-alternative-inline-asm.patch
queue-4.9/x86-retpoline-hyperv-convert-assembler-indirect-jumps.patch
queue-4.9/x86-retpoline-entry-convert-entry-assembler-indirect-jumps.patch
queue-4.9/sysfs-cpu-fix-typos-in-vulnerability-documentation.patch
queue-4.9/x86-cpufeatures-add-x86_bug_cpu_insecure.patch
queue-4.9/x86-cpufeatures-make-cpu-bugs-sticky.patch
queue-4.9/x86-cpu-amd-make-lfence-a-serializing-instruction.patch
queue-4.9/x86-retpoline-ftrace-convert-ftrace-assembler-indirect-jumps.patch
queue-4.9/objtool-allow-alternatives-to-be-ignored.patch
queue-4.9/x86-cpu-implement-cpu-vulnerabilites-sysfs-functions.patch
queue-4.9/x86-retpoline-crypto-convert-crypto-assembler-indirect-jumps.patch
queue-4.9/x86-cpu-factor-out-application-of-forced-cpu-caps.patch
queue-4.9/x86-retpoline-xen-convert-xen-hypercall-indirect-jumps.patch
queue-4.9/x86-retpoline-checksum32-convert-assembler-indirect-jumps.patch
queue-4.9/x86-mm-32-move-setup_clear_cpu_cap-x86_feature_pcid-earlier.patch
queue-4.9/sysfs-cpu-add-vulnerability-folder.patch
queue-4.9/x86-retpoline-fill-return-stack-buffer-on-vmexit.patch
queue-4.9/x86-pti-rename-bug_cpu_insecure-to-bug_cpu_meltdown.patch
queue-4.9/x86-retpoline-remove-compile-time-warning.patch
queue-4.9/x86-alternatives-fix-optimize_nops-checking.patch
queue-4.9/x86-cpu-amd-use-lfence_rdtsc-in-preference-to-mfence_rdtsc.patch
queue-4.9/x86-retpoline-add-initial-retpoline-support.patch