From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mail.linutronix.de (146.0.238.70:993) by crypto-ml.lab.linutronix.de with IMAP4-SSL for ; 24 Feb 2019 15:11:45 -0000 Received: from mga05.intel.com ([192.55.52.43]) by Galois.linutronix.de with esmtps (TLS1.2:DHE_RSA_AES_256_CBC_SHA256:256) (Exim 4.80) (envelope-from ) id 1gxvNr-0001Qt-3R for speck@linutronix.de; Sun, 24 Feb 2019 16:08:04 +0100 From: Andi Kleen Subject: [MODERATED] [PATCH v6 23/43] MDSv6 Date: Sun, 24 Feb 2019 07:07:29 -0800 Message-Id: In-Reply-To: References: In-Reply-To: References: Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: 7bit MIME-Version: 1.0 To: speck@linutronix.de Cc: Andi Kleen List-ID: Instrument some strategic skbuff functions that either touch packet data directly, or are likely followed by a user data touch like a memcpy, to schedule a cpu clear on next kernel exit. This is only done inside interrupts, outside we assume it only touches the current process's data. In principle network data should be encrypted anyway, but it's better to not leak it. This provides protection for the network softirq. Needs more auditing. Signed-off-by: Andi Kleen --- include/linux/skbuff.h | 2 ++ net/core/skbuff.c | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 95d25b010a25..7cd26e7b15d5 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -40,6 +40,7 @@ #include #include #include +#include /* The interface for checksum offload between the stack and networking drivers * is as follows... 
@@ -2093,6 +2094,7 @@ static inline void *__skb_put(struct sk_buff *skb, unsigned int len) SKB_LINEAR_ASSERT(skb); skb->tail += len; skb->len += len; + lazy_clear_cpu_interrupt(); return tmp; } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 26d848484912..bd30d79b894d 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -63,6 +63,7 @@ #include #include #include +#include #include #include @@ -1189,6 +1190,9 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) if (!num_frags) goto release; + /* Likely to copy user data */ + lazy_clear_cpu_interrupt(); + new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT; for (i = 0; i < new_frags; i++) { page = alloc_page(gfp_mask); @@ -1353,6 +1357,9 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) if (!n) return NULL; + /* Copies user data */ + lazy_clear_cpu_interrupt(); + /* Set the data pointer */ skb_reserve(n, headerlen); /* Set the tail pointer and length */ @@ -1460,6 +1467,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, size = SKB_DATA_ALIGN(size); + lazy_clear_cpu_interrupt(); if (skb_pfmemalloc(skb)) gfp_mask |= __GFP_MEMALLOC; data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), @@ -1524,6 +1532,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, if (!skb->sk || skb->destructor == sock_edemux) skb->truesize += size - osize; + return 0; nofrags: @@ -1588,6 +1597,9 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb, if (!n) return NULL; + /* May copy user data */ + lazy_clear_cpu_interrupt(); + skb_reserve(n, newheadroom); /* Set the tail pointer and length */ @@ -1676,6 +1688,8 @@ EXPORT_SYMBOL(__skb_pad); void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) { + /* Likely to be followed by a user data copy */ + lazy_clear_cpu_interrupt(); if (tail != skb) { skb->data_len += len; skb->len += len; @@ -1701,6 +1715,8 @@ void *skb_put(struct sk_buff *skb, unsigned int len) skb->len += 
len; if (unlikely(skb->tail > skb->end)) skb_over_panic(skb, len, __builtin_return_address(0)); + /* Likely to be followed by a user data copy */ + lazy_clear_cpu_interrupt(); return tmp; } EXPORT_SYMBOL(skb_put); @@ -1720,6 +1736,7 @@ void *skb_push(struct sk_buff *skb, unsigned int len) skb->len += len; if (unlikely(skb->data < skb->head)) skb_under_panic(skb, len, __builtin_return_address(0)); + /* No clear cpu, assume this is only header data */ return skb->data; } EXPORT_SYMBOL(skb_push); @@ -2026,6 +2043,9 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) struct sk_buff *frag_iter; int i, copy; + /* Copies user data */ + lazy_clear_cpu_interrupt(); + if (offset > (int)skb->len - len) goto fault; @@ -2113,6 +2133,8 @@ static struct page *linear_to_page(struct page *page, unsigned int *len, { struct page_frag *pfrag = sk_page_frag(sk); + lazy_clear_cpu_interrupt(); + if (!sk_page_frag_refill(sk, pfrag)) return NULL; @@ -2387,6 +2409,9 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) struct sk_buff *frag_iter; int i, copy; + /* Copies user data */ + lazy_clear_cpu_interrupt(); + if (offset > (int)skb->len - len) goto fault; @@ -2467,6 +2492,9 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, struct sk_buff *frag_iter; int pos = 0; + /* Reads packet data */ + lazy_clear_cpu_interrupt(); + /* Checksum header. */ if (copy > 0) { if (copy > len) @@ -2559,6 +2587,9 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, struct sk_buff *frag_iter; int pos = 0; + /* Reads packet data */ + lazy_clear_cpu_interrupt(); + /* Copy header. */ if (copy > 0) { if (copy > len) @@ -3445,6 +3476,7 @@ void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) BUG_ON(len > skb->len); __skb_pull(skb, len); skb_postpull_rcsum(skb, data, len); + lazy_clear_cpu_interrupt(); return skb->data; } EXPORT_SYMBOL_GPL(skb_pull_rcsum); -- 2.17.2