From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
To: linux-kernel@vger.kernel.org
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
stable@vger.kernel.org, Matt Mullins <mmullins@fb.com>,
Andrii Nakryiko <andriin@fb.com>,
Daniel Borkmann <daniel@iogearbox.net>,
Alexei Starovoitov <ast@kernel.org>
Subject: [PATCH 5.1 46/55] bpf: fix nested bpf tracepoints with per-cpu data
Date: Tue, 2 Jul 2019 10:01:54 +0200
Message-ID: <20190702080126.479691032@linuxfoundation.org>
In-Reply-To: <20190702080124.103022729@linuxfoundation.org>
From: Matt Mullins <mmullins@fb.com>

commit 9594dc3c7e71b9f52bee1d7852eb3d4e3aea9e99 upstream.

BPF_PROG_TYPE_RAW_TRACEPOINTs can be executed nested on the same CPU, as
they do not increment bpf_prog_active while executing.

This enables three levels of nesting, to support
  - a kprobe or raw tp or perf event,
  - another one of the above that irq context happens to call, and
  - another one in nmi context
(at most one of which may be a kprobe or perf event).
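
[Editor's note: the fix below replaces the single per-CPU scratch buffer with
one slot per nesting level, claimed via a per-CPU counter and released on the
way out. The following is a minimal user-space C sketch of that idea, with
hypothetical names; it is an illustration of the pattern, not the kernel code
in the patch.]

#include <stdio.h>

#define MAX_NEST 3	/* normal, irq, and nmi context */

struct sample { int data; };

/* One scratch slot per nesting level; in the kernel these are per-CPU. */
static struct sample slots[MAX_NEST];
static int nest_level;	/* in the kernel: a per-CPU counter */

/* Claim the slot for the current depth, or fail (the kernel returns -EBUSY). */
static struct sample *get_slot(void)
{
	int level = ++nest_level;

	if (level > MAX_NEST) {
		nest_level--;
		return NULL;
	}
	return &slots[level - 1];
}

static void put_slot(void)
{
	nest_level--;
}

/* A handler that may be re-entered from a nested context. */
static void handler(int depth)
{
	struct sample *s = get_slot();

	if (!s) {
		printf("depth %d: busy, dropping event\n", depth);
		return;
	}
	s->data = depth;
	if (depth < 4)		/* simulate a nested tracepoint firing */
		handler(depth + 1);
	printf("depth %d used slot %d\n", depth, (int)(s - slots));
	put_slot();
}

int main(void)
{
	handler(1);	/* the 4th level is rejected, mirroring the WARN_ON_ONCE */
	return 0;
}
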
Fixes: 20b9d7ac4852 ("bpf: avoid excessive stack usage for perf_sample_data")
Signed-off-by: Matt Mullins <mmullins@fb.com>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
kernel/trace/bpf_trace.c | 100 +++++++++++++++++++++++++++++++++++++++--------
1 file changed, 84 insertions(+), 16 deletions(-)

--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -402,8 +402,6 @@ static const struct bpf_func_proto bpf_p
.arg4_type = ARG_CONST_SIZE,
};
-static DEFINE_PER_CPU(struct perf_sample_data, bpf_trace_sd);
-
static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
u64 flags, struct perf_sample_data *sd)
@@ -434,24 +432,50 @@ __bpf_perf_event_output(struct pt_regs *
return perf_event_output(event, sd, regs);
}
+/*
+ * Support executing tracepoints in normal, irq, and nmi context that each call
+ * bpf_perf_event_output
+ */
+struct bpf_trace_sample_data {
+ struct perf_sample_data sds[3];
+};
+
+static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
+static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
u64, flags, void *, data, u64, size)
{
- struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd);
+ struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
+ int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
struct perf_raw_record raw = {
.frag = {
.size = size,
.data = data,
},
};
+ struct perf_sample_data *sd;
+ int err;
- if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
- return -EINVAL;
+ if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ sd = &sds->sds[nest_level - 1];
+
+ if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
+ err = -EINVAL;
+ goto out;
+ }
perf_sample_data_init(sd, 0, 0);
sd->raw = &raw;
- return __bpf_perf_event_output(regs, map, flags, sd);
+ err = __bpf_perf_event_output(regs, map, flags, sd);
+
+out:
+ this_cpu_dec(bpf_trace_nest_level);
+ return err;
}
static const struct bpf_func_proto bpf_perf_event_output_proto = {
@@ -808,16 +832,48 @@ pe_prog_func_proto(enum bpf_func_id func
/*
* bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
* to avoid potential recursive reuse issue when/if tracepoints are added
- * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack
+ * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
+ *
+ * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
+ * in normal, irq, and nmi context.
*/
-static DEFINE_PER_CPU(struct pt_regs, bpf_raw_tp_regs);
+struct bpf_raw_tp_regs {
+ struct pt_regs regs[3];
+};
+static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
+static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
+static struct pt_regs *get_bpf_raw_tp_regs(void)
+{
+ struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
+ int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
+
+ if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
+ this_cpu_dec(bpf_raw_tp_nest_level);
+ return ERR_PTR(-EBUSY);
+ }
+
+ return &tp_regs->regs[nest_level - 1];
+}
+
+static void put_bpf_raw_tp_regs(void)
+{
+ this_cpu_dec(bpf_raw_tp_nest_level);
+}
+
BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
- struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);
+ struct pt_regs *regs = get_bpf_raw_tp_regs();
+ int ret;
+
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
perf_fetch_caller_regs(regs);
- return ____bpf_perf_event_output(regs, map, flags, data, size);
+ ret = ____bpf_perf_event_output(regs, map, flags, data, size);
+
+ put_bpf_raw_tp_regs();
+ return ret;
}
static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
@@ -834,12 +890,18 @@ static const struct bpf_func_proto bpf_p
BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
struct bpf_map *, map, u64, flags)
{
- struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);
+ struct pt_regs *regs = get_bpf_raw_tp_regs();
+ int ret;
+
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
perf_fetch_caller_regs(regs);
/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
- return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
- flags, 0, 0);
+ ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
+ flags, 0, 0);
+ put_bpf_raw_tp_regs();
+ return ret;
}
static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
@@ -854,11 +916,17 @@ static const struct bpf_func_proto bpf_g
BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
void *, buf, u32, size, u64, flags)
{
- struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);
+ struct pt_regs *regs = get_bpf_raw_tp_regs();
+ int ret;
+
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
perf_fetch_caller_regs(regs);
- return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
- (unsigned long) size, flags, 0);
+ ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
+ (unsigned long) size, flags, 0);
+ put_bpf_raw_tp_regs();
+ return ret;
}
static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {