[PATCH net-next 2/5] bpf: add various test cases to verifier selftests
From: Daniel Borkmann @ 2017-04-26 23:39 UTC
To: davem; +Cc: ast, netdev, Daniel Borkmann
Add several test cases around ldimm64, fp arithmetic and direct
packet access.
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
---
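A note on the encoding exercised by the ld_imm64 tests: a
BPF_LD | BPF_IMM | BPF_DW load spans two struct bpf_insn slots, where
the second slot is a pseudo insn whose code, registers and offset must
all remain zero and whose imm carries the upper 32 bits of the
constant. Below is a minimal user-space sketch of that encoding
(illustration only, not part of this patch; the emit_ld_imm64() helper
name is made up):

  #include <linux/bpf.h>
  #include <stdint.h>

  static void emit_ld_imm64(struct bpf_insn insn[2], int dst, uint64_t v)
  {
          insn[0] = (struct bpf_insn) {
                  .code    = BPF_LD | BPF_IMM | BPF_DW,
                  .dst_reg = dst,
                  .imm     = (uint32_t)v,         /* lower 32 bits */
          };
          /* Second half: everything except imm must stay zero,
           * otherwise the verifier rejects the pair, which is what
           * test8 through test13 below exercise. */
          insn[1] = (struct bpf_insn) {
                  .imm     = (uint32_t)(v >> 32), /* upper 32 bits */
          };
  }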
tools/testing/selftests/bpf/test_verifier.c | 137 ++++++++++++++++++++++++++++
1 file changed, 137 insertions(+)
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 95a8d5f..d3395c1 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -191,6 +191,86 @@ struct test_val {
.result = REJECT,
},
{
+ "test6 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
+ BPF_RAW_INSN(0, 0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
+ {
+ "test7 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
+ BPF_RAW_INSN(0, 0, 0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
+ {
+ "test8 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
+ BPF_RAW_INSN(0, 0, 0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "uses reserved fields",
+ .result = REJECT,
+ },
+ {
+ "test9 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
+ BPF_RAW_INSN(0, 0, 0, 1, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+ },
+ {
+ "test10 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
+ BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+ },
+ {
+ "test11 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
+ BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+ },
+ {
+ "test12 ld_imm64",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
+ BPF_RAW_INSN(0, 0, 0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "not pointing to valid bpf_map",
+ .result = REJECT,
+ },
+ {
+ "test13 ld_imm64",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
+ BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+ },
+ {
"no bpf_exit",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
@@ -331,6 +411,30 @@ struct test_val {
.result = REJECT,
},
{
+ "invalid fp arithmetic",
+ /* If this gets ever changed, make sure JITs can deal with it. */
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 pointer arithmetic",
+ .result_unpriv = REJECT,
+ .errstr = "R1 invalid mem access",
+ .result = REJECT,
+ },
+ {
+ "non-invalid fp arithmetic",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
+ {
"invalid argument register",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
@@ -1801,6 +1905,20 @@ struct test_val {
.result = ACCEPT,
},
{
+ "unpriv: adding of fp",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "pointer arithmetic prohibited",
+ .result_unpriv = REJECT,
+ .errstr = "R1 invalid mem access",
+ .result = REJECT,
+ },
+ {
"unpriv: cmp of stack pointer",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
@@ -2472,6 +2590,25 @@ struct test_val {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
+ "direct packet access: test16 (arith on data_end)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
"helper access to packet: test1, valid packet_ptr range",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
--
1.9.3

[PATCH net-next 4/5] bpf: fix _htons occurences in test_progs
From: Daniel Borkmann @ 2017-04-26 23:39 UTC
To: davem; +Cc: ast, netdev, Daniel Borkmann
Dave reported that on sparc, test_progs generates buggy, byte-swapped
eth->h_proto protocol comparisons:

  10: (15) if r3 == 0xdd86 goto pc+9
  R0=imm2,min_value=2,max_value=2 R1=pkt(id=0,off=0,r=14) R2=pkt_end R3=inv
  R4=pkt(id=0,off=14,r=14) R5=inv56 R10=fp

This is caused by the unconditional ...

  #define htons __builtin_bswap16
  #define ntohs __builtin_bswap16

... in test_progs. Make use of asm/byteorder.h instead, use
__constant_htons() where possible, and only perform the bswap16
on little endian in the non-constant case.
Fixes: 6882804c916b ("selftests/bpf: add a test for overlapping packet range checks")
Fixes: 37821613626e ("selftests/bpf: add l4 load balancer test based on sched_cls")
Reported-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
---
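For illustration, a standalone user-space sketch (not part of this
patch) of why the unconditional bswap16 define goes wrong on big
endian:

  #include <stdio.h>
  #include <stdint.h>

  /* the old, unconditional test_progs definition */
  #define broken_htons(x) __builtin_bswap16(x)

  int main(void)
  {
          uint16_t proto = 0x86dd;        /* ETH_P_IPV6 */

          /* On sparc (big endian), host order already equals network
           * order, so this swap corrupts the value to 0xdd86 and
           * comparisons against eth->h_proto never match. On little
           * endian the swap happens to be the correct conversion. */
          printf("broken_htons(0x%04x) = 0x%04x\n", proto,
                 (uint16_t)broken_htons(proto));
          return 0;
  }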
tools/testing/selftests/bpf/bpf_util.h | 19 +++++++++++++++++++
tools/testing/selftests/bpf/test_l4lb.c | 11 +++++------
tools/testing/selftests/bpf/test_pkt_access.c | 6 +++---
tools/testing/selftests/bpf/test_progs.c | 10 ++++------
4 files changed, 31 insertions(+), 15 deletions(-)
diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h
index 84a5d18..7de2796 100644
--- a/tools/testing/selftests/bpf/bpf_util.h
+++ b/tools/testing/selftests/bpf/bpf_util.h
@@ -6,6 +6,25 @@
#include <string.h>
#include <errno.h>
+#include <asm/byteorder.h>
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+# define __bpf_ntohs(x) __builtin_bswap16(x)
+# define __bpf_htons(x) __builtin_bswap16(x)
+#elif __BYTE_ORDER == __BIG_ENDIAN
+# define __bpf_ntohs(x) (x)
+# define __bpf_htons(x) (x)
+#else
+# error "Fix your __BYTE_ORDER?!"
+#endif
+
+#define bpf_htons(x) \
+ (__builtin_constant_p(x) ? \
+ __constant_htons(x) : __bpf_htons(x))
+#define bpf_ntohs(x) \
+ (__builtin_constant_p(x) ? \
+ __constant_ntohs(x) : __bpf_ntohs(x))
+
static inline unsigned int bpf_num_possible_cpus(void)
{
static const char *fcpu = "/sys/devices/system/cpu/possible";
diff --git a/tools/testing/selftests/bpf/test_l4lb.c b/tools/testing/selftests/bpf/test_l4lb.c
index 368bfe8..b68b212 100644
--- a/tools/testing/selftests/bpf/test_l4lb.c
+++ b/tools/testing/selftests/bpf/test_l4lb.c
@@ -19,9 +19,8 @@
#include <linux/udp.h>
#include "bpf_helpers.h"
#include "test_iptunnel_common.h"
+#include "bpf_util.h"
-#define htons __builtin_bswap16
-#define ntohs __builtin_bswap16
int _version SEC("version") = 1;
static inline __u32 rol32(__u32 word, unsigned int shift)
@@ -355,7 +354,7 @@ static __always_inline int process_packet(void *data, __u64 off, void *data_end,
iph_len = sizeof(struct ipv6hdr);
protocol = ip6h->nexthdr;
pckt.proto = protocol;
- pkt_bytes = ntohs(ip6h->payload_len);
+ pkt_bytes = bpf_ntohs(ip6h->payload_len);
off += iph_len;
if (protocol == IPPROTO_FRAGMENT) {
return TC_ACT_SHOT;
@@ -377,7 +376,7 @@ static __always_inline int process_packet(void *data, __u64 off, void *data_end,
protocol = iph->protocol;
pckt.proto = protocol;
- pkt_bytes = ntohs(iph->tot_len);
+ pkt_bytes = bpf_ntohs(iph->tot_len);
off += IPV4_HDR_LEN_NO_OPT;
if (iph->frag_off & PCKT_FRAGMENTED)
@@ -464,9 +463,9 @@ int balancer_ingress(struct __sk_buff *ctx)
if (data + nh_off > data_end)
return TC_ACT_SHOT;
eth_proto = eth->eth_proto;
- if (eth_proto == htons(ETH_P_IP))
+ if (eth_proto == bpf_htons(ETH_P_IP))
return process_packet(data, nh_off, data_end, false, ctx);
- else if (eth_proto == htons(ETH_P_IPV6))
+ else if (eth_proto == bpf_htons(ETH_P_IPV6))
return process_packet(data, nh_off, data_end, true, ctx);
else
return TC_ACT_SHOT;
diff --git a/tools/testing/selftests/bpf/test_pkt_access.c b/tools/testing/selftests/bpf/test_pkt_access.c
index fd1e083..7113005 100644
--- a/tools/testing/selftests/bpf/test_pkt_access.c
+++ b/tools/testing/selftests/bpf/test_pkt_access.c
@@ -14,8 +14,8 @@
#include <linux/tcp.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"
+#include "bpf_util.h"
-#define _htons __builtin_bswap16
#define barrier() __asm__ __volatile__("": : :"memory")
int _version SEC("version") = 1;
@@ -32,7 +32,7 @@ int process(struct __sk_buff *skb)
if (eth + 1 > data_end)
return TC_ACT_SHOT;
- if (eth->h_proto == _htons(ETH_P_IP)) {
+ if (eth->h_proto == bpf_htons(ETH_P_IP)) {
struct iphdr *iph = (struct iphdr *)(eth + 1);
if (iph + 1 > data_end)
@@ -40,7 +40,7 @@ int process(struct __sk_buff *skb)
ihl_len = iph->ihl * 4;
proto = iph->protocol;
tcp = (struct tcphdr *)((void *)(iph) + ihl_len);
- } else if (eth->h_proto == _htons(ETH_P_IPV6)) {
+ } else if (eth->h_proto == bpf_htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h = (struct ipv6hdr *)(eth + 1);
if (ip6h + 1 > data_end)
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 5275d4a..7c2d899 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -30,8 +30,6 @@
#include "test_iptunnel_common.h"
#include "bpf_util.h"
-#define _htons __builtin_bswap16
-
static int error_cnt, pass_cnt;
#define MAGIC_BYTES 123
@@ -42,10 +40,10 @@
struct iphdr iph;
struct tcphdr tcp;
} __packed pkt_v4 = {
- .eth.h_proto = _htons(ETH_P_IP),
+ .eth.h_proto = bpf_htons(ETH_P_IP),
.iph.ihl = 5,
.iph.protocol = 6,
- .iph.tot_len = _htons(MAGIC_BYTES),
+ .iph.tot_len = bpf_htons(MAGIC_BYTES),
.tcp.urg_ptr = 123,
};
@@ -55,9 +53,9 @@
struct ipv6hdr iph;
struct tcphdr tcp;
} __packed pkt_v6 = {
- .eth.h_proto = _htons(ETH_P_IPV6),
+ .eth.h_proto = bpf_htons(ETH_P_IPV6),
.iph.nexthdr = 6,
- .iph.payload_len = _htons(MAGIC_BYTES),
+ .iph.payload_len = bpf_htons(MAGIC_BYTES),
.tcp.urg_ptr = 123,
};
--
1.9.3

[PATCH net-next 5/5] bpf: provide a generic macro for percpu values for selftests
From: Daniel Borkmann @ 2017-04-26 23:39 UTC
To: davem; +Cc: ast, netdev, Daniel Borkmann
To overcome bugs like the ones described and fixed in 89087c456fb5
("bpf: Fix values type used in test_maps"), provide a generic
BPF_DECLARE_PERCPU() declaration and a bpf_percpu() accessor macro
for all percpu map values used in tests.
Declaring variables works as follows (also works for structs):

  BPF_DECLARE_PERCPU(uint32_t, my_value);

They can then be accessed as a normal uint32_t through:

  bpf_percpu(my_value, <cpu_nr>)

For example:

  bpf_percpu(my_value, 0)++;
Implicitly, we make sure that the passed type is allocated and aligned
by gcc on at least an 8-byte boundary, so that it works together with
the map lookup/update syscall for percpu maps. We use it in test_maps
as a usage example, so that others are free to adapt it in their own
code where necessary.
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
---
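A standalone sketch (not part of this patch) of what the macro pair
expands to; the CPU count is hard-coded to 4 here instead of the
bpf_num_possible_cpus() call the real header uses:

  #include <stdint.h>

  #define __bpf_percpu_val_align __attribute__((__aligned__(8)))

  #define BPF_DECLARE_PERCPU(type, name)                          \
          struct { type v; /* padding */ } __bpf_percpu_val_align \
          name[4]
  #define bpf_percpu(name, cpu) name[(cpu)].v

  int main(void)
  {
          BPF_DECLARE_PERCPU(uint32_t, my_value);

          bpf_percpu(my_value, 0) = 0;
          bpf_percpu(my_value, 0)++;

          /* Each slot occupies 8 aligned bytes even though uint32_t
           * is only 4, which is what the lookup/update syscall for
           * percpu maps reads and writes per possible CPU. */
          return sizeof(my_value) == 4 * 8 ? 0 : 1;
  }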
tools/testing/selftests/bpf/bpf_util.h | 7 +++++++
tools/testing/selftests/bpf/test_maps.c | 37 ++++++++++++++++++---------------
2 files changed, 27 insertions(+), 17 deletions(-)
diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h
index 7de2796..369e7d7 100644
--- a/tools/testing/selftests/bpf/bpf_util.h
+++ b/tools/testing/selftests/bpf/bpf_util.h
@@ -54,4 +54,11 @@ static inline unsigned int bpf_num_possible_cpus(void)
return possible_cpus;
}
+#define __bpf_percpu_val_align __attribute__((__aligned__(8)))
+
+#define BPF_DECLARE_PERCPU(type, name) \
+ struct { type v; /* padding */ } __bpf_percpu_val_align \
+ name[bpf_num_possible_cpus()]
+#define bpf_percpu(name, cpu) name[(cpu)].v
+
#endif /* __BPF_UTIL__ */
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index a977c4f..9331452 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -137,20 +137,20 @@ static void test_hashmap_sizes(int task, void *data)
static void test_hashmap_percpu(int task, void *data)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
- long long value[nr_cpus];
+ BPF_DECLARE_PERCPU(long, value);
long long key, next_key, first_key;
int expected_key_mask = 0;
int fd, i;
fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_HASH, sizeof(key),
- sizeof(value[0]), 2, map_flags);
+ sizeof(bpf_percpu(value, 0)), 2, map_flags);
if (fd < 0) {
printf("Failed to create hashmap '%s'!\n", strerror(errno));
exit(1);
}
for (i = 0; i < nr_cpus; i++)
- value[i] = i + 100;
+ bpf_percpu(value, i) = i + 100;
key = 1;
/* Insert key=1 element. */
@@ -170,8 +170,9 @@ static void test_hashmap_percpu(int task, void *data)
/* Check that key=1 can be found. Value could be 0 if the lookup
* was run from a different CPU.
*/
- value[0] = 1;
- assert(bpf_map_lookup_elem(fd, &key, value) == 0 && value[0] == 100);
+ bpf_percpu(value, 0) = 1;
+ assert(bpf_map_lookup_elem(fd, &key, value) == 0 &&
+ bpf_percpu(value, 0) == 100);
key = 2;
/* Check that key=2 is not found. */
@@ -211,7 +212,7 @@ static void test_hashmap_percpu(int task, void *data)
assert(bpf_map_lookup_elem(fd, &next_key, value) == 0);
for (i = 0; i < nr_cpus; i++)
- assert(value[i] == i + 100);
+ assert(bpf_percpu(value, i) == i + 100);
key = next_key;
}
@@ -296,34 +297,36 @@ static void test_arraymap(int task, void *data)
static void test_arraymap_percpu(int task, void *data)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
+ BPF_DECLARE_PERCPU(long, values);
int key, next_key, fd, i;
- long long values[nr_cpus];
fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
- sizeof(values[0]), 2, 0);
+ sizeof(bpf_percpu(values, 0)), 2, 0);
if (fd < 0) {
printf("Failed to create arraymap '%s'!\n", strerror(errno));
exit(1);
}
for (i = 0; i < nr_cpus; i++)
- values[i] = i + 100;
+ bpf_percpu(values, i) = i + 100;
key = 1;
/* Insert key=1 element. */
assert(bpf_map_update_elem(fd, &key, values, BPF_ANY) == 0);
- values[0] = 0;
+ bpf_percpu(values, 0) = 0;
assert(bpf_map_update_elem(fd, &key, values, BPF_NOEXIST) == -1 &&
errno == EEXIST);
/* Check that key=1 can be found. */
- assert(bpf_map_lookup_elem(fd, &key, values) == 0 && values[0] == 100);
+ assert(bpf_map_lookup_elem(fd, &key, values) == 0 &&
+ bpf_percpu(values, 0) == 100);
key = 0;
/* Check that key=0 is also found and zero initialized. */
assert(bpf_map_lookup_elem(fd, &key, values) == 0 &&
- values[0] == 0 && values[nr_cpus - 1] == 0);
+ bpf_percpu(values, 0) == 0 &&
+ bpf_percpu(values, nr_cpus - 1) == 0);
/* Check that key=2 cannot be inserted due to max_entries limit. */
key = 2;
@@ -353,15 +356,15 @@ static void test_arraymap_percpu(int task, void *data)
static void test_arraymap_percpu_many_keys(void)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
+ BPF_DECLARE_PERCPU(long, values);
/* nr_keys is not too large otherwise the test stresses percpu
* allocator more than anything else
*/
unsigned int nr_keys = 2000;
- long long values[nr_cpus];
int key, fd, i;
fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
- sizeof(values[0]), nr_keys, 0);
+ sizeof(bpf_percpu(values, 0)), nr_keys, 0);
if (fd < 0) {
printf("Failed to create per-cpu arraymap '%s'!\n",
strerror(errno));
@@ -369,19 +372,19 @@ static void test_arraymap_percpu_many_keys(void)
}
for (i = 0; i < nr_cpus; i++)
- values[i] = i + 10;
+ bpf_percpu(values, i) = i + 10;
for (key = 0; key < nr_keys; key++)
assert(bpf_map_update_elem(fd, &key, values, BPF_ANY) == 0);
for (key = 0; key < nr_keys; key++) {
for (i = 0; i < nr_cpus; i++)
- values[i] = 0;
+ bpf_percpu(values, i) = 0;
assert(bpf_map_lookup_elem(fd, &key, values) == 0);
for (i = 0; i < nr_cpus; i++)
- assert(values[i] == i + 10);
+ assert(bpf_percpu(values, i) == i + 10);
}
close(fd);
--
1.9.3