* [PATCH net-next 0/2] net_random_N, reciprocal_divide helper updates
@ 2013-09-03 10:26 Daniel Borkmann
2013-09-03 10:26 ` [PATCH net-next 1/2] net: introduce generic net_random_N helper Daniel Borkmann
2013-09-03 10:26 ` [PATCH net-next 2/2] net: use reciprocal_divide instead of reimplementing it Daniel Borkmann
0 siblings, 2 replies; 11+ messages in thread
From: Daniel Borkmann @ 2013-09-03 10:26 UTC (permalink / raw)
To: davem; +Cc: netdev
Introduce a generic net_random_N() helper instead of reimplementing the
same function over and over, and use the reciprocal_divide() helper
instead of open-coding it at each call site.
Daniel Borkmann (2):
net: introduce generic net_random_N helper
net: use reciprocal_divide instead of reimplementing it
drivers/net/team/team_mode_random.c | 7 +------
include/linux/net.h | 7 +++++++
include/net/red.h | 2 +-
net/802/garp.c | 2 +-
net/802/mrp.c | 2 +-
net/core/dev.c | 2 +-
net/core/flow_dissector.c | 5 ++---
net/ipv4/inet_hashtables.c | 2 +-
net/ipv4/netfilter/ipt_CLUSTERIP.c | 2 +-
net/ipv4/udp.c | 6 +++---
net/ipv6/inet6_hashtables.c | 2 +-
net/ipv6/udp.c | 4 ++--
net/netfilter/nf_conntrack_core.c | 2 +-
net/netfilter/nf_conntrack_expect.c | 2 +-
net/netfilter/nf_nat_core.c | 4 ++--
net/netfilter/xt_HMARK.c | 2 +-
net/netfilter/xt_NFQUEUE.c | 4 ++--
net/netfilter/xt_cluster.c | 2 +-
net/netfilter/xt_hashlimit.c | 2 +-
net/packet/af_packet.c | 2 +-
net/sched/sch_choke.c | 8 +-------
net/sched/sch_fq_codel.c | 2 +-
22 files changed, 34 insertions(+), 39 deletions(-)
--
1.7.11.7
* [PATCH net-next 1/2] net: introduce generic net_random_N helper
2013-09-03 10:26 [PATCH net-next 0/2] net_random_N, reciprocal_divide helper updates Daniel Borkmann
@ 2013-09-03 10:26 ` Daniel Borkmann
2013-09-03 11:00 ` Joe Perches
2013-09-03 13:59 ` Eric Dumazet
2013-09-03 10:26 ` [PATCH net-next 2/2] net: use reciprocal_divide instead of reimplementing it Daniel Borkmann
1 sibling, 2 replies; 11+ messages in thread
From: Daniel Borkmann @ 2013-09-03 10:26 UTC (permalink / raw)
To: davem; +Cc: netdev
We have implemented the same function over and over, so introduce a
generic helper net_random_N() that unifies these implementations.
It internally uses net_random(), which eventually resolves to
prandom_u32(). An explicit include of reciprocal_div.h is not necessary.
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
---
drivers/net/team/team_mode_random.c | 7 +------
include/linux/net.h | 7 +++++++
include/net/red.h | 2 +-
net/802/garp.c | 2 +-
net/802/mrp.c | 2 +-
net/packet/af_packet.c | 2 +-
net/sched/sch_choke.c | 8 +-------
7 files changed, 13 insertions(+), 17 deletions(-)
diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c
index 7f032e2..b2294f8 100644
--- a/drivers/net/team/team_mode_random.c
+++ b/drivers/net/team/team_mode_random.c
@@ -16,17 +16,12 @@
#include <linux/reciprocal_div.h>
#include <linux/if_team.h>
-static u32 random_N(unsigned int N)
-{
- return reciprocal_divide(prandom_u32(), N);
-}
-
static bool rnd_transmit(struct team *team, struct sk_buff *skb)
{
struct team_port *port;
int port_index;
- port_index = random_N(team->en_port_count);
+ port_index = net_random_N(team->en_port_count);
port = team_get_port_by_index_rcu(team, port_index);
if (unlikely(!port))
goto drop;
diff --git a/include/linux/net.h b/include/linux/net.h
index 4f27575..86ffce7 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -24,6 +24,7 @@
#include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */
#include <linux/kmemcheck.h>
#include <linux/rcupdate.h>
+#include <linux/reciprocal_div.h>
#include <uapi/linux/net.h>
struct poll_table_struct;
@@ -243,6 +244,12 @@ do { \
#define net_random() prandom_u32()
#define net_srandom(seed) prandom_seed((__force u32)(seed))
+/* deliver a random number between 0 and N - 1 */
+static inline u32 net_random_N(u32 N)
+{
+ return reciprocal_divide(net_random(), N);
+}
+
extern int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
struct kvec *vec, size_t num, size_t len);
extern int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
diff --git a/include/net/red.h b/include/net/red.h
index ef46058..b17ed60 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -303,7 +303,7 @@ static inline unsigned long red_calc_qavg(const struct red_parms *p,
static inline u32 red_random(const struct red_parms *p)
{
- return reciprocal_divide(net_random(), p->max_P_reciprocal);
+ return net_random_N(p->max_P_reciprocal);
}
static inline int red_mark_probability(const struct red_parms *p,
diff --git a/net/802/garp.c b/net/802/garp.c
index 5d9630a..c6fe7ca 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -397,7 +397,7 @@ static void garp_join_timer_arm(struct garp_applicant *app)
{
unsigned long delay;
- delay = (u64)msecs_to_jiffies(garp_join_time) * net_random() >> 32;
+ delay = net_random_N(msecs_to_jiffies(garp_join_time));
mod_timer(&app->join_timer, jiffies + delay);
}
diff --git a/net/802/mrp.c b/net/802/mrp.c
index 1eb05d8..7338a7c 100644
--- a/net/802/mrp.c
+++ b/net/802/mrp.c
@@ -578,7 +578,7 @@ static void mrp_join_timer_arm(struct mrp_applicant *app)
{
unsigned long delay;
- delay = (u64)msecs_to_jiffies(mrp_join_time) * net_random() >> 32;
+ delay = net_random_N(msecs_to_jiffies(mrp_join_time));
mod_timer(&app->join_timer, jiffies + delay);
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 2e8286b..7ac669e 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1162,7 +1162,7 @@ static unsigned int fanout_demux_rnd(struct packet_fanout *f,
struct sk_buff *skb,
unsigned int num)
{
- return reciprocal_divide(prandom_u32(), num);
+ return net_random_N(num);
}
static unsigned int fanout_demux_rollover(struct packet_fanout *f,
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index ef53ab8..e19b20be 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -77,12 +77,6 @@ struct choke_sched_data {
struct sk_buff **tab;
};
-/* deliver a random number between 0 and N - 1 */
-static u32 random_N(unsigned int N)
-{
- return reciprocal_divide(prandom_u32(), N);
-}
-
/* number of elements in queue including holes */
static unsigned int choke_len(const struct choke_sched_data *q)
{
@@ -233,7 +227,7 @@ static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
int retrys = 3;
do {
- *pidx = (q->head + random_N(choke_len(q))) & q->tab_mask;
+ *pidx = (q->head + net_random_N(choke_len(q))) & q->tab_mask;
skb = q->tab[*pidx];
if (skb)
return skb;
--
1.7.11.7
* [PATCH net-next 2/2] net: use reciprocal_divide instead of reimplementing it
2013-09-03 10:26 [PATCH net-next 0/2] net_random_N, reciprocal_divide helper updates Daniel Borkmann
2013-09-03 10:26 ` [PATCH net-next 1/2] net: introduce generic net_random_N helper Daniel Borkmann
@ 2013-09-03 10:26 ` Daniel Borkmann
2013-09-03 14:02 ` Eric Dumazet
1 sibling, 1 reply; 11+ messages in thread
From: Daniel Borkmann @ 2013-09-03 10:26 UTC (permalink / raw)
To: davem; +Cc: netdev, netfilter-devel
Use the reciprocal_divide() inline function in these places instead of
reimplementing it each time; this makes the code easier to read. We do not
need to explicitly include its header, as it is pulled in via linux/net.h
anyway.
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Cc: netfilter-devel@vger.kernel.org
---
net/core/dev.c | 2 +-
net/core/flow_dissector.c | 5 ++---
net/ipv4/inet_hashtables.c | 2 +-
net/ipv4/netfilter/ipt_CLUSTERIP.c | 2 +-
net/ipv4/udp.c | 6 +++---
net/ipv6/inet6_hashtables.c | 2 +-
net/ipv6/udp.c | 4 ++--
net/netfilter/nf_conntrack_core.c | 2 +-
net/netfilter/nf_conntrack_expect.c | 2 +-
net/netfilter/nf_nat_core.c | 4 ++--
net/netfilter/xt_HMARK.c | 2 +-
net/netfilter/xt_NFQUEUE.c | 4 ++--
net/netfilter/xt_cluster.c | 2 +-
net/netfilter/xt_hashlimit.c | 2 +-
net/sched/sch_fq_codel.c | 2 +-
15 files changed, 21 insertions(+), 22 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 6fbb0c9..bc7c839 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3040,7 +3040,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
}
if (map) {
- tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
+ tcpu = map->cpus[reciprocal_divide(skb->rxhash, map->len)];
if (cpu_online(tcpu)) {
cpu = tcpu;
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 159737c..59521e0 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -233,7 +233,7 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
hash = (__force u16) skb->protocol;
hash = jhash_1word(hash, hashrnd);
- return (u16) (((u64) hash * qcount) >> 32) + qoffset;
+ return (u16) reciprocal_divide(hash, qcount) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
@@ -324,8 +324,7 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
hash = (__force u16) skb->protocol ^
skb->rxhash;
hash = jhash_1word(hash, hashrnd);
- queue_index = map->queues[
- ((u64)hash * map->len) >> 32];
+ queue_index = map->queues[reciprocal_divide(hash, map->len)];
}
if (unlikely(queue_index >= dev->real_num_tx_queues))
queue_index = -1;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 7bd8983..731dcbf 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -204,7 +204,7 @@ begin:
}
} else if (score == hiscore && reuseport) {
matches++;
- if (((u64)phash * matches) >> 32 == 0)
+ if (reciprocal_divide(phash, matches) == 0)
result = sk;
phash = next_pseudo_random32(phash);
}
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 0b732ef..0fede8b 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -273,7 +273,7 @@ clusterip_hashfn(const struct sk_buff *skb,
}
/* node numbers are 1..n, not 0..n */
- return (((u64)hashval * config->num_total_nodes) >> 32) + 1;
+ return reciprocal_divide(hashval, config->num_total_nodes) + 1;
}
static inline int
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 74d2c95..5db0725 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -223,7 +223,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
remaining = (high - low) + 1;
rand = net_random();
- first = (((u64)rand * remaining) >> 32) + low;
+ first = reciprocal_divide(rand, remaining) + low;
/*
* force rand to be an odd multiple of UDP_HTABLE_SIZE
*/
@@ -435,7 +435,7 @@ begin:
}
} else if (score == badness && reuseport) {
matches++;
- if (((u64)hash * matches) >> 32 == 0)
+ if (reciprocal_divide(hash, matches) == 0)
result = sk;
hash = next_pseudo_random32(hash);
}
@@ -516,7 +516,7 @@ begin:
}
} else if (score == badness && reuseport) {
matches++;
- if (((u64)hash * matches) >> 32 == 0)
+ if (reciprocal_divide(hash, matches) == 0)
result = sk;
hash = next_pseudo_random32(hash);
}
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 32b4a16..d9d8908 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -187,7 +187,7 @@ begin:
}
} else if (score == hiscore && reuseport) {
matches++;
- if (((u64)phash * matches) >> 32 == 0)
+ if (reciprocal_divide(phash, matches) == 0)
result = sk;
phash = next_pseudo_random32(phash);
}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index f405815..610013a 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -226,7 +226,7 @@ begin:
goto exact_match;
} else if (score == badness && reuseport) {
matches++;
- if (((u64)hash * matches) >> 32 == 0)
+ if (reciprocal_divide(hash, matches) == 0)
result = sk;
hash = next_pseudo_random32(hash);
}
@@ -306,7 +306,7 @@ begin:
}
} else if (score == badness && reuseport) {
matches++;
- if (((u64)hash * matches) >> 32 == 0)
+ if (reciprocal_divide(hash, matches) == 0)
result = sk;
hash = next_pseudo_random32(hash);
}
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 5d892fe..6378441 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -97,7 +97,7 @@ static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
static u32 __hash_bucket(u32 hash, unsigned int size)
{
- return ((u64)hash * size) >> 32;
+ return reciprocal_divide(hash, size);
}
static u32 hash_bucket(u32 hash, const struct net *net)
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 4fd1ca9..c0865e6 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -83,7 +83,7 @@ static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple
hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
(((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
(__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);
- return ((u64)hash * nf_ct_expect_hsize) >> 32;
+ return reciprocal_divide(hash, nf_ct_expect_hsize);
}
struct nf_conntrack_expect *
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 6f0f4f7..ce9083e 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -126,7 +126,7 @@ hash_by_src(const struct net *net, u16 zone,
/* Original src, to ensure we map it consistently if poss. */
hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
tuple->dst.protonum ^ zone ^ nf_conntrack_hash_rnd);
- return ((u64)hash * net->ct.nat_htable_size) >> 32;
+ return reciprocal_divide(hash, net->ct.nat_htable_size);
}
/* Is this tuple already taken? (not by us) */
@@ -274,7 +274,7 @@ find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
}
var_ipp->all[i] = (__force __u32)
- htonl(minip + (((u64)j * dist) >> 32));
+ htonl(minip + reciprocal_divide(j, dist));
if (var_ipp->all[i] != range->max_addr.all[i])
full_range = true;
diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c
index 73b73f6..3dd05c4 100644
--- a/net/netfilter/xt_HMARK.c
+++ b/net/netfilter/xt_HMARK.c
@@ -126,7 +126,7 @@ hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info)
hash = jhash_3words(src, dst, t->uports.v32, info->hashrnd);
hash = hash ^ (t->proto & info->proto_mask);
- return (((u64)hash * info->hmodulus) >> 32) + info->hoffset;
+ return reciprocal_divide(hash, info->hmodulus) + info->hoffset;
}
static void
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index 1e2fae3..88443c1 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -83,10 +83,10 @@ nfqueue_hash(const struct sk_buff *skb, const struct xt_action_param *par)
u32 queue = info->queuenum;
if (par->family == NFPROTO_IPV4)
- queue += ((u64) hash_v4(skb) * info->queues_total) >> 32;
+ queue += reciprocal_divide(hash_v4(skb), info->queues_total);
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
else if (par->family == NFPROTO_IPV6)
- queue += ((u64) hash_v6(skb) * info->queues_total) >> 32;
+ queue += reciprocal_divide(hash_v6(skb), info->queues_total);
#endif
return queue;
diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c
index f4af1bf..845fc16 100644
--- a/net/netfilter/xt_cluster.c
+++ b/net/netfilter/xt_cluster.c
@@ -55,7 +55,7 @@ xt_cluster_hash(const struct nf_conn *ct,
WARN_ON(1);
break;
}
- return (((u64)hash * info->total_nodes) >> 32);
+ return reciprocal_divide(hash, info->total_nodes);
}
static inline bool
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 9ff035c..9af9021 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -135,7 +135,7 @@ hash_dst(const struct xt_hashlimit_htable *ht, const struct dsthash_dst *dst)
* give results between [0 and cfg.size-1] and same hash distribution,
* but using a multiply, less expensive than a divide
*/
- return ((u64)hash * ht->cfg.size) >> 32;
+ return reciprocal_divide(hash, ht->cfg.size);
}
static struct dsthash_ent *
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 5578628..4869671 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -77,7 +77,7 @@ static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
hash = jhash_3words((__force u32)keys.dst,
(__force u32)keys.src ^ keys.ip_proto,
(__force u32)keys.ports, q->perturbation);
- return ((u64)hash * q->flows_cnt) >> 32;
+ return reciprocal_divide(hash, q->flows_cnt);
}
static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
--
1.7.11.7
* Re: [PATCH net-next 1/2] net: introduce generic net_random_N helper
2013-09-03 10:26 ` [PATCH net-next 1/2] net: introduce generic net_random_N helper Daniel Borkmann
@ 2013-09-03 11:00 ` Joe Perches
2013-09-03 11:40 ` Daniel Borkmann
2013-09-03 13:59 ` Eric Dumazet
1 sibling, 1 reply; 11+ messages in thread
From: Joe Perches @ 2013-09-03 11:00 UTC (permalink / raw)
To: Daniel Borkmann; +Cc: davem, netdev, Theodore Ts'o
On Tue, 2013-09-03 at 12:26 +0200, Daniel Borkmann wrote:
> We have implemented the same function over and over, so introduce a
> generic helper net_random_N() that unifies these implementations.
> It internally uses net_random(), which eventually resolves to
> prandom_u32(). An explicit include of reciprocal_div.h is not necessary.
Perhaps adding a generic helper to random.h like
u32 prandom_u32_between(u32 low, u32 high);
or
u32 prandom_u32_range(u32 bound1, u32 bound2)
would be better.
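For illustration, such a range helper might look roughly like this (a hedged
sketch only; the name, the inclusive-bounds convention and the reuse of
reciprocal_divide() are assumptions, not code from this series):
#include <linux/random.h>
#include <linux/reciprocal_div.h>
/* hypothetical: return a pseudo-random number in [low, high], low <= high */
static inline u32 prandom_u32_range(u32 low, u32 high)
{
	/* scale a uniform 32-bit value into a span of (high - low + 1) values */
	return low + reciprocal_divide(prandom_u32(), high - low + 1);
}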
* Re: [PATCH net-next 1/2] net: introduce generic net_random_N helper
2013-09-03 11:00 ` Joe Perches
@ 2013-09-03 11:40 ` Daniel Borkmann
2013-09-03 12:15 ` Joe Perches
0 siblings, 1 reply; 11+ messages in thread
From: Daniel Borkmann @ 2013-09-03 11:40 UTC (permalink / raw)
To: Joe Perches; +Cc: davem, netdev, Theodore Ts'o
On 09/03/2013 01:00 PM, Joe Perches wrote:
> On Tue, 2013-09-03 at 12:26 +0200, Daniel Borkmann wrote:
>> We have implemented the same function over and over, so introduce a
>> generic helper net_random_N() that unifies these implementations.
>> It internally uses net_random(), which eventually resolves to
>> prandom_u32(). An explicit include of reciprocal_div.h is not necessary.
>
> Perhaps adding a generic helper to random.h like
> u32 prandom_u32_between(u32 low, u32 high);
> or
> u32 prandom_u32_range(u32 bound1, u32 bound2)
> would be better.
Sure, this could be done as a follow-up. Once we've migrated users to
the new API, follow-ups can do the rest, and migration will be easy.
Note that the lower bound is 0 here.
* Re: [PATCH net-next 1/2] net: introduce generic net_random_N helper
2013-09-03 11:40 ` Daniel Borkmann
@ 2013-09-03 12:15 ` Joe Perches
2013-09-03 12:26 ` Daniel Borkmann
0 siblings, 1 reply; 11+ messages in thread
From: Joe Perches @ 2013-09-03 12:15 UTC (permalink / raw)
To: Daniel Borkmann; +Cc: davem, netdev, Theodore Ts'o
On Tue, 2013-09-03 at 13:40 +0200, Daniel Borkmann wrote:
> On 09/03/2013 01:00 PM, Joe Perches wrote:
> > On Tue, 2013-09-03 at 12:26 +0200, Daniel Borkmann wrote:
> >> We have implemented the same function over and over, so introduce a
> >> generic helper net_random_N() that unifies these implementations.
> >> It internally uses net_random(), which eventually resolves to
> >> prandom_u32(). An explicit include of reciprocal_div.h is not necessary.
> >
> > Perhaps adding a generic helper to random.h like
> > u32 prandom_u32_between(u32 low, u32 high);
> > or
> > u32 prandom_u32_range(u32 bound1, u32 bound2)
> > would be better.
>
> Sure, this could be done as a follow-up. Once we've migrated users to
> the new API, follow-ups can do the rest, and migration will be easy.
> Note that the lower bound is 0 here.
Part of the reason I suggested this is to make the
API more readable/intelligible.
At first glance, I have no idea what net_random_N does.
I think #define net_random() prandom_u32()
should be removed eventually as well.
If gcc doesn't already do this optimization,
perhaps there are also some uses of
net_random() % non_const
that could be optimized a bit using this API.
$ git grep -E "(prandom_u32|net_random)\s*\(\s*\)\s*\%" net
net/batman-adv/bat_iv_ogm.c: msecs += prandom_u32() % (2 * BATADV_JITTER);
net/batman-adv/bat_iv_ogm.c: return jiffies + msecs_to_jiffies(prandom_u32() % (BATADV_JITTER / 2));
net/core/neighbour.c: return base ? (net_random() % base) + (base >> 1) : 0;
net/core/neighbour.c: unsigned long sched_next = now + (net_random() % p->proxy_delay);
net/core/pktgen.c: flow = prandom_u32() % pkt_dev->cflows;
net/core/pktgen.c: t = prandom_u32() %
net/core/pktgen.c: mc = prandom_u32() % pkt_dev->src_mac_count;
net/core/pktgen.c: mc = prandom_u32() % pkt_dev->dst_mac_count;
net/core/pktgen.c: pkt_dev->cur_udp_src = prandom_u32() %
net/core/pktgen.c: pkt_dev->cur_udp_dst = prandom_u32() %
net/core/pktgen.c: t = prandom_u32() % (imx - imn) + imn;
net/core/pktgen.c: t = prandom_u32() %
net/core/pktgen.c: t = prandom_u32() %
net/core/stream.c: current_timeo = vm_wait = (net_random() % (HZ / 5)) + 2;
net/ipv4/igmp.c: int tv = net_random() % max_delay;
net/ipv4/igmp.c: int tv = net_random() % in_dev->mr_maxdelay;
net/ipv4/igmp.c: int tv = net_random() % delay;
net/ipv4/inet_connection_sock.c: smallest_rover = rover = net_random() % remaining + low;
net/ipv6/addrconf.c: rand_num = net_random() % (idev->cnf.rtr_solicit_delay ? : 1);
net/ipv6/mcast.c: unsigned long tv = net_random() % idev->mc_maxdelay;
net/ipv6/mcast.c: unsigned long tv = net_random() % delay;
net/ipv6/mcast.c: unsigned long tv = net_random() % delay;
net/ipv6/mcast.c: delay = net_random() % resptime;
net/ipv6/mcast.c: delay = net_random() % unsolicited_report_interval(ma->idev);
net/netfilter/nf_conntrack_core.c: (prandom_u32() % net->ct.sysctl_events_retry_timeout);
net/netfilter/nf_conntrack_core.c: (prandom_u32() % net->ct.sysctl_events_retry_timeout);
net/sched/act_gact.c: if (!gact->tcfg_pval || net_random() % gact->tcfg_pval)
net/sched/sch_netem.c: skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
net/sctp/socket.c: rover = net_random() % remaining + low;
net/sunrpc/xprtsock.c: unsigned short rand = (unsigned short) net_random() % range;
net/xfrm/xfrm_state.c: spi = low + net_random()%(high-low+1);
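Taking one of the grepped call sites above purely as a pattern (the function
and parameter names below are illustrative, not taken from any listed file),
the conversion being hinted at would be a sketch along these lines:
#include <linux/net.h>	/* for net_random_N() as introduced in patch 1/2 */
static inline u32 pick_random_delay(u32 max_delay)
{
	/* was: net_random() % max_delay -- a hardware divide */
	return net_random_N(max_delay);	/* multiply + shift, result in [0, max_delay) */
}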
* Re: [PATCH net-next 1/2] net: introduce generic net_random_N helper
2013-09-03 12:15 ` Joe Perches
@ 2013-09-03 12:26 ` Daniel Borkmann
2013-09-03 12:41 ` Joe Perches
0 siblings, 1 reply; 11+ messages in thread
From: Daniel Borkmann @ 2013-09-03 12:26 UTC (permalink / raw)
To: Joe Perches; +Cc: davem, netdev, Theodore Ts'o
On 09/03/2013 02:15 PM, Joe Perches wrote:
> On Tue, 2013-09-03 at 13:40 +0200, Daniel Borkmann wrote:
>> On 09/03/2013 01:00 PM, Joe Perches wrote:
>>> On Tue, 2013-09-03 at 12:26 +0200, Daniel Borkmann wrote:
>>>> We have implemented the same function over and over, so introduce a
>>>> generic helper net_random_N() that unifies these implementations.
>>>> It internally uses net_random(), which eventually resolves to
>>>> prandom_u32(). An explicit include of reciprocal_div.h is not necessary.
>>>
>>> Perhaps adding a generic helper to random.h like
>>> u32 prandom_u32_between(u32 low, u32 high);
>>> or
>>> u32 prandom_u32_range(u32 bound1, u32 bound2)
>>> would be better.
>>
>> Sure, this could be done as a follow-up. Once we've migrated users to
>> the new API, follow-ups can do the rest, and migration will be easy.
>> Note that the lower bound is 0 here.
>
> Part of the reason I suggested this is to make the
> API more readable/intelligible.
>
> At first glance, I have no idea what net_random_N does.
>
> I think #define net_random() prandom_u32()
> should be removed eventually as well.
Why? Assume there could be different PRNGs in the future with different properties
(e.g. speed vs. period). Then changing net_random() to something else is far
easier and less error-prone than searching through the whole net subtree and
replacing every occurrence of prandom_u32().
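A minimal illustration of that indirection (the alternative PRNG below is
purely hypothetical): callers keep saying net_random(), so swapping the
backing generator is a one-line change in linux/net.h rather than a
tree-wide rename:
#define net_random()	prandom_u32()
/* #define net_random()	some_other_prng_u32()	-- hypothetical future swap */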
> If gcc doesn't already do this optimization,
> perhaps there are also some uses of
> net_random() % non_const
> that could be optimized a bit using this API.
I agree with you; this needs to be evaluated on a case-by-case basis as future work.
> $ git grep -E "(prandom_u32|net_random)\s*\(\s*\)\s*\%" net
> net/batman-adv/bat_iv_ogm.c: msecs += prandom_u32() % (2 * BATADV_JITTER);
> net/batman-adv/bat_iv_ogm.c: return jiffies + msecs_to_jiffies(prandom_u32() % (BATADV_JITTER / 2));
> net/core/neighbour.c: return base ? (net_random() % base) + (base >> 1) : 0;
> net/core/neighbour.c: unsigned long sched_next = now + (net_random() % p->proxy_delay);
> net/core/pktgen.c: flow = prandom_u32() % pkt_dev->cflows;
> net/core/pktgen.c: t = prandom_u32() %
> net/core/pktgen.c: mc = prandom_u32() % pkt_dev->src_mac_count;
> net/core/pktgen.c: mc = prandom_u32() % pkt_dev->dst_mac_count;
> net/core/pktgen.c: pkt_dev->cur_udp_src = prandom_u32() %
> net/core/pktgen.c: pkt_dev->cur_udp_dst = prandom_u32() %
> net/core/pktgen.c: t = prandom_u32() % (imx - imn) + imn;
> net/core/pktgen.c: t = prandom_u32() %
> net/core/pktgen.c: t = prandom_u32() %
> net/core/stream.c: current_timeo = vm_wait = (net_random() % (HZ / 5)) + 2;
> net/ipv4/igmp.c: int tv = net_random() % max_delay;
> net/ipv4/igmp.c: int tv = net_random() % in_dev->mr_maxdelay;
> net/ipv4/igmp.c: int tv = net_random() % delay;
> net/ipv4/inet_connection_sock.c: smallest_rover = rover = net_random() % remaining + low;
> net/ipv6/addrconf.c: rand_num = net_random() % (idev->cnf.rtr_solicit_delay ? : 1);
> net/ipv6/mcast.c: unsigned long tv = net_random() % idev->mc_maxdelay;
> net/ipv6/mcast.c: unsigned long tv = net_random() % delay;
> net/ipv6/mcast.c: unsigned long tv = net_random() % delay;
> net/ipv6/mcast.c: delay = net_random() % resptime;
> net/ipv6/mcast.c: delay = net_random() % unsolicited_report_interval(ma->idev);
> net/netfilter/nf_conntrack_core.c: (prandom_u32() % net->ct.sysctl_events_retry_timeout);
> net/netfilter/nf_conntrack_core.c: (prandom_u32() % net->ct.sysctl_events_retry_timeout);
> net/sched/act_gact.c: if (!gact->tcfg_pval || net_random() % gact->tcfg_pval)
> net/sched/sch_netem.c: skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
> net/sctp/socket.c: rover = net_random() % remaining + low;
> net/sunrpc/xprtsock.c: unsigned short rand = (unsigned short) net_random() % range;
> net/xfrm/xfrm_state.c: spi = low + net_random()%(high-low+1);
>
>
* Re: [PATCH net-next 1/2] net: introduce generic net_random_N helper
2013-09-03 12:26 ` Daniel Borkmann
@ 2013-09-03 12:41 ` Joe Perches
0 siblings, 0 replies; 11+ messages in thread
From: Joe Perches @ 2013-09-03 12:41 UTC (permalink / raw)
To: Daniel Borkmann; +Cc: davem, netdev, Theodore Ts'o
On Tue, 2013-09-03 at 14:26 +0200, Daniel Borkmann wrote:
> On 09/03/2013 02:15 PM, Joe Perches wrote:
> > I think #define net_random() prandom_u32()
> > should be removed eventually as well.
>
> Why? Assume there could be different PRNGs in future that have different properties
> (e.g. speed vs. period, etc). Then, changing net_random() to something else is way
> easier and less error-prone than searching through the whole subtree of net and
> replacing every occurrence of prandom_u32().
Maybe. There are already instances of prandom_u32
in the net tree. I don't find value in the indirection.
Akinobu Mita once posted an s/net_random/prandom_u32/
patch: https://lkml.org/lkml/2012/12/23/140
> > If gcc doesn't already do this optimization,
> > perhaps there are also some uses of
> > net_random() % non_const
> > that could be optimized a bit using this API.
>
> I agree with you; this needs to be evaluated on a case-by-case basis as future work.
__builtin_constant_p is useful.
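A hypothetical sketch of that (the macro below is made up, not part of either
patch): keep the '%' when the bound is a compile-time constant, since gcc
strength-reduces modulo by a constant anyway, and use the multiply+shift
helper otherwise:
#include <linux/net.h>
#include <linux/reciprocal_div.h>
/* N may be evaluated more than once; good enough for a sketch */
#define net_random_bounded(N)					\
	(__builtin_constant_p(N) ?				\
		net_random() % (N) :				\
		reciprocal_divide(net_random(), (N)))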
* Re: [PATCH net-next 1/2] net: introduce generic net_random_N helper
2013-09-03 10:26 ` [PATCH net-next 1/2] net: introduce generic net_random_N helper Daniel Borkmann
2013-09-03 11:00 ` Joe Perches
@ 2013-09-03 13:59 ` Eric Dumazet
1 sibling, 0 replies; 11+ messages in thread
From: Eric Dumazet @ 2013-09-03 13:59 UTC (permalink / raw)
To: Daniel Borkmann; +Cc: davem, netdev
On Tue, 2013-09-03 at 12:26 +0200, Daniel Borkmann wrote:
> We have implemented the same function over and over, so introduce a
> generic helper net_random_N() that unifies these implementations.
> It internally uses net_random(), which eventually resolves to
> prandom_u32(). An explicit include of reciprocal_div.h is not necessary.
>
> Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
> ---
It was private, so the name was whatever we chose at that time.
If we want to make it generic, why are we still using a net_something()?
Really, this patch should be discussed on lkml and netdev, and we should
use a generic name.
* Re: [PATCH net-next 2/2] net: use reciprocal_divide instead of reimplementing it
2013-09-03 10:26 ` [PATCH net-next 2/2] net: use reciprocal_divide instead of reimplementing it Daniel Borkmann
@ 2013-09-03 14:02 ` Eric Dumazet
2013-09-03 14:35 ` Daniel Borkmann
0 siblings, 1 reply; 11+ messages in thread
From: Eric Dumazet @ 2013-09-03 14:02 UTC (permalink / raw)
To: Daniel Borkmann; +Cc: davem, netdev, netfilter-devel
On Tue, 2013-09-03 at 12:26 +0200, Daniel Borkmann wrote:
> Use the reciprocal_divide() inline function in these places instead of
> reimplementing it each time; this makes the code easier to read. We do not
> need to explicitly include its header, as it is pulled in via linux/net.h
> anyway.
Sure, (((u64) hash * qcount) >> 32) happens to be
reciprocal_divide(hash, qcount), but the result depends on hash being
well distributed in [0 .. ~0U] space.
So 'reciprocal' and 'divide' do not accurately describe this exact
operation.
I suggest making the name more descriptive.
(And this should be discussed on lkml, btw.)
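To make the distribution point concrete, a small standalone check (plain C,
userspace, not kernel code) of both the identity and why the input
distribution matters:
#include <stdint.h>
#include <assert.h>
static uint32_t reciprocal_divide_sketch(uint32_t a, uint32_t b)
{
	return (uint32_t)(((uint64_t)a * b) >> 32);	/* same as ((u64)a * b) >> 32 */
}
int main(void)
{
	uint32_t n = 1024;
	/* any "hash" below 2^32 / n (= 4194304 here) lands in bucket 0 ... */
	assert(reciprocal_divide_sketch(12345, n) == 0);
	/* ... while values near the top of the 32-bit range map near bucket
	 * n - 1, so the result is only spread over [0, n) if the hash itself
	 * is well distributed over [0, 2^32). */
	assert(reciprocal_divide_sketch(0xfffff000u, n) == n - 1);
	return 0;
}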
* Re: [PATCH net-next 2/2] net: use reciprocal_divide instead of reimplementing it
2013-09-03 14:02 ` Eric Dumazet
@ 2013-09-03 14:35 ` Daniel Borkmann
0 siblings, 0 replies; 11+ messages in thread
From: Daniel Borkmann @ 2013-09-03 14:35 UTC (permalink / raw)
To: Eric Dumazet; +Cc: davem, netdev, netfilter-devel
On 09/03/2013 04:02 PM, Eric Dumazet wrote:
> On Tue, 2013-09-03 at 12:26 +0200, Daniel Borkmann wrote:
>> Use the reciprocal_divide() inline function in these places instead of
>> reimplementing it each time; this makes the code easier to read. We do not
>> need to explicitly include its header, as it is pulled in via linux/net.h
>> anyway.
>
> Sure, (((u64) hash * qcount) >> 32) happens to be
> reciprocal_divide(hash, qcount) but the result depends on hash being
> well distributed in [0 .. ~0U] space.
>
> So 'reciprocal' and 'divide' do not accurately describe this exact
> operation.
>
> I suggest making the thing more descriptive.
>
> (And discussed on lkml btw)
Sure, we can do that. Then let's drop these two for now.