[PATCH 06/15] net,9p: use new hashtable implementation
From: Sasha Levin @ 2012-12-17 15:01 UTC
To: David S. Miller, Sasha Levin, Eric Van Hensbergen, Joe Perches,
netdev, linux-kernel
Cc: Sasha Levin
Switch the 9p error table to the new hashtable implementation. This removes
generic, open-coded hash-table boilerplate from 9p.

This patch depends on d9b482c ("hashtable: introduce a small and naive
hashtable"), which was merged in v3.6.
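For readers who have not seen the new API: the conversion below needs only
three pieces of <linux/hashtable.h>. A minimal, self-contained sketch of the
same pattern (using the interface as posted in this series, where the lookup
macro still takes an hlist_node cursor; struct and function names here are
illustrative, not part of the patch):

#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/string.h>

struct err_entry {
	char *name;
	int namelen;
	int val;
	struct hlist_node list;
};

/* 2^5 = 32 buckets; a static definition needs no hash_init() */
static DEFINE_HASHTABLE(err_table, 5);

static void err_add(struct err_entry *e)
{
	u32 key = jhash(e->name, e->namelen, 0);

	/* hash_add() folds the full 32-bit key down to a bucket itself */
	hash_add(err_table, &e->list, key);
}

static int err_lookup(const char *name, int len)
{
	struct err_entry *e;
	struct hlist_node *pos;	/* cursor still required by this API version */
	u32 key = jhash(name, len, 0);

	hash_for_each_possible(err_table, e, pos, list, key)
		if (e->namelen == len && !memcmp(e->name, name, len))
			return e->val;
	return 0;
}

This is why the "% ERRHASHSZ" reduction disappears from the patch: hash_add()
and hash_for_each_possible() apply hash_min() to the key internally.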
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
---
net/9p/error.c | 21 +++++++++------------
1 file changed, 9 insertions(+), 12 deletions(-)
diff --git a/net/9p/error.c b/net/9p/error.c
index 2ab2de7..a394b37 100644
--- a/net/9p/error.c
+++ b/net/9p/error.c
@@ -34,6 +34,7 @@
#include <linux/jhash.h>
#include <linux/errno.h>
#include <net/9p/9p.h>
+#include <linux/hashtable.h>
/**
* struct errormap - map string errors from Plan 9 to Linux numeric ids
@@ -50,8 +51,8 @@ struct errormap {
struct hlist_node list;
};
-#define ERRHASHSZ 32
-static struct hlist_head hash_errmap[ERRHASHSZ];
+#define ERR_HASH_BITS 5
+static DEFINE_HASHTABLE(hash_errmap, ERR_HASH_BITS);
/* FixMe - reduce to a reasonable size */
static struct errormap errmap[] = {
@@ -193,18 +194,14 @@ static struct errormap errmap[] = {
int p9_error_init(void)
{
struct errormap *c;
- int bucket;
-
- /* initialize hash table */
- for (bucket = 0; bucket < ERRHASHSZ; bucket++)
- INIT_HLIST_HEAD(&hash_errmap[bucket]);
+ u32 hash;
/* load initial error map into hash table */
for (c = errmap; c->name != NULL; c++) {
c->namelen = strlen(c->name);
- bucket = jhash(c->name, c->namelen, 0) % ERRHASHSZ;
+ hash = jhash(c->name, c->namelen, 0);
INIT_HLIST_NODE(&c->list);
- hlist_add_head(&c->list, &hash_errmap[bucket]);
+ hash_add(hash_errmap, &c->list, hash);
}
return 1;
@@ -223,13 +220,13 @@ int p9_errstr2errno(char *errstr, int len)
int errno;
struct hlist_node *p;
struct errormap *c;
- int bucket;
+ u32 hash;
errno = 0;
p = NULL;
c = NULL;
- bucket = jhash(errstr, len, 0) % ERRHASHSZ;
- hlist_for_each_entry(c, p, &hash_errmap[bucket], list) {
+ hash = jhash(errstr, len, 0);
+ hash_for_each_possible(hash_errmap, c, p, list, hash) {
if (c->namelen == len && !memcmp(c->name, errstr, len)) {
errno = c->val;
break;
--
1.8.0
[PATCH 08/15] SUNRPC/cache: use new hashtable implementation
From: Sasha Levin @ 2012-12-17 15:01 UTC
To: Trond Myklebust, J. Bruce Fields, David S. Miller, linux-nfs,
netdev, linux-kernel
Cc: Sasha Levin
Switch the sunrpc cache to the new hashtable implementation. This removes
generic, open-coded hash-table boilerplate from the cache code.

This patch depends on d9b482c ("hashtable: introduce a small and naive
hashtable"), which was merged in v3.6.
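One subtlety is worth spelling out, since DFR_HASH() is dropped without an
obvious replacement: hash_add() hashes its key argument internally (via
hash_long() for pointer-sized keys), so the cast pointer value can be passed
straight in. A minimal sketch of that pattern, with illustrative names:

#include <linux/hashtable.h>

struct deferred_req {
	void *item;			/* the cache entry we are keyed on */
	struct hlist_node hash;
};

/* 2^9 = 512 buckets, cf. DFR_HASH_BITS below */
static DEFINE_HASHTABLE(defer_table, 9);

static void defer_hash_req(struct deferred_req *dreq, void *item)
{
	dreq->item = item;
	/* the raw pointer value is the key; hash_add() mixes it down */
	hash_add(defer_table, &dreq->hash, (unsigned long)item);
}

static void defer_unhash_req(struct deferred_req *dreq)
{
	/* hash_hashed() replaces the open-coded !hlist_unhashed() test */
	if (hash_hashed(&dreq->hash))
		hash_del(&dreq->hash);
}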
Tested-by: J. Bruce Fields <bfields@fieldses.org>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
---
net/sunrpc/cache.c | 18 +++++++-----------
1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 9afa439..d4539b6 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -28,6 +28,7 @@
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
+#include <linux/hashtable.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
@@ -524,19 +525,18 @@ EXPORT_SYMBOL_GPL(cache_purge);
* it to be revisited when cache info is available
*/
-#define DFR_HASHSIZE (PAGE_SIZE/sizeof(struct list_head))
-#define DFR_HASH(item) ((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)
+#define DFR_HASH_BITS 9
#define DFR_MAX 300 /* ??? */
static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
-static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
+static DEFINE_HASHTABLE(cache_defer_hash, DFR_HASH_BITS);
static int cache_defer_cnt;
static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
- hlist_del_init(&dreq->hash);
+ hash_del(&dreq->hash);
if (!list_empty(&dreq->recent)) {
list_del_init(&dreq->recent);
cache_defer_cnt--;
@@ -545,10 +545,7 @@ static void __unhash_deferred_req(struct cache_deferred_req *dreq)
static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
- int hash = DFR_HASH(item);
-
- INIT_LIST_HEAD(&dreq->recent);
- hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
+ hash_add(cache_defer_hash, &dreq->hash, (unsigned long)item);
}
static void setup_deferral(struct cache_deferred_req *dreq,
@@ -600,7 +597,7 @@ static void cache_wait_req(struct cache_req *req, struct cache_head *item)
* to clean up
*/
spin_lock(&cache_defer_lock);
- if (!hlist_unhashed(&sleeper.handle.hash)) {
+ if (hash_hashed(&sleeper.handle.hash)) {
__unhash_deferred_req(&sleeper.handle);
spin_unlock(&cache_defer_lock);
} else {
@@ -671,12 +668,11 @@ static void cache_revisit_request(struct cache_head *item)
struct cache_deferred_req *dreq;
struct list_head pending;
struct hlist_node *lp, *tmp;
- int hash = DFR_HASH(item);
INIT_LIST_HEAD(&pending);
spin_lock(&cache_defer_lock);
- hlist_for_each_entry_safe(dreq, lp, tmp, &cache_defer_hash[hash], hash)
+ hash_for_each_possible_safe(cache_defer_hash, dreq, lp, tmp, hash, (unsigned long)item)
if (dreq->item == item) {
__unhash_deferred_req(dreq);
list_add(&dreq->recent, &pending);
--
1.8.0
[PATCH 10/15] net,l2tp: use new hashtable implementation
From: Sasha Levin @ 2012-12-17 15:01 UTC
To: David S. Miller, James Chapman, Eric Dumazet, Dmitry Kozlov,
Sasha Levin, Chris Elston, Joe Perches, netdev, linux-kernel
Cc: Sasha Levin
Switch l2tp to the new hashtable implementation. This removes generic,
open-coded hash-table boilerplate from l2tp.

This patch depends on d9b482c ("hashtable: introduce a small and naive
hashtable"), which was merged in v3.6.
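Most of the churn below is in whole-table walks, so a quick sketch of the two
iteration forms may help (signatures as posted in this series, still carrying
hlist_node cursors; names are illustrative):

#include <linux/hashtable.h>

struct sess {
	u32 id;
	struct hlist_node hlist;
};

static DEFINE_HASHTABLE(sess_table, 4);	/* cf. L2TP_HASH_BITS */

static int count_sessions(void)
{
	struct sess *s;
	struct hlist_node *walk;
	int bkt, n = 0;

	/* plain walk over every bucket and every entry */
	hash_for_each(sess_table, bkt, walk, s, hlist)
		n++;
	return n;
}

static void drop_all_sessions(void)
{
	struct sess *s;
	struct hlist_node *walk, *tmp;
	int bkt;

	/* _safe walk: the current entry may be unhashed in the body */
	hash_for_each_safe(sess_table, bkt, walk, tmp, s, hlist)
		hash_del(&s->hlist);
}

Note that l2tp_tunnel_closeall() still has to restart its walk after dropping
the lock around each unbind, which is why the old goto-again loop becomes a
do/while (found) loop in the patch.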
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
---
net/l2tp/l2tp_core.c | 140 +++++++++++++++++++-----------------------------
net/l2tp/l2tp_core.h | 15 ++++--
net/l2tp/l2tp_debugfs.c | 19 +++----
3 files changed, 74 insertions(+), 100 deletions(-)
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 1a9f372..0b369e4 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -44,6 +44,7 @@
#include <linux/udp.h>
#include <linux/l2tp.h>
#include <linux/hash.h>
+#include <linux/hashtable.h>
#include <linux/sort.h>
#include <linux/file.h>
#include <linux/nsproxy.h>
@@ -107,8 +108,14 @@ static unsigned int l2tp_net_id;
struct l2tp_net {
struct list_head l2tp_tunnel_list;
spinlock_t l2tp_tunnel_list_lock;
- struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
- spinlock_t l2tp_session_hlist_lock;
+/*
+ * Session hash global list for L2TPv3.
+ * The session_id SHOULD be random according to RFC3931, but several
+ * L2TP implementations use incrementing session_ids. So we do a real
+ * hash on the session_id, rather than a simple bitmask.
+ */
+ DECLARE_HASHTABLE(l2tp_session_hash, L2TP_HASH_BITS_2);
+ spinlock_t l2tp_session_hash_lock;
};
static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
@@ -156,30 +163,17 @@ do { \
#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
#endif
-/* Session hash global list for L2TPv3.
- * The session_id SHOULD be random according to RFC3931, but several
- * L2TP implementations use incrementing session_ids. So we do a real
- * hash on the session_id, rather than a simple bitmask.
- */
-static inline struct hlist_head *
-l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
-{
- return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)];
-
-}
-
/* Lookup a session by id in the global session list
*/
static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
{
struct l2tp_net *pn = l2tp_pernet(net);
- struct hlist_head *session_list =
- l2tp_session_id_hash_2(pn, session_id);
struct l2tp_session *session;
struct hlist_node *walk;
rcu_read_lock_bh();
- hlist_for_each_entry_rcu(session, walk, session_list, global_hlist) {
+ hash_for_each_possible_rcu(pn->l2tp_session_hash, session, walk,
+ global_hlist, session_id) {
if (session->session_id == session_id) {
rcu_read_unlock_bh();
return session;
@@ -190,23 +184,10 @@ static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
return NULL;
}
-/* Session hash list.
- * The session_id SHOULD be random according to RFC2661, but several
- * L2TP implementations (Cisco and Microsoft) use incrementing
- * session_ids. So we do a real hash on the session_id, rather than a
- * simple bitmask.
- */
-static inline struct hlist_head *
-l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
-{
- return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
-}
-
/* Lookup a session by id
*/
struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id)
{
- struct hlist_head *session_list;
struct l2tp_session *session;
struct hlist_node *walk;
@@ -217,15 +198,14 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn
if (tunnel == NULL)
return l2tp_session_find_2(net, session_id);
- session_list = l2tp_session_id_hash(tunnel, session_id);
- read_lock_bh(&tunnel->hlist_lock);
- hlist_for_each_entry(session, walk, session_list, hlist) {
+ read_lock_bh(&tunnel->hash_lock);
+ hash_for_each_possible(tunnel->session_hash, session, walk, hlist, session_id) {
if (session->session_id == session_id) {
- read_unlock_bh(&tunnel->hlist_lock);
+ read_unlock_bh(&tunnel->hash_lock);
return session;
}
}
- read_unlock_bh(&tunnel->hlist_lock);
+ read_unlock_bh(&tunnel->hash_lock);
return NULL;
}
@@ -238,17 +218,15 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
struct l2tp_session *session;
int count = 0;
- read_lock_bh(&tunnel->hlist_lock);
- for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
- hlist_for_each_entry(session, walk, &tunnel->session_hlist[hash], hlist) {
- if (++count > nth) {
- read_unlock_bh(&tunnel->hlist_lock);
- return session;
- }
+ read_lock_bh(&tunnel->hash_lock);
+ hash_for_each(tunnel->session_hash, hash, walk, session, hlist) {
+ if (++count > nth) {
+ read_unlock_bh(&tunnel->hash_lock);
+ return session;
}
}
- read_unlock_bh(&tunnel->hlist_lock);
+ read_unlock_bh(&tunnel->hash_lock);
return NULL;
}
@@ -265,12 +243,10 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
struct l2tp_session *session;
rcu_read_lock_bh();
- for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
- hlist_for_each_entry_rcu(session, walk, &pn->l2tp_session_hlist[hash], global_hlist) {
- if (!strcmp(session->ifname, ifname)) {
- rcu_read_unlock_bh();
- return session;
- }
+ hash_for_each_rcu(pn->l2tp_session_hash, hash, walk, session, global_hlist) {
+ if (!strcmp(session->ifname, ifname)) {
+ rcu_read_unlock_bh();
+ return session;
}
}
@@ -1272,7 +1248,7 @@ end:
*/
static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
{
- int hash;
+ int hash, found = 0;
struct hlist_node *walk;
struct hlist_node *tmp;
struct l2tp_session *session;
@@ -1282,16 +1258,14 @@ static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing all sessions...\n",
tunnel->name);
- write_lock_bh(&tunnel->hlist_lock);
- for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
-again:
- hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
- session = hlist_entry(walk, struct l2tp_session, hlist);
-
+ write_lock_bh(&tunnel->hash_lock);
+ do {
+ found = 0;
+ hash_for_each_safe(tunnel->session_hash, hash, walk, tmp, session, hlist) {
l2tp_info(session, L2TP_MSG_CONTROL,
"%s: closing session\n", session->name);
- hlist_del_init(&session->hlist);
+ hash_del(&session->hlist);
/* Since we should hold the sock lock while
* doing any unbinding, we need to release the
@@ -1302,14 +1276,14 @@ again:
if (session->ref != NULL)
(*session->ref)(session);
- write_unlock_bh(&tunnel->hlist_lock);
+ write_unlock_bh(&tunnel->hash_lock);
if (tunnel->version != L2TP_HDR_VER_2) {
struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
- spin_lock_bh(&pn->l2tp_session_hlist_lock);
- hlist_del_init_rcu(&session->global_hlist);
- spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+ spin_lock_bh(&pn->l2tp_session_hash_lock);
+ hash_del_rcu(&session->global_hlist);
+ spin_unlock_bh(&pn->l2tp_session_hash_lock);
synchronize_rcu();
}
@@ -1319,17 +1293,17 @@ again:
if (session->deref != NULL)
(*session->deref)(session);
- write_lock_bh(&tunnel->hlist_lock);
+ write_lock_bh(&tunnel->hash_lock);
/* Now restart from the beginning of this hash
* chain. We always remove a session from the
* list so we are guaranteed to make forward
* progress.
*/
- goto again;
+ found = 1;
}
- }
- write_unlock_bh(&tunnel->hlist_lock);
+ } while (found);
+ write_unlock_bh(&tunnel->hash_lock);
}
/* Really kill the tunnel.
@@ -1576,7 +1550,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
tunnel->magic = L2TP_TUNNEL_MAGIC;
sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
- rwlock_init(&tunnel->hlist_lock);
+ rwlock_init(&tunnel->hash_lock);
/* The net we belong to */
tunnel->l2tp_net = net;
@@ -1613,6 +1587,8 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
/* Add tunnel to our list */
INIT_LIST_HEAD(&tunnel->list);
+
+ hash_init(tunnel->session_hash);
atomic_inc(&l2tp_tunnel_count);
/* Bump the reference count. The tunnel context is deleted
@@ -1677,17 +1653,17 @@ void l2tp_session_free(struct l2tp_session *session)
BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
/* Delete the session from the hash */
- write_lock_bh(&tunnel->hlist_lock);
- hlist_del_init(&session->hlist);
- write_unlock_bh(&tunnel->hlist_lock);
+ write_lock_bh(&tunnel->hash_lock);
+ hash_del(&session->hlist);
+ write_unlock_bh(&tunnel->hash_lock);
/* Unlink from the global hash if not L2TPv2 */
if (tunnel->version != L2TP_HDR_VER_2) {
struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
- spin_lock_bh(&pn->l2tp_session_hlist_lock);
- hlist_del_init_rcu(&session->global_hlist);
- spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+ spin_lock_bh(&pn->l2tp_session_hash_lock);
+ hash_del_rcu(&session->global_hlist);
+ spin_unlock_bh(&pn->l2tp_session_hash_lock);
synchronize_rcu();
}
@@ -1800,19 +1776,17 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
sock_hold(tunnel->sock);
/* Add session to the tunnel's hash list */
- write_lock_bh(&tunnel->hlist_lock);
- hlist_add_head(&session->hlist,
- l2tp_session_id_hash(tunnel, session_id));
- write_unlock_bh(&tunnel->hlist_lock);
+ write_lock_bh(&tunnel->hash_lock);
+ hash_add(tunnel->session_hash, &session->hlist, session_id);
+ write_unlock_bh(&tunnel->hash_lock);
/* And to the global session list if L2TPv3 */
if (tunnel->version != L2TP_HDR_VER_2) {
struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
- spin_lock_bh(&pn->l2tp_session_hlist_lock);
- hlist_add_head_rcu(&session->global_hlist,
- l2tp_session_id_hash_2(pn, session_id));
- spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+ spin_lock_bh(&pn->l2tp_session_hash_lock);
+ hash_add(pn->l2tp_session_hash, &session->global_hlist, session_id);
+ spin_unlock_bh(&pn->l2tp_session_hash_lock);
}
/* Ignore management session in session count value */
@@ -1831,15 +1805,13 @@ EXPORT_SYMBOL_GPL(l2tp_session_create);
static __net_init int l2tp_init_net(struct net *net)
{
struct l2tp_net *pn = net_generic(net, l2tp_net_id);
- int hash;
INIT_LIST_HEAD(&pn->l2tp_tunnel_list);
spin_lock_init(&pn->l2tp_tunnel_list_lock);
- for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
- INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);
+ hash_init(pn->l2tp_session_hash);
- spin_lock_init(&pn->l2tp_session_hlist_lock);
+ spin_lock_init(&pn->l2tp_session_hash_lock);
return 0;
}
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 56d583e..fc58c85 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -11,17 +11,17 @@
#ifndef _L2TP_CORE_H_
#define _L2TP_CORE_H_
+#include <linux/hashtable.h>
+
/* Just some random numbers */
#define L2TP_TUNNEL_MAGIC 0x42114DDA
#define L2TP_SESSION_MAGIC 0x0C04EB7D
/* Per tunnel, session hash table size */
#define L2TP_HASH_BITS 4
-#define L2TP_HASH_SIZE (1 << L2TP_HASH_BITS)
/* System-wide, session hash table size */
#define L2TP_HASH_BITS_2 8
-#define L2TP_HASH_SIZE_2 (1 << L2TP_HASH_BITS_2)
/* Debug message categories for the DEBUG socket option */
enum {
@@ -164,8 +164,15 @@ struct l2tp_tunnel_cfg {
struct l2tp_tunnel {
int magic; /* Should be L2TP_TUNNEL_MAGIC */
struct rcu_head rcu;
- rwlock_t hlist_lock; /* protect session_hlist */
- struct hlist_head session_hlist[L2TP_HASH_SIZE];
+ rwlock_t hash_lock; /* protect session_hash */
+/*
+ * Session hash list.
+ * The session_id SHOULD be random according to RFC2661, but several
+ * L2TP implementations (Cisco and Microsoft) use incrementing
+ * session_ids. So we do a real hash on the session_id, rather than a
+ * simple bitmask.
+*/
+ DECLARE_HASHTABLE(session_hash, L2TP_HASH_BITS);
/* hashed list of sessions,
* hashed by id */
u32 tunnel_id;
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index c3813bc..655f1fa 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -105,21 +105,16 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
int session_count = 0;
int hash;
struct hlist_node *walk;
- struct hlist_node *tmp;
+ struct l2tp_session *session;
- read_lock_bh(&tunnel->hlist_lock);
- for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
- hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
- struct l2tp_session *session;
+ read_lock_bh(&tunnel->hash_lock);
+ hash_for_each(tunnel->session_hash, hash, walk, session, hlist) {
+ if (session->session_id == 0)
+ continue;
- session = hlist_entry(walk, struct l2tp_session, hlist);
- if (session->session_id == 0)
- continue;
-
- session_count++;
- }
+ session_count++;
}
- read_unlock_bh(&tunnel->hlist_lock);
+ read_unlock_bh(&tunnel->hash_lock);
seq_printf(m, "\nTUNNEL %u peer %u", tunnel->tunnel_id, tunnel->peer_tunnel_id);
if (tunnel->sock) {
--
1.8.0
[PATCH 13/15] net,rds: use new hashtable implementation
From: Sasha Levin @ 2012-12-17 15:01 UTC
To: Venkat Venkatsubra, David S. Miller, rds-devel, netdev,
linux-kernel
Cc: Sasha Levin
Switch rds to the new hashtable implementation. This removes generic,
open-coded hash-table boilerplate from rds.

This patch depends on d9b482c ("hashtable: introduce a small and naive
hashtable"), which was merged in v3.6.
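rds keeps its lookups RCU-protected, so the conversion maps onto the _rcu
flavors of the same macros. A minimal sketch of the bind-table pattern
(interface as posted in this series; the names and the locking scheme here
are illustrative, not rds code):

#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct bound_sock {
	__be32 addr;
	__be16 port;
	struct hlist_node node;
};

static DEFINE_HASHTABLE(bound_socks, 10);	/* cf. BIND_HASH_BITS */
static DEFINE_SPINLOCK(bound_lock);		/* writers still serialize */

static struct bound_sock *bound_lookup(__be32 addr, __be16 port)
{
	struct bound_sock *bs;
	struct hlist_node *pos;
	u32 key = jhash_2words((u32)addr, (u32)port, 0);

	rcu_read_lock();
	hash_for_each_possible_rcu(bound_socks, bs, pos, node, key)
		if (bs->addr == addr && bs->port == port) {
			rcu_read_unlock();
			return bs;
		}
	rcu_read_unlock();
	return NULL;
}

static void bound_insert(struct bound_sock *bs)
{
	u32 key = jhash_2words((u32)bs->addr, (u32)bs->port, 0);

	spin_lock(&bound_lock);
	hash_add_rcu(bound_socks, &bs->node, key);	/* publish to readers */
	spin_unlock(&bound_lock);
}

Removal pairs with hash_del_rcu() and a grace period (synchronize_rcu() or
equivalent) before the entry is freed.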
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
---
net/rds/bind.c | 20 +++++------
net/rds/connection.c | 100 ++++++++++++++++++++++-----------------------------
2 files changed, 53 insertions(+), 67 deletions(-)
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 637bde5..a99e524 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -36,16 +36,16 @@
#include <linux/if_arp.h>
#include <linux/jhash.h>
#include <linux/ratelimit.h>
+#include <linux/hashtable.h>
#include "rds.h"
-#define BIND_HASH_SIZE 1024
-static struct hlist_head bind_hash_table[BIND_HASH_SIZE];
+#define BIND_HASH_BITS 10
+static DEFINE_HASHTABLE(bind_hash_table, BIND_HASH_BITS);
static DEFINE_SPINLOCK(rds_bind_lock);
-static struct hlist_head *hash_to_bucket(__be32 addr, __be16 port)
+static u32 rds_hash(__be32 addr, __be16 port)
{
- return bind_hash_table + (jhash_2words((u32)addr, (u32)port, 0) &
- (BIND_HASH_SIZE - 1));
+ return jhash_2words((u32)addr, (u32)port, 0);
}
static struct rds_sock *rds_bind_lookup(__be32 addr, __be16 port,
@@ -53,12 +53,12 @@ static struct rds_sock *rds_bind_lookup(__be32 addr, __be16 port,
{
struct rds_sock *rs;
struct hlist_node *node;
- struct hlist_head *head = hash_to_bucket(addr, port);
+ u32 key = rds_hash(addr, port);
u64 cmp;
u64 needle = ((u64)be32_to_cpu(addr) << 32) | be16_to_cpu(port);
rcu_read_lock();
- hlist_for_each_entry_rcu(rs, node, head, rs_bound_node) {
+ hash_for_each_possible_rcu(bind_hash_table, rs, node, rs_bound_node, key) {
cmp = ((u64)be32_to_cpu(rs->rs_bound_addr) << 32) |
be16_to_cpu(rs->rs_bound_port);
@@ -74,13 +74,13 @@ static struct rds_sock *rds_bind_lookup(__be32 addr, __be16 port,
* make sure our addr and port are set before
* we are added to the list, other people
* in rcu will find us as soon as the
- * hlist_add_head_rcu is done
+ * hash_add_rcu is done
*/
insert->rs_bound_addr = addr;
insert->rs_bound_port = port;
rds_sock_addref(insert);
- hlist_add_head_rcu(&insert->rs_bound_node, head);
+ hash_add_rcu(bind_hash_table, &insert->rs_bound_node, key);
}
return NULL;
}
@@ -152,7 +152,7 @@ void rds_remove_bound(struct rds_sock *rs)
rs, &rs->rs_bound_addr,
ntohs(rs->rs_bound_port));
- hlist_del_init_rcu(&rs->rs_bound_node);
+ hash_del_rcu(&rs->rs_bound_node);
rds_sock_put(rs);
rs->rs_bound_addr = 0;
}
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 9e07c75..a9afcb8 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -34,28 +34,24 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/hashtable.h>
#include <net/inet_hashtables.h>
#include "rds.h"
#include "loop.h"
#define RDS_CONNECTION_HASH_BITS 12
-#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
-#define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)
/* converting this to RCU is a chore for another day.. */
static DEFINE_SPINLOCK(rds_conn_lock);
static unsigned long rds_conn_count;
-static struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES];
+static DEFINE_HASHTABLE(rds_conn_hash, RDS_CONNECTION_HASH_BITS);
static struct kmem_cache *rds_conn_slab;
-static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
+static unsigned long rds_conn_hashfn(__be32 laddr, __be32 faddr)
{
/* Pass NULL, don't need struct net for hash */
- unsigned long hash = inet_ehashfn(NULL,
- be32_to_cpu(laddr), 0,
- be32_to_cpu(faddr), 0);
- return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
+ return inet_ehashfn(NULL, be32_to_cpu(laddr), 0, be32_to_cpu(faddr), 0);
}
#define rds_conn_info_set(var, test, suffix) do { \
@@ -64,14 +60,14 @@ static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
} while (0)
/* rcu read lock must be held or the connection spinlock */
-static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
- __be32 laddr, __be32 faddr,
+static struct rds_connection *rds_conn_lookup(__be32 laddr, __be32 faddr,
struct rds_transport *trans)
{
struct rds_connection *conn, *ret = NULL;
struct hlist_node *pos;
+ unsigned long key = rds_conn_hashfn(laddr, faddr);
- hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
+ hash_for_each_possible_rcu(rds_conn_hash, conn, pos, c_hash_node, key) {
if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
conn->c_trans == trans) {
ret = conn;
@@ -117,13 +113,12 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
int is_outgoing)
{
struct rds_connection *conn, *parent = NULL;
- struct hlist_head *head = rds_conn_bucket(laddr, faddr);
struct rds_transport *loop_trans;
unsigned long flags;
int ret;
rcu_read_lock();
- conn = rds_conn_lookup(head, laddr, faddr, trans);
+ conn = rds_conn_lookup(laddr, faddr, trans);
if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
!is_outgoing) {
/* This is a looped back IB connection, and we're
@@ -224,13 +219,15 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
/* Creating normal conn */
struct rds_connection *found;
- found = rds_conn_lookup(head, laddr, faddr, trans);
+ found = rds_conn_lookup(laddr, faddr, trans);
if (found) {
trans->conn_free(conn->c_transport_data);
kmem_cache_free(rds_conn_slab, conn);
conn = found;
} else {
- hlist_add_head_rcu(&conn->c_hash_node, head);
+ unsigned long key = rds_conn_hashfn(laddr, faddr);
+
+ hash_add_rcu(rds_conn_hash, &conn->c_hash_node, key);
rds_cong_add_conn(conn);
rds_conn_count++;
}
@@ -303,7 +300,7 @@ void rds_conn_shutdown(struct rds_connection *conn)
* conn - the reconnect is always triggered by the active peer. */
cancel_delayed_work_sync(&conn->c_conn_w);
rcu_read_lock();
- if (!hlist_unhashed(&conn->c_hash_node)) {
+ if (hash_hashed(&conn->c_hash_node)) {
rcu_read_unlock();
rds_queue_reconnect(conn);
} else {
@@ -329,7 +326,7 @@ void rds_conn_destroy(struct rds_connection *conn)
/* Ensure conn will not be scheduled for reconnect */
spin_lock_irq(&rds_conn_lock);
- hlist_del_init_rcu(&conn->c_hash_node);
+ hash_del(&conn->c_hash_node);
spin_unlock_irq(&rds_conn_lock);
synchronize_rcu();
@@ -375,7 +372,6 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len,
struct rds_info_lengths *lens,
int want_send)
{
- struct hlist_head *head;
struct hlist_node *pos;
struct list_head *list;
struct rds_connection *conn;
@@ -388,27 +384,24 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len,
rcu_read_lock();
- for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
- i++, head++) {
- hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
- if (want_send)
- list = &conn->c_send_queue;
- else
- list = &conn->c_retrans;
-
- spin_lock_irqsave(&conn->c_lock, flags);
-
- /* XXX too lazy to maintain counts.. */
- list_for_each_entry(rm, list, m_conn_item) {
- total++;
- if (total <= len)
- rds_inc_info_copy(&rm->m_inc, iter,
- conn->c_laddr,
- conn->c_faddr, 0);
- }
-
- spin_unlock_irqrestore(&conn->c_lock, flags);
+ hash_for_each_rcu(rds_conn_hash, i, pos, conn, c_hash_node) {
+ if (want_send)
+ list = &conn->c_send_queue;
+ else
+ list = &conn->c_retrans;
+
+ spin_lock_irqsave(&conn->c_lock, flags);
+
+ /* XXX too lazy to maintain counts.. */
+ list_for_each_entry(rm, list, m_conn_item) {
+ total++;
+ if (total <= len)
+ rds_inc_info_copy(&rm->m_inc, iter,
+ conn->c_laddr,
+ conn->c_faddr, 0);
}
+
+ spin_unlock_irqrestore(&conn->c_lock, flags);
}
rcu_read_unlock();
@@ -438,7 +431,6 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len,
size_t item_len)
{
uint64_t buffer[(item_len + 7) / 8];
- struct hlist_head *head;
struct hlist_node *pos;
struct rds_connection *conn;
size_t i;
@@ -448,23 +440,19 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len,
lens->nr = 0;
lens->each = item_len;
- for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
- i++, head++) {
- hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
-
- /* XXX no c_lock usage.. */
- if (!visitor(conn, buffer))
- continue;
-
- /* We copy as much as we can fit in the buffer,
- * but we count all items so that the caller
- * can resize the buffer. */
- if (len >= item_len) {
- rds_info_copy(iter, buffer, item_len);
- len -= item_len;
- }
- lens->nr++;
+ hash_for_each_rcu(rds_conn_hash, i, pos, conn, c_hash_node) {
+ /* XXX no c_lock usage.. */
+ if (!visitor(conn, buffer))
+ continue;
+
+ /* We copy as much as we can fit in the buffer,
+ * but we count all items so that the caller
+ * can resize the buffer. */
+ if (len >= item_len) {
+ rds_info_copy(iter, buffer, item_len);
+ len -= item_len;
}
+ lens->nr++;
}
rcu_read_unlock();
}
@@ -525,8 +513,6 @@ void rds_conn_exit(void)
{
rds_loop_exit();
- WARN_ON(!hlist_empty(rds_conn_hash));
-
kmem_cache_destroy(rds_conn_slab);
rds_info_deregister_func(RDS_INFO_CONNECTIONS, rds_conn_info);
--
1.8.0
[PATCH 14/15] openvswitch: use new hashtable implementation
From: Sasha Levin @ 2012-12-17 15:01 UTC
To: Jesse Gross, David S. Miller, dev, netdev, linux-kernel; +Cc: Sasha Levin
Switch openvswitch to the new hashtable implementation. This removes generic,
open-coded hash-table boilerplate from openvswitch.

This patch depends on d9b482c ("hashtable: introduce a small and naive
hashtable"), which was merged in v3.6.
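The simplification here is mostly at setup time: DEFINE_HASHTABLE() produces a
statically sized, statically initialized table, so the kzalloc()/kfree() pair
in ovs_vport_init()/ovs_vport_exit() goes away entirely. A sketch of the
resulting shape (illustrative names; full_name_hash() comes from
<linux/dcache.h> in kernels of this vintage):

#include <linux/hashtable.h>
#include <linux/dcache.h>	/* full_name_hash() */
#include <linux/string.h>

struct port {
	const char *name;
	struct hlist_node hash_node;
};

/* static definition: no runtime allocation or hash_init() needed */
static DEFINE_HASHTABLE(port_table, 10);	/* cf. VPORT_HASH_BITS */

static void port_publish(struct port *p)
{
	unsigned long key = full_name_hash(p->name, strlen(p->name));

	hash_add_rcu(port_table, &p->hash_node, key);
}

Only DECLARE_HASHTABLE() placements, such as the per-tunnel table in the l2tp
patch, need an explicit hash_init() at runtime.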
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
---
net/openvswitch/vport.c | 35 ++++++++++++-----------------------
1 file changed, 12 insertions(+), 23 deletions(-)
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 70af0be..a946529 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -28,6 +28,7 @@
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <net/net_namespace.h>
+#include <linux/hashtable.h>
#include "datapath.h"
#include "vport.h"
@@ -41,8 +42,8 @@ static const struct vport_ops *vport_ops_list[] = {
};
/* Protected by RCU read lock for reading, RTNL lock for writing. */
-static struct hlist_head *dev_table;
-#define VPORT_HASH_BUCKETS 1024
+#define VPORT_HASH_BITS 10
+static DEFINE_HASHTABLE(dev_table, VPORT_HASH_BITS);
/**
* ovs_vport_init - initialize vport subsystem
@@ -51,11 +52,6 @@ static struct hlist_head *dev_table;
*/
int ovs_vport_init(void)
{
- dev_table = kzalloc(VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
- GFP_KERNEL);
- if (!dev_table)
- return -ENOMEM;
-
return 0;
}
@@ -66,13 +62,6 @@ int ovs_vport_init(void)
*/
void ovs_vport_exit(void)
{
- kfree(dev_table);
-}
-
-static struct hlist_head *hash_bucket(struct net *net, const char *name)
-{
- unsigned int hash = jhash(name, strlen(name), (unsigned long) net);
- return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
}
/**
@@ -84,13 +73,12 @@ static struct hlist_head *hash_bucket(struct net *net, const char *name)
*/
struct vport *ovs_vport_locate(struct net *net, const char *name)
{
- struct hlist_head *bucket = hash_bucket(net, name);
struct vport *vport;
struct hlist_node *node;
+ int key = full_name_hash(name, strlen(name));
- hlist_for_each_entry_rcu(vport, node, bucket, hash_node)
- if (!strcmp(name, vport->ops->get_name(vport)) &&
- net_eq(ovs_dp_get_net(vport->dp), net))
+ hash_for_each_possible_rcu(dev_table, vport, node, hash_node, key)
+ if (!strcmp(name, vport->ops->get_name(vport)))
return vport;
return NULL;
@@ -174,7 +162,8 @@ struct vport *ovs_vport_add(const struct vport_parms *parms)
for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) {
if (vport_ops_list[i]->type == parms->type) {
- struct hlist_head *bucket;
+ int key;
+ const char *name;
vport = vport_ops_list[i]->create(parms);
if (IS_ERR(vport)) {
@@ -182,9 +171,9 @@ struct vport *ovs_vport_add(const struct vport_parms *parms)
goto out;
}
- bucket = hash_bucket(ovs_dp_get_net(vport->dp),
- vport->ops->get_name(vport));
- hlist_add_head_rcu(&vport->hash_node, bucket);
+ name = vport->ops->get_name(vport);
+ key = full_name_hash(name, strlen(name));
+ hash_add_rcu(dev_table, &vport->hash_node, key);
return vport;
}
}
@@ -225,7 +214,7 @@ void ovs_vport_del(struct vport *vport)
{
ASSERT_RTNL();
- hlist_del_rcu(&vport->hash_node);
+ hash_del_rcu(&vport->hash_node);
vport->ops->destroy(vport);
}
--
1.8.0