* [PATCH net] ipvlan: Make the addrs_lock be per port
@ 2025-12-15 16:54 Dmitry Skorodumov
2025-12-15 17:00 ` Dmitry Skorodumov
2025-12-23 10:13 ` Paolo Abeni
0 siblings, 2 replies; 4+ messages in thread
From: Dmitry Skorodumov @ 2025-12-15 16:54 UTC
To: netdev, Dmitry Skorodumov, Xiao Liang, Jakub Kicinski,
Kuniyuki Iwashima, Guillaume Nault, Julian Vetter, Eric Dumazet,
Stanislav Fomichev, Etienne Champetier, David S. Miller,
Paolo Abeni, linux-kernel
Cc: Andrew Lunn
Make the addrs_lock per port, not per ipvlan dev.

The initial code seems to have been written under the assumption
that any address change happens under RTNL, but that is not the
case for IPv6. So:

1) Introduce a per-port addrs_lock.

2) Fix the places where taking the lock was forgotten
(ipvlan_open/ipvlan_stop).

3) Fix the places where list_for_each_entry_rcu() was used to
iterate the list while holding the lock.

This appears to be a very minor problem, since it is highly
unlikely that ipvlan_add_addr() will be called on two CPUs
simultaneously. Nevertheless, it could cause:

1) A false negative from ipvlan_addr_busy(): one interface
iterates through all port->ipvlans + ipvlan->addrs under its own
ipvlan spinlock while another adds an IP under a different lock.
This is only possible for IPv6, since it looks like only
ipvlan_addr6_event() can be called without rtnl_lock.

2) A race, since ipvlan_ht_addr_add() on the same port is called
under different ipvlan->addrs_lock locks.

This should not affect performance, since adding/removing an IP
address is rare and the spinlock is not taken on fast paths.

Fixes: 8230819494b3 ("ipvlan: use per device spinlock to protect addrs list updates")
Signed-off-by: Dmitry Skorodumov <skorodumov.dmitry@huawei.com>
CC: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Dmitry Skorodumov <skorodumov.dmitry@huawei.com>
---
drivers/net/ipvlan/ipvlan.h | 2 +-
drivers/net/ipvlan/ipvlan_core.c | 12 ++++----
drivers/net/ipvlan/ipvlan_main.c | 52 ++++++++++++++++++--------------
3 files changed, 37 insertions(+), 29 deletions(-)
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 50de3ee204db..80f84fc87008 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -69,7 +69,6 @@ struct ipvl_dev {
DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE);
netdev_features_t sfeatures;
u32 msg_enable;
- spinlock_t addrs_lock;
};
struct ipvl_addr {
@@ -90,6 +89,7 @@ struct ipvl_port {
struct net_device *dev;
possible_net_t pnet;
struct hlist_head hlhead[IPVLAN_HASH_SIZE];
+ spinlock_t addrs_lock; /* guards hash-table and addrs */
struct list_head ipvlans;
u16 mode;
u16 flags;
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 2efa3ba148aa..22cb5ee7a231 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -109,14 +109,14 @@ struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
{
struct ipvl_addr *addr, *ret = NULL;
- rcu_read_lock();
- list_for_each_entry_rcu(addr, &ipvlan->addrs, anode) {
+ assert_spin_locked(&ipvlan->port->addrs_lock);
+
+ list_for_each_entry(addr, &ipvlan->addrs, anode) {
if (addr_equal(is_v6, addr, iaddr)) {
ret = addr;
break;
}
}
- rcu_read_unlock();
return ret;
}
@@ -125,14 +125,14 @@ bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
struct ipvl_dev *ipvlan;
bool ret = false;
- rcu_read_lock();
- list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
+ assert_spin_locked(&port->addrs_lock);
+
+ list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
if (ipvlan_find_addr(ipvlan, iaddr, is_v6)) {
ret = true;
break;
}
}
- rcu_read_unlock();
return ret;
}
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 660f3db11766..b0b4f747f162 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -75,6 +75,7 @@ static int ipvlan_port_create(struct net_device *dev)
for (idx = 0; idx < IPVLAN_HASH_SIZE; idx++)
INIT_HLIST_HEAD(&port->hlhead[idx]);
+ spin_lock_init(&port->addrs_lock);
skb_queue_head_init(&port->backlog);
INIT_WORK(&port->wq, ipvlan_process_multicast);
ida_init(&port->ida);
@@ -181,18 +182,18 @@ static void ipvlan_uninit(struct net_device *dev)
static int ipvlan_open(struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct ipvl_port *port = ipvlan->port;
struct ipvl_addr *addr;
- if (ipvlan->port->mode == IPVLAN_MODE_L3 ||
- ipvlan->port->mode == IPVLAN_MODE_L3S)
+ if (port->mode == IPVLAN_MODE_L3 || port->mode == IPVLAN_MODE_L3S)
dev->flags |= IFF_NOARP;
else
dev->flags &= ~IFF_NOARP;
- rcu_read_lock();
- list_for_each_entry_rcu(addr, &ipvlan->addrs, anode)
+ spin_lock_bh(&port->addrs_lock);
+ list_for_each_entry(addr, &ipvlan->addrs, anode)
ipvlan_ht_addr_add(ipvlan, addr);
- rcu_read_unlock();
+ spin_unlock_bh(&port->addrs_lock);
return 0;
}
@@ -206,10 +207,10 @@ static int ipvlan_stop(struct net_device *dev)
dev_uc_unsync(phy_dev, dev);
dev_mc_unsync(phy_dev, dev);
- rcu_read_lock();
- list_for_each_entry_rcu(addr, &ipvlan->addrs, anode)
+ spin_lock_bh(&ipvlan->port->addrs_lock);
+ list_for_each_entry(addr, &ipvlan->addrs, anode)
ipvlan_ht_addr_del(addr);
- rcu_read_unlock();
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
return 0;
}
@@ -579,7 +580,6 @@ int ipvlan_link_new(struct net_device *dev, struct rtnl_newlink_params *params,
if (!tb[IFLA_MTU])
ipvlan_adjust_mtu(ipvlan, phy_dev);
INIT_LIST_HEAD(&ipvlan->addrs);
- spin_lock_init(&ipvlan->addrs_lock);
/* TODO Probably put random address here to be presented to the
* world but keep using the physical-dev address for the outgoing
@@ -657,13 +657,13 @@ void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct ipvl_addr *addr, *next;
- spin_lock_bh(&ipvlan->addrs_lock);
+ spin_lock_bh(&ipvlan->port->addrs_lock);
list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
ipvlan_ht_addr_del(addr);
list_del_rcu(&addr->anode);
kfree_rcu(addr, rcu);
}
- spin_unlock_bh(&ipvlan->addrs_lock);
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
ida_free(&ipvlan->port->ida, dev->dev_id);
list_del_rcu(&ipvlan->pnode);
@@ -817,6 +817,8 @@ static int ipvlan_add_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
{
struct ipvl_addr *addr;
+ assert_spin_locked(&ipvlan->port->addrs_lock);
+
addr = kzalloc(sizeof(struct ipvl_addr), GFP_ATOMIC);
if (!addr)
return -ENOMEM;
@@ -847,16 +849,16 @@ static void ipvlan_del_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
{
struct ipvl_addr *addr;
- spin_lock_bh(&ipvlan->addrs_lock);
+ spin_lock_bh(&ipvlan->port->addrs_lock);
addr = ipvlan_find_addr(ipvlan, iaddr, is_v6);
if (!addr) {
- spin_unlock_bh(&ipvlan->addrs_lock);
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
return;
}
ipvlan_ht_addr_del(addr);
list_del_rcu(&addr->anode);
- spin_unlock_bh(&ipvlan->addrs_lock);
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
kfree_rcu(addr, rcu);
}
@@ -878,14 +880,14 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
{
int ret = -EINVAL;
- spin_lock_bh(&ipvlan->addrs_lock);
+ spin_lock_bh(&ipvlan->port->addrs_lock);
if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true))
netif_err(ipvlan, ifup, ipvlan->dev,
"Failed to add IPv6=%pI6c addr for %s intf\n",
ip6_addr, ipvlan->dev->name);
else
ret = ipvlan_add_addr(ipvlan, ip6_addr, true);
- spin_unlock_bh(&ipvlan->addrs_lock);
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
return ret;
}
@@ -924,21 +926,24 @@ static int ipvlan_addr6_validator_event(struct notifier_block *unused,
struct in6_validator_info *i6vi = (struct in6_validator_info *)ptr;
struct net_device *dev = (struct net_device *)i6vi->i6vi_dev->dev;
struct ipvl_dev *ipvlan = netdev_priv(dev);
+ int ret = NOTIFY_OK;
if (!ipvlan_is_valid_dev(dev))
return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
+ spin_lock_bh(&ipvlan->port->addrs_lock);
if (ipvlan_addr_busy(ipvlan->port, &i6vi->i6vi_addr, true)) {
NL_SET_ERR_MSG(i6vi->extack,
"Address already assigned to an ipvlan device");
- return notifier_from_errno(-EADDRINUSE);
+ ret = notifier_from_errno(-EADDRINUSE);
}
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
break;
}
- return NOTIFY_OK;
+ return ret;
}
#endif
@@ -946,14 +951,14 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
{
int ret = -EINVAL;
- spin_lock_bh(&ipvlan->addrs_lock);
+ spin_lock_bh(&ipvlan->port->addrs_lock);
if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false))
netif_err(ipvlan, ifup, ipvlan->dev,
"Failed to add IPv4=%pI4 on %s intf.\n",
ip4_addr, ipvlan->dev->name);
else
ret = ipvlan_add_addr(ipvlan, ip4_addr, false);
- spin_unlock_bh(&ipvlan->addrs_lock);
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
return ret;
}
@@ -995,21 +1000,24 @@ static int ipvlan_addr4_validator_event(struct notifier_block *unused,
struct in_validator_info *ivi = (struct in_validator_info *)ptr;
struct net_device *dev = (struct net_device *)ivi->ivi_dev->dev;
struct ipvl_dev *ipvlan = netdev_priv(dev);
+ int ret = NOTIFY_OK;
if (!ipvlan_is_valid_dev(dev))
return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
+ spin_lock_bh(&ipvlan->port->addrs_lock);
if (ipvlan_addr_busy(ipvlan->port, &ivi->ivi_addr, false)) {
NL_SET_ERR_MSG(ivi->extack,
"Address already assigned to an ipvlan device");
- return notifier_from_errno(-EADDRINUSE);
+ ret = notifier_from_errno(-EADDRINUSE);
}
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
break;
}
- return NOTIFY_OK;
+ return ret;
}
static struct notifier_block ipvlan_addr4_notifier_block __read_mostly = {
--
2.25.1
* Re: [PATCH net] ipvlan: Make the addrs_lock be per port
2025-12-15 16:54 [PATCH net] ipvlan: Make the addrs_lock be per port Dmitry Skorodumov
@ 2025-12-15 17:00 ` Dmitry Skorodumov
2025-12-16 11:17 ` Simon Horman
2025-12-23 10:13 ` Paolo Abeni
1 sibling, 1 reply; 4+ messages in thread
From: Dmitry Skorodumov @ 2025-12-15 17:00 UTC
To: netdev, Xiao Liang, Jakub Kicinski, Kuniyuki Iwashima,
Guillaume Nault, Julian Vetter, Eric Dumazet, Stanislav Fomichev,
Etienne Champetier, David S. Miller, Paolo Abeni, linux-kernel
Cc: Andrew Lunn
I'm currently working on a selftests/net test for ipvtap (the test
calls "ip a a"/"ip a d" from several threads), but I'm unsure how
to proceed:

This patch is supposed to be a "fix", but a selftest is obviously
not a fix.

So I'm unsure how to send a selftest for this.
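
For reference, the core of the test is roughly the following (only a
sketch, not the actual selftest; the "ipvl0" device name, the address
range and the thread/iteration counts are placeholders, and the real
test would probably exercise several ipvlan devices on the same
master):

/* stress sketch: several threads add/delete IPv6 addresses on the
 * same ipvlan/ipvtap device in a loop, racing the address notifiers.
 * Build: gcc -O2 -o addr_stress addr_stress.c -lpthread
 * Run as root with an "ipvl0" device already created.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NTHREADS 4
#define NITERS   1000

static void *worker(void *arg)
{
        long id = (long)arg;
        char cmd[128];
        int i;

        for (i = 0; i < NITERS; i++) {
                /* Each thread uses its own address suffix. */
                snprintf(cmd, sizeof(cmd),
                         "ip addr add 2001:db8::%lx:%x/64 dev ipvl0",
                         id + 1, i + 1);
                system(cmd);    /* failures while racing are ignored */
                snprintf(cmd, sizeof(cmd),
                         "ip addr del 2001:db8::%lx:%x/64 dev ipvl0",
                         id + 1, i + 1);
                system(cmd);
        }
        return NULL;
}

int main(void)
{
        pthread_t threads[NTHREADS];
        long i;

        for (i = 0; i < NTHREADS; i++)
                pthread_create(&threads[i], NULL, worker, (void *)i);
        for (i = 0; i < NTHREADS; i++)
                pthread_join(threads[i], NULL);
        return 0;
}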
Dmitry
On 15.12.2025 19:54, Dmitry Skorodumov wrote:
> Make the addrs_lock per port, not per ipvlan dev.
>
> The initial code seems to have been written under the assumption
> that any address change happens under RTNL, but that is not the
> case for IPv6. So:
>
> 1) Introduce a per-port addrs_lock.
>
> 2) Fix the places where taking the lock was forgotten
> (ipvlan_open/ipvlan_stop).
>
> 3) Fix the places where list_for_each_entry_rcu() was used to
> iterate the list while holding the lock.
>
> This appears to be a very minor problem, since it is highly
> unlikely that ipvlan_add_addr() will be called on two CPUs
> simultaneously. Nevertheless, it could cause:
>
> 1) A false negative from ipvlan_addr_busy(): one interface
> iterates through all port->ipvlans + ipvlan->addrs under its own
> ipvlan spinlock while another adds an IP under a different lock.
> This is only possible for IPv6, since it looks like only
> ipvlan_addr6_event() can be called without rtnl_lock.
>
> 2) A race, since ipvlan_ht_addr_add() on the same port is called
> under different ipvlan->addrs_lock locks.
>
> This should not affect performance, since adding/removing an IP
> address is rare and the spinlock is not taken on fast paths.
>
> Fixes: 8230819494b3 ("ipvlan: use per device spinlock to protect addrs list updates")
> Signed-off-by: Dmitry Skorodumov <skorodumov.dmitry@huawei.com>
> CC: Paolo Abeni <pabeni@redhat.com>
> Signed-off-by: Dmitry Skorodumov <skorodumov.dmitry@huawei.com>
* Re: [PATCH net] ipvlan: Make the addrs_lock be per port
2025-12-15 17:00 ` Dmitry Skorodumov
@ 2025-12-16 11:17 ` Simon Horman
0 siblings, 0 replies; 4+ messages in thread
From: Simon Horman @ 2025-12-16 11:17 UTC
To: Dmitry Skorodumov
Cc: netdev, Xiao Liang, Jakub Kicinski, Kuniyuki Iwashima,
Guillaume Nault, Julian Vetter, Eric Dumazet, Stanislav Fomichev,
Etienne Champetier, David S. Miller, Paolo Abeni, linux-kernel,
Andrew Lunn
On Mon, Dec 15, 2025 at 08:00:52PM +0300, Dmitry Skorodumov wrote:
> I'm currently working on a selftests/net test for ipvtap (the test
> calls "ip a a"/"ip a d" from several threads), but I'm unsure how
> to proceed:
>
> This patch is supposed to be a "fix", but a selftest is obviously
> not a fix.
>
> So I'm unsure how to send a selftest for this.
Hi Dmitry,
I think that in cases like this - a fix coupled with a selftest for the
fixed problem - both the fix and the selftest can be included in a
patch-set targeted at net.
* Re: [PATCH net] ipvlan: Make the addrs_lock be per port
2025-12-15 16:54 [PATCH net] ipvlan: Make the addrs_lock be per port Dmitry Skorodumov
2025-12-15 17:00 ` Dmitry Skorodumov
@ 2025-12-23 10:13 ` Paolo Abeni
1 sibling, 0 replies; 4+ messages in thread
From: Paolo Abeni @ 2025-12-23 10:13 UTC
To: Dmitry Skorodumov, netdev, Xiao Liang, Jakub Kicinski,
Kuniyuki Iwashima, Guillaume Nault, Julian Vetter, Eric Dumazet,
Stanislav Fomichev, Etienne Champetier, David S. Miller,
linux-kernel
Cc: Andrew Lunn
On 12/15/25 5:54 PM, Dmitry Skorodumov wrote:
> Make the addrs_lock per port, not per ipvlan dev.
>
> The initial code seems to have been written under the assumption
> that any address change happens under RTNL, but that is not the
> case for IPv6. So:
>
> 1) Introduce a per-port addrs_lock.
>
> 2) Fix the places where taking the lock was forgotten
> (ipvlan_open/ipvlan_stop).
>
> 3) Fix the places where list_for_each_entry_rcu() was used to
> iterate the list while holding the lock.
>
> This appears to be a very minor problem, since it is highly
> unlikely that ipvlan_add_addr() will be called on two CPUs
> simultaneously. Nevertheless, it could cause:
>
> 1) A false negative from ipvlan_addr_busy(): one interface
> iterates through all port->ipvlans + ipvlan->addrs under its own
> ipvlan spinlock while another adds an IP under a different lock.
> This is only possible for IPv6, since it looks like only
> ipvlan_addr6_event() can be called without rtnl_lock.
>
> 2) A race, since ipvlan_ht_addr_add() on the same port is called
> under different ipvlan->addrs_lock locks.
>
> This should not affect performance, since adding/removing an IP
> address is rare and the spinlock is not taken on fast paths.
>
> Fixes: 8230819494b3 ("ipvlan: use per device spinlock to protect addrs list updates")
> Signed-off-by: Dmitry Skorodumov <skorodumov.dmitry@huawei.com>
> CC: Paolo Abeni <pabeni@redhat.com>
> Signed-off-by: Dmitry Skorodumov <skorodumov.dmitry@huawei.com>
Duplicate signature: drop one.
Side note: you should have included a revision number in the subj prefix
(v2) and a summary of changes since v1 after the '---' separator
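For example (the usual netdev convention; the changelog line is just a
placeholder):

  Subject: [PATCH net v2] ipvlan: Make the addrs_lock be per port
  ...
  Signed-off-by: Dmitry Skorodumov <skorodumov.dmitry@huawei.com>
  ---
  v1 -> v2:
   - <summary of changes since v1>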
> ---
> drivers/net/ipvlan/ipvlan.h | 2 +-
> drivers/net/ipvlan/ipvlan_core.c | 12 ++++----
> drivers/net/ipvlan/ipvlan_main.c | 52 ++++++++++++++++++--------------
> 3 files changed, 37 insertions(+), 29 deletions(-)
>
> diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
> index 50de3ee204db..80f84fc87008 100644
> --- a/drivers/net/ipvlan/ipvlan.h
> +++ b/drivers/net/ipvlan/ipvlan.h
> @@ -69,7 +69,6 @@ struct ipvl_dev {
> DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE);
> netdev_features_t sfeatures;
> u32 msg_enable;
> - spinlock_t addrs_lock;
> };
>
> struct ipvl_addr {
> @@ -90,6 +89,7 @@ struct ipvl_port {
> struct net_device *dev;
> possible_net_t pnet;
> struct hlist_head hlhead[IPVLAN_HASH_SIZE];
> + spinlock_t addrs_lock; /* guards hash-table and addrs */
> struct list_head ipvlans;
> u16 mode;
> u16 flags;
> diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
> index 2efa3ba148aa..22cb5ee7a231 100644
> --- a/drivers/net/ipvlan/ipvlan_core.c
> +++ b/drivers/net/ipvlan/ipvlan_core.c
> @@ -109,14 +109,14 @@ struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
> {
> struct ipvl_addr *addr, *ret = NULL;
>
> - rcu_read_lock();
> - list_for_each_entry_rcu(addr, &ipvlan->addrs, anode) {
> + assert_spin_locked(&ipvlan->port->addrs_lock);
> +
> + list_for_each_entry(addr, &ipvlan->addrs, anode) {
> if (addr_equal(is_v6, addr, iaddr)) {
> ret = addr;
> break;
You could just return `addr`, and remove the `ret` variable
> }
> }
> - rcu_read_unlock();
> return ret;
> }
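
i.e. the body of ipvlan_find_addr() could then look roughly like this
(untested, just to illustrate the suggestion above):

	struct ipvl_addr *addr;

	assert_spin_locked(&ipvlan->port->addrs_lock);

	/* Return the match directly instead of carrying a ret variable. */
	list_for_each_entry(addr, &ipvlan->addrs, anode)
		if (addr_equal(is_v6, addr, iaddr))
			return addr;

	return NULL;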
>
> @@ -125,14 +125,14 @@ bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
> struct ipvl_dev *ipvlan;
> bool ret = false;
>
> - rcu_read_lock();
> - list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
> + assert_spin_locked(&port->addrs_lock);
> +
> + list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
What protects the `ipvlans` list here? I think the RCU lock is still needed.
> if (ipvlan_find_addr(ipvlan, iaddr, is_v6)) {
> ret = true;
> break;
> }
> }
> - rcu_read_unlock();
> return ret;
> }
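
i.e. something along these lines (untested sketch; it keeps the RCU
read side for the port->ipvlans walk on top of the new spinlock
assertion):

	assert_spin_locked(&port->addrs_lock);

	/* The ipvlans list is still RCU-protected, so keep the read lock
	 * around the walk even though addrs_lock is held.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
		if (ipvlan_find_addr(ipvlan, iaddr, is_v6)) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();

	return ret;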
>
> diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
> index 660f3db11766..b0b4f747f162 100644
> --- a/drivers/net/ipvlan/ipvlan_main.c
> +++ b/drivers/net/ipvlan/ipvlan_main.c
> @@ -75,6 +75,7 @@ static int ipvlan_port_create(struct net_device *dev)
> for (idx = 0; idx < IPVLAN_HASH_SIZE; idx++)
> INIT_HLIST_HEAD(&port->hlhead[idx]);
>
> + spin_lock_init(&port->addrs_lock);
> skb_queue_head_init(&port->backlog);
> INIT_WORK(&port->wq, ipvlan_process_multicast);
> ida_init(&port->ida);
> @@ -181,18 +182,18 @@ static void ipvlan_uninit(struct net_device *dev)
> static int ipvlan_open(struct net_device *dev)
> {
> struct ipvl_dev *ipvlan = netdev_priv(dev);
> + struct ipvl_port *port = ipvlan->port;
> struct ipvl_addr *addr;
>
> - if (ipvlan->port->mode == IPVLAN_MODE_L3 ||
> - ipvlan->port->mode == IPVLAN_MODE_L3S)
> + if (port->mode == IPVLAN_MODE_L3 || port->mode == IPVLAN_MODE_L3S)
> dev->flags |= IFF_NOARP;
> else
> dev->flags &= ~IFF_NOARP;
Please omit unrelated formatting changes; this fix is already quite
big as is.

Please include the paired self-test in the next iteration (as noted
by Simon, the self-test can be included in a 'net' series, too),
thanks!
Paolo