From: "Kaigai Kohei" <kaigai@ak.jp.nec.com>
To: "Stephen Smalley" <sds@epoch.ncsc.mil>
Cc: "SELinux-ML(Eng)" <selinux@tycho.nsa.gov>,
"Linux Kernel ML(Eng)" <linux-kernel@vger.kernel.org>,
"James Morris" <jmorris@redhat.com>
Subject: Re: RCU issue with SELinux (Re: SELINUX performance issues)
Date: Tue, 24 Aug 2004 16:25:32 +0900 [thread overview]
Message-ID: <042b01c489ab$8a871ce0$f97d220a@linux.bs1.fc.nec.co.jp> (raw)
In-Reply-To: 1093014789.16585.186.camel@moss-spartans.epoch.ncsc.mil
[-- Attachment #1: Type: text/plain, Size: 2027 bytes --]
Hi Stephen, Thanks for your comments.
> I'm not overly familiar with RCU myself, but the comments in list.h for
> list_add_rcu suggest that you still need to hold a lock to avoid racing
> with another list_add_rcu or list_del_rcu call on the same list. But
> avc_insert is calling list_add_rcu without holding any lock; can't it
> race with another avc_insert on the same hash bucket? Do I just
> misunderstand, or is this unsafe? Thanks for clarifying.
You are right. Indeed, the lock for the hash bucket is also necessary
when avc_insert() is called. I have fixed this.
> I think we can likely eliminate the mutation of the node in the
> !selinux_enforcing case in avc_has_perm_noaudit, i.e. eliminate the
> entire else clause and just fall through with rc still 0. Adding the
> requested permissions to the node was simply to avoid flooding denials
> in permissive mode on the same permission check, but this can be
> addressed separately using the audit ratelimit mechanism.
I have a different opinion.
This simple mechanism against audit-log flooding is necessary,
because it prevents both depletion of the system log buffer and denial messages
flooding the console while we are debugging the security policy in permissive mode.
So, I improved the avc_update_node() function and the avc_node data structure.
avc_update_node() no longer needs kmalloc().
This approach preserves the durability and compatibility of the original implementation,
but twice the space for avc_nodes is needed in order to update without kmalloc().
This approach can be applied to any kind of update of an avc_entry.
This idea is somewhat more complex, though.
I modified the following points:
- We hold the lock for the hash bucket when avc_insert() and avc_ss_reset() are
called, for safety.
- list_for_each_rcu() and list_entry() are replaced by list_for_each_entry().
- An avc_node_dual structure which contains two avc_node objects is defined.
It allows avc_update_node() to run without kmalloc() or any locks.
Any comments please. Thanks.
--------
Kai Gai <kaigai@ak.jp.nec.com>
[-- Attachment #2: list_replace_rcu-2.6.8.1.patch --]
[-- Type: application/octet-stream, Size: 845 bytes --]
--- linux-2.6.8.1/include/linux/list.h 2004-08-14 19:55:33.000000000 +0900
+++ linux-2.6.8.1.rcu/include/linux/list.h 2004-08-20 18:04:10.000000000 +0900
@@ -194,8 +194,23 @@
__list_del(entry->prev, entry->next);
entry->prev = LIST_POISON2;
}
+/*
+ * list_replace_rcu - replace old entry by new one from list
+ * @old : the element to be replaced from the list.
+ * @new : the new element to insert to the list.
+ *
+ * The old entry will be replaced to the new entry atomically.
+ */
+static inline void list_replace_rcu(struct list_head *old, struct list_head *new){
+ new->next = old->next;
+ new->prev = old->prev;
+ smp_wmb();
+ new->next->prev = new;
+ new->prev->next = new;
+}
+
/**
* list_del_init - deletes entry from list and reinitialize it.
* @entry: the element to delete from the list.
*/
[-- Attachment #3: selinux.rcu-2.6.8.1-take2.patch --]
[-- Type: application/octet-stream, Size: 20952 bytes --]
diff -rNU4 linux-2.6.8.1/security/selinux/avc.c linux-2.6.8.1.rcu/security/selinux/avc.c
--- linux-2.6.8.1/security/selinux/avc.c 2004-08-14 19:55:48.000000000 +0900
+++ linux-2.6.8.1.rcu/security/selinux/avc.c 2004-08-24 13:30:56.000000000 +0900
@@ -35,28 +35,40 @@
#include "av_perm_to_string.h"
#include "objsec.h"
#define AVC_CACHE_SLOTS 512
-#define AVC_CACHE_MAXNODES 410
+#define AVC_CACHE_THRESHOLD 410
+#define AVC_CACHE_RECLAIM 16
struct avc_entry {
u32 ssid;
u32 tsid;
u16 tclass;
struct av_decision avd;
- int used; /* used recently */
+ atomic_t used; /* used recently */
};
+struct avc_node_dual;
struct avc_node {
+ struct list_head list;
struct avc_entry ae;
- struct avc_node *next;
+ struct avc_node *reserved;
+ struct avc_node_dual *container;
+};
+
+/* dual avc_node is necessary for update */
+struct avc_node_dual {
+ struct rcu_head rhead;
+ struct avc_node node[2];
};
struct avc_cache {
- struct avc_node *slots[AVC_CACHE_SLOTS];
- u32 lru_hint; /* LRU hint for reclaim scan */
- u32 active_nodes;
- u32 latest_notif; /* latest revocation notification */
+ struct list_head slots[AVC_CACHE_SLOTS];
+ spinlock_t slots_lock[AVC_CACHE_SLOTS];
+ /* lock for insert/update/delete and reset */
+ atomic_t lru_hint; /* LRU hint for reclaim scan */
+ atomic_t active_nodes;
+ u32 latest_notif; /* latest revocation notification */
};
struct avc_callback_node {
int (*callback) (u32 event, u32 ssid, u32 tsid,
@@ -69,10 +81,8 @@
u32 perms;
struct avc_callback_node *next;
};
-static spinlock_t avc_lock = SPIN_LOCK_UNLOCKED;
-static struct avc_node *avc_node_freelist;
static struct avc_cache avc_cache;
static unsigned avc_cache_stats[AVC_NSTATS];
static struct avc_callback_node *avc_callbacks;
@@ -187,52 +197,44 @@
* Initialize the access vector cache.
*/
void __init avc_init(void)
{
- struct avc_node *new;
int i;
- for (i = 0; i < AVC_CACHE_MAXNODES; i++) {
- new = kmalloc(sizeof(*new), GFP_ATOMIC);
- if (!new) {
- printk(KERN_WARNING "avc: only able to allocate "
- "%d entries\n", i);
- break;
- }
- memset(new, 0, sizeof(*new));
- new->next = avc_node_freelist;
- avc_node_freelist = new;
- }
-
+ for (i =0; i < AVC_CACHE_SLOTS; i++) {
+ INIT_LIST_HEAD(&avc_cache.slots[i]);
+ avc_cache.slots_lock[i] = SPIN_LOCK_UNLOCKED;
+ }
+ atomic_set(&avc_cache.active_nodes, 0);
+ atomic_set(&avc_cache.lru_hint, 0);
+
audit_log(current->audit_context, "AVC INITIALIZED\n");
}
#if 0
static void avc_hash_eval(char *tag)
{
int i, chain_len, max_chain_len, slots_used;
struct avc_node *node;
+ struct list_head *pos;
unsigned long flags;
- spin_lock_irqsave(&avc_lock,flags);
+ rcu_read_lock();
slots_used = 0;
max_chain_len = 0;
for (i = 0; i < AVC_CACHE_SLOTS; i++) {
- node = avc_cache.slots[i];
- if (node) {
+ if (!list_empty(&avc_cache.slots[i])) {
slots_used++;
chain_len = 0;
- while (node) {
+ list_for_each_rcu(pos, &avc_cache.slots[i])
chain_len++;
- node = node->next;
- }
if (chain_len > max_chain_len)
max_chain_len = chain_len;
}
}
- spin_unlock_irqrestore(&avc_lock,flags);
+ rcu_read_unlock();
printk(KERN_INFO "\n");
printk(KERN_INFO "%s avc: %d entries and %d/%d buckets used, longest "
"chain length %d\n", tag, avc_cache.active_nodes, slots_used,
@@ -242,188 +244,208 @@
static inline void avc_hash_eval(char *tag)
{ }
#endif
-static inline struct avc_node *avc_reclaim_node(void)
-{
- struct avc_node *prev, *cur;
- int hvalue, try;
+static void avc_node_free(struct rcu_head *rhead) {
+ struct avc_node_dual *dual_node;
+ dual_node = container_of(rhead, struct avc_node_dual, rhead);
+ kfree(dual_node);
+}
- hvalue = avc_cache.lru_hint;
- for (try = 0; try < 2; try++) {
- do {
- prev = NULL;
- cur = avc_cache.slots[hvalue];
- while (cur) {
- if (!cur->ae.used)
- goto found;
+static inline void avc_reclaim_node(void)
+{
+ struct avc_node *node;
+ int hvalue, try, ecx;
- cur->ae.used = 0;
+ for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++ ) {
+ hvalue = atomic_inc_return(&avc_cache.lru_hint) % AVC_CACHE_SLOTS;
- prev = cur;
- cur = cur->next;
+ if (spin_trylock(&avc_cache.slots_lock[hvalue])) {
+ list_for_each_entry(node, &avc_cache.slots[hvalue], list) {
+ if (!atomic_dec_and_test(&node->ae.used)) {
+ /* Recently Unused */
+ list_del_rcu(&node->list);
+ call_rcu(&node->container->rhead, avc_node_free);
+ atomic_dec(&avc_cache.active_nodes);
+ ecx++;
+ if (ecx >= AVC_CACHE_RECLAIM) {
+ spin_unlock(&avc_cache.slots_lock[hvalue]);
+ goto out;
+ }
+ }
}
- hvalue = (hvalue + 1) & (AVC_CACHE_SLOTS - 1);
- } while (hvalue != avc_cache.lru_hint);
+ spin_unlock(&avc_cache.slots_lock[hvalue]);
+ }
}
-
- panic("avc_reclaim_node");
-
-found:
- avc_cache.lru_hint = hvalue;
-
- if (prev == NULL)
- avc_cache.slots[hvalue] = cur->next;
- else
- prev->next = cur->next;
-
- return cur;
+out:
+ return;
}
-static inline struct avc_node *avc_claim_node(u32 ssid,
- u32 tsid, u16 tclass)
+static inline struct avc_node *avc_get_node(void)
{
- struct avc_node *new;
- int hvalue;
+ struct avc_node_dual *new;
+ int actives;
- hvalue = avc_hash(ssid, tsid, tclass);
- if (avc_node_freelist) {
- new = avc_node_freelist;
- avc_node_freelist = avc_node_freelist->next;
- avc_cache.active_nodes++;
- } else {
- new = avc_reclaim_node();
- if (!new)
- goto out;
- }
+ new = kmalloc(sizeof(struct avc_node_dual), GFP_ATOMIC);
+ if (!new)
+ return NULL;
- new->ae.used = 1;
- new->ae.ssid = ssid;
- new->ae.tsid = tsid;
- new->ae.tclass = tclass;
- new->next = avc_cache.slots[hvalue];
- avc_cache.slots[hvalue] = new;
+ INIT_RCU_HEAD(&new->rhead);
+ INIT_LIST_HEAD(&new->node[0].list);
+ INIT_LIST_HEAD(&new->node[1].list);
+ new->node[0].container = new;
+ new->node[1].container = new;
+ new->node[0].reserved = &new->node[1];
+ new->node[1].reserved = &new->node[0];
-out:
- return new;
+ actives = atomic_inc_return(&avc_cache.active_nodes);
+ if (actives > AVC_CACHE_THRESHOLD)
+ avc_reclaim_node();
+
+ return &new->node[0];
}
static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid,
u16 tclass, int *probes)
{
- struct avc_node *cur;
+ struct avc_node *node, *ret = NULL;
int hvalue;
int tprobes = 1;
hvalue = avc_hash(ssid, tsid, tclass);
- cur = avc_cache.slots[hvalue];
- while (cur != NULL &&
- (ssid != cur->ae.ssid ||
- tclass != cur->ae.tclass ||
- tsid != cur->ae.tsid)) {
+ list_for_each_entry(node, &avc_cache.slots[hvalue], list) {
+ if (ssid == node->ae.ssid &&
+ tclass == node->ae.tclass &&
+ tsid == node->ae.tsid) {
+ ret = node;
+ break;
+ }
tprobes++;
- cur = cur->next;
}
- if (cur == NULL) {
+ if (ret == NULL) {
/* cache miss */
goto out;
}
/* cache hit */
if (probes)
*probes = tprobes;
-
- cur->ae.used = 1;
-
+ if (atomic_read(&ret->ae.used) != 1)
+ atomic_set(&ret->ae.used, 1);
out:
- return cur;
+ return ret;
}
/**
* avc_lookup - Look up an AVC entry.
* @ssid: source security identifier
* @tsid: target security identifier
* @tclass: target security class
* @requested: requested permissions, interpreted based on @tclass
- * @aeref: AVC entry reference
*
* Look up an AVC entry that is valid for the
* @requested permissions between the SID pair
* (@ssid, @tsid), interpreting the permissions
* based on @tclass. If a valid AVC entry exists,
- * then this function updates @aeref to refer to the
- * entry and returns %0. Otherwise, this function
- * returns -%ENOENT.
+ * then this function return the avc_node.
+ * Otherwise, this function returns NULL.
*/
-int avc_lookup(u32 ssid, u32 tsid, u16 tclass,
- u32 requested, struct avc_entry_ref *aeref)
+struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass, u32 requested)
{
struct avc_node *node;
- int probes, rc = 0;
+ int probes;
avc_cache_stats_incr(AVC_CAV_LOOKUPS);
node = avc_search_node(ssid, tsid, tclass,&probes);
if (node && ((node->ae.avd.decided & requested) == requested)) {
avc_cache_stats_incr(AVC_CAV_HITS);
avc_cache_stats_add(AVC_CAV_PROBES,probes);
- aeref->ae = &node->ae;
goto out;
}
avc_cache_stats_incr(AVC_CAV_MISSES);
- rc = -ENOENT;
out:
- return rc;
+ return node;
+}
+
+static int avc_latest_notif_update(int seqno, int is_insert)
+{
+ int ret = 0;
+ static spinlock_t notif_lock = SPIN_LOCK_UNLOCKED;
+
+ spin_lock(¬if_lock);
+ if (seqno < avc_cache.latest_notif) {
+ if (is_insert) {
+ printk(KERN_WARNING "avc: seqno %d < latest_notif %d\n",
+ seqno, avc_cache.latest_notif);
+ ret = -EAGAIN;
+ } else {
+ avc_cache.latest_notif = seqno;
+ }
+ }
+ spin_unlock(¬if_lock);
+ return ret;
}
/**
* avc_insert - Insert an AVC entry.
* @ssid: source security identifier
* @tsid: target security identifier
* @tclass: target security class
* @ae: AVC entry
- * @aeref: AVC entry reference
*
* Insert an AVC entry for the SID pair
* (@ssid, @tsid) and class @tclass.
* The access vectors and the sequence number are
* normally provided by the security server in
* response to a security_compute_av() call. If the
* sequence number @ae->avd.seqno is not less than the latest
* revocation notification, then the function copies
- * the access vectors into a cache entry, updates
- * @aeref to refer to the entry, and returns %0.
- * Otherwise, this function returns -%EAGAIN.
+ * the access vectors into a cache entry, returns
+ * avc_node inserted. Otherwise, this function returns NULL.
*/
-int avc_insert(u32 ssid, u32 tsid, u16 tclass,
- struct avc_entry *ae, struct avc_entry_ref *aeref)
+struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct avc_entry *ae)
{
- struct avc_node *node;
- int rc = 0;
+ struct avc_node *pos, *node = NULL;
+ int hvalue;
- if (ae->avd.seqno < avc_cache.latest_notif) {
- printk(KERN_WARNING "avc: seqno %d < latest_notif %d\n",
- ae->avd.seqno, avc_cache.latest_notif);
- rc = -EAGAIN;
+ if (avc_latest_notif_update(ae->avd.seqno, 1))
goto out;
- }
- node = avc_claim_node(ssid, tsid, tclass);
- if (!node) {
- rc = -ENOMEM;
- goto out;
- }
+ node = avc_get_node();
- node->ae.avd.allowed = ae->avd.allowed;
- node->ae.avd.decided = ae->avd.decided;
- node->ae.avd.auditallow = ae->avd.auditallow;
- node->ae.avd.auditdeny = ae->avd.auditdeny;
- node->ae.avd.seqno = ae->avd.seqno;
- aeref->ae = &node->ae;
+ if (node) {
+ hvalue = avc_hash(ssid, tsid, tclass);
+
+ node->ae.ssid = ssid;
+ node->ae.tsid = tsid;
+ node->ae.tclass = tclass;
+ atomic_set(&node->ae.used, 1);
+
+ node->ae.avd.allowed = ae->avd.allowed;
+ node->ae.avd.decided = ae->avd.decided;
+ node->ae.avd.auditallow = ae->avd.auditallow;
+ node->ae.avd.auditdeny = ae->avd.auditdeny;
+ node->ae.avd.seqno = ae->avd.seqno;
+
+ spin_lock(&avc_cache.slots_lock[hvalue]);
+ list_for_each_entry(pos, &avc_cache.slots[hvalue], list){
+ if( pos->ae.ssid == ssid &&
+ pos->ae.tsid == tsid &&
+ pos->ae.tclass == tclass ){
+ atomic_dec(&avc_cache.active_nodes);
+ kfree(node->container);
+ goto duplicate;
+ }
+ }
+ list_add_rcu(&node->list, &avc_cache.slots[hvalue]);
+duplicate:
+ spin_unlock(&avc_cache.slots_lock[hvalue]);
+ }
out:
- return rc;
+ return node;
}
static inline void avc_print_ipv6_addr(struct audit_buffer *ab,
struct in6_addr *addr, u16 port,
@@ -685,10 +707,32 @@
{
return (x == y || x == SECSID_WILD || y == SECSID_WILD);
}
-static inline void avc_update_node(u32 event, struct avc_node *node, u32 perms)
+static void avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass)
{
+ int hvalue;
+ struct avc_node *pos, *node, *org = NULL;
+
+ /* Lock the target slot */
+ hvalue = avc_hash(ssid, tsid, tclass);
+ spin_lock(&avc_cache.slots_lock[hvalue]);
+
+ list_for_each_entry(pos, &avc_cache.slots[hvalue], list){
+ if ( ssid==pos->ae.ssid &&
+ tsid==pos->ae.tsid &&
+ tclass==pos->ae.tclass ){
+ org = pos;
+ break;
+ }
+ }
+
+ if (!org)
+ goto out;
+
+ node = org->reserved;
+ /* Duplicate and Update */
+ memcpy(node, org, sizeof(struct avc_node));
switch (event) {
case AVC_CALLBACK_GRANT:
node->ae.avd.allowed |= perms;
break;
@@ -708,40 +752,42 @@
case AVC_CALLBACK_AUDITDENY_DISABLE:
node->ae.avd.auditdeny &= ~perms;
break;
}
+ list_replace_rcu(&org->list,&node->list);
+out:
+ spin_unlock(&avc_cache.slots_lock[hvalue]);
}
static int avc_update_cache(u32 event, u32 ssid, u32 tsid,
u16 tclass, u32 perms)
{
struct avc_node *node;
int i;
- unsigned long flags;
- spin_lock_irqsave(&avc_lock,flags);
+ rcu_read_lock();
if (ssid == SECSID_WILD || tsid == SECSID_WILD) {
/* apply to all matching nodes */
for (i = 0; i < AVC_CACHE_SLOTS; i++) {
- for (node = avc_cache.slots[i]; node;
- node = node->next) {
+ list_for_each_entry(node, &avc_cache.slots[i], list) {
if (avc_sidcmp(ssid, node->ae.ssid) &&
avc_sidcmp(tsid, node->ae.tsid) &&
- tclass == node->ae.tclass) {
- avc_update_node(event,node,perms);
+ tclass == node->ae.tclass ) {
+ avc_update_node(event, perms, node->ae.ssid
+ ,node->ae.tsid, node->ae.tclass);
}
}
}
} else {
/* apply to one node */
node = avc_search_node(ssid, tsid, tclass, NULL);
if (node) {
- avc_update_node(event,node,perms);
+ avc_update_node(event, perms, ssid, tsid, tclass);
}
}
- spin_unlock_irqrestore(&avc_lock,flags);
+ rcu_read_unlock();
return 0;
}
@@ -751,9 +797,8 @@
{
struct avc_callback_node *c;
u32 tretained = 0, cretained = 0;
int rc = 0;
- unsigned long flags;
/*
* try_revoke only removes permissions from the cache
* state if they are not retained by the object manager.
@@ -786,12 +831,9 @@
avc_update_cache(event,ssid,tsid,tclass,perms);
*out_retained = tretained;
}
- spin_lock_irqsave(&avc_lock,flags);
- if (seqno > avc_cache.latest_notif)
- avc_cache.latest_notif = seqno;
- spin_unlock_irqrestore(&avc_lock,flags);
+ avc_latest_notif_update(seqno, 0);
out:
return rc;
}
@@ -856,34 +898,22 @@
int avc_ss_reset(u32 seqno)
{
struct avc_callback_node *c;
int i, rc = 0;
- struct avc_node *node, *tmp;
- unsigned long flags;
+ struct avc_node *node;
avc_hash_eval("reset");
- spin_lock_irqsave(&avc_lock,flags);
-
+ rcu_read_lock();
for (i = 0; i < AVC_CACHE_SLOTS; i++) {
- node = avc_cache.slots[i];
- while (node) {
- tmp = node;
- node = node->next;
- tmp->ae.ssid = tmp->ae.tsid = SECSID_NULL;
- tmp->ae.tclass = SECCLASS_NULL;
- tmp->ae.avd.allowed = tmp->ae.avd.decided = 0;
- tmp->ae.avd.auditallow = tmp->ae.avd.auditdeny = 0;
- tmp->ae.used = 0;
- tmp->next = avc_node_freelist;
- avc_node_freelist = tmp;
- avc_cache.active_nodes--;
+ spin_lock(&avc_cache.slots_lock[i]);
+ list_for_each_entry(node, &avc_cache.slots[i], list){
+ list_del_rcu(&node->list);
+ call_rcu(&node->container->rhead, avc_node_free);
}
- avc_cache.slots[i] = NULL;
+ spin_unlock(&avc_cache.slots_lock[i]);
}
- avc_cache.lru_hint = 0;
-
- spin_unlock_irqrestore(&avc_lock,flags);
+ rcu_read_unlock();
for (i = 0; i < AVC_NSTATS; i++)
avc_cache_stats[i] = 0;
@@ -895,12 +925,9 @@
goto out;
}
}
- spin_lock_irqsave(&avc_lock,flags);
- if (seqno > avc_cache.latest_notif)
- avc_cache.latest_notif = seqno;
- spin_unlock_irqrestore(&avc_lock,flags);
+ avc_latest_notif_update(seqno, 0);
out:
return rc;
}
@@ -949,9 +976,9 @@
* @ssid: source security identifier
* @tsid: target security identifier
* @tclass: target security class
* @requested: requested permissions, interpreted based on @tclass
- * @aeref: AVC entry reference
+ * @aeref: AVC entry reference(not in use)
* @avd: access vector decisions
*
* Check the AVC to determine whether the @requested permissions are granted
* for the SID pair (@ssid, @tsid), interpreting the permissions
@@ -968,72 +995,46 @@
int avc_has_perm_noaudit(u32 ssid, u32 tsid,
u16 tclass, u32 requested,
struct avc_entry_ref *aeref, struct av_decision *avd)
{
- struct avc_entry *ae;
- int rc = 0;
- unsigned long flags;
+ struct avc_node *node;
struct avc_entry entry;
+ int rc = 0;
u32 denied;
- struct avc_entry_ref ref;
-
- if (!aeref) {
- avc_entry_ref_init(&ref);
- aeref = &ref;
- }
- spin_lock_irqsave(&avc_lock, flags);
+ rcu_read_lock();
avc_cache_stats_incr(AVC_ENTRY_LOOKUPS);
- ae = aeref->ae;
- if (ae) {
- if (ae->ssid == ssid &&
- ae->tsid == tsid &&
- ae->tclass == tclass &&
- ((ae->avd.decided & requested) == requested)) {
- avc_cache_stats_incr(AVC_ENTRY_HITS);
- ae->used = 1;
- } else {
- avc_cache_stats_incr(AVC_ENTRY_DISCARDS);
- ae = NULL;
- }
- }
- if (!ae) {
- avc_cache_stats_incr(AVC_ENTRY_MISSES);
- rc = avc_lookup(ssid, tsid, tclass, requested, aeref);
- if (rc) {
- spin_unlock_irqrestore(&avc_lock,flags);
- rc = security_compute_av(ssid,tsid,tclass,requested,&entry.avd);
- if (rc)
- goto out;
- spin_lock_irqsave(&avc_lock, flags);
- rc = avc_insert(ssid,tsid,tclass,&entry,aeref);
- if (rc) {
- spin_unlock_irqrestore(&avc_lock,flags);
- goto out;
- }
+ node = avc_lookup(ssid, tsid, tclass, requested);
+ if (!node) {
+ rcu_read_unlock();
+ rc = security_compute_av(ssid,tsid,tclass,requested,&entry.avd);
+ if (rc)
+ goto out;
+ rcu_read_lock();
+ node = avc_insert(ssid,tsid,tclass,&entry);
+ if (!node) {
+ rc = -ENOMEM;
+ rcu_read_unlock();
+ goto out;
}
- ae = aeref->ae;
}
-
if (avd)
- memcpy(avd, &ae->avd, sizeof(*avd));
+ memcpy(avd, &node->ae.avd, sizeof(*avd));
- denied = requested & ~(ae->avd.allowed);
+ denied = requested & ~(node->ae.avd.allowed);
if (!requested || denied) {
if (selinux_enforcing) {
- spin_unlock_irqrestore(&avc_lock,flags);
rc = -EACCES;
- goto out;
} else {
- ae->avd.allowed |= requested;
- spin_unlock_irqrestore(&avc_lock,flags);
- goto out;
+ if (node->ae.avd.allowed != (node->ae.avd.allowed|requested))
+ avc_update_node(AVC_CALLBACK_GRANT
+ ,requested,ssid,tsid,tclass);
}
}
- spin_unlock_irqrestore(&avc_lock,flags);
+ rcu_read_unlock();
out:
return rc;
}
@@ -1042,9 +1043,9 @@
* @ssid: source security identifier
* @tsid: target security identifier
* @tclass: target security class
* @requested: requested permissions, interpreted based on @tclass
- * @aeref: AVC entry reference
+ * @aeref: AVC entry reference(not in use)
* @auditdata: auxiliary audit data
*
* Check the AVC to determine whether the @requested permissions are granted
* for the SID pair (@ssid, @tsid), interpreting the permissions
@@ -1061,8 +1062,8 @@
{
struct av_decision avd;
int rc;
- rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, aeref, &avd);
+ rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, NULL, &avd);
avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata);
return rc;
}
diff -rNU4 linux-2.6.8.1/security/selinux/include/avc.h linux-2.6.8.1.rcu/security/selinux/include/avc.h
--- linux-2.6.8.1/security/selinux/include/avc.h 2004-08-14 19:54:51.000000000 +0900
+++ linux-2.6.8.1.rcu/security/selinux/include/avc.h 2004-08-20 20:40:50.000000000 +0900
@@ -118,13 +118,11 @@
*/
void __init avc_init(void);
-int avc_lookup(u32 ssid, u32 tsid, u16 tclass,
- u32 requested, struct avc_entry_ref *aeref);
+struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass, u32 requested);
-int avc_insert(u32 ssid, u32 tsid, u16 tclass,
- struct avc_entry *ae, struct avc_entry_ref *out_aeref);
+struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct avc_entry *ae);
void avc_audit(u32 ssid, u32 tsid,
u16 tclass, u32 requested,
struct av_decision *avd, int result, struct avc_audit_data *auditdata);
next prev parent reply other threads:[~2004-08-24 7:25 UTC|newest]
Thread overview: 56+ messages / expand[flat|nested] mbox.gz Atom feed top
2004-08-16 9:33 RCU issue with SELinux (Re: SELINUX performance issues) Kaigai Kohei
2004-08-16 9:33 ` Kaigai Kohei
2004-08-16 15:19 ` James Morris
2004-08-16 15:19 ` James Morris
2004-08-20 13:36 ` Kaigai Kohei
2004-08-20 14:53 ` James Morris
2004-08-20 14:53 ` James Morris
2004-08-24 7:27 ` Kaigai Kohei
2004-08-24 7:27 ` Kaigai Kohei
2004-08-24 13:24 ` James Morris
2004-08-24 13:24 ` James Morris
2004-08-25 9:51 ` Kaigai Kohei
2004-08-25 9:51 ` Kaigai Kohei
2004-08-25 18:31 ` James Morris
2004-08-25 18:31 ` James Morris
2004-08-25 9:52 ` [PATCH]atomic_inc_return() for i386/x86_64 (Re: RCU issue with SELinux) Kaigai Kohei
2004-08-20 17:31 ` RCU issue with SELinux (Re: SELINUX performance issues) Luke Kenneth Casson Leighton
2004-08-20 17:31 ` Luke Kenneth Casson Leighton
2004-08-20 18:15 ` James Morris
2004-08-20 18:15 ` James Morris
2004-08-20 20:19 ` Paul E. McKenney
2004-08-20 20:35 ` James Morris
2004-08-20 20:35 ` James Morris
2004-08-24 7:27 ` Kaigai Kohei
2004-08-24 7:27 ` Kaigai Kohei
[not found] ` <1093014789.16585.186.camel@moss-spartans.epoch.ncsc.mil>
2004-08-24 7:25 ` Kaigai Kohei [this message]
2004-08-24 15:37 ` Stephen Smalley
2004-08-24 15:37 ` Stephen Smalley
2004-08-25 9:51 ` Kaigai Kohei
2004-08-25 15:50 ` Stephen Smalley
2004-08-25 15:50 ` Stephen Smalley
2004-08-25 16:11 ` Stephen Smalley
2004-08-25 16:11 ` Stephen Smalley
2004-08-26 7:53 ` Kaigai Kohei
2004-08-26 7:53 ` Kaigai Kohei
2004-08-26 13:24 ` Stephen Smalley
2004-08-26 13:24 ` Stephen Smalley
2004-08-27 11:07 ` Kaigai Kohei
2004-08-27 11:07 ` Kaigai Kohei
2004-08-30 11:17 ` [PATCH]SELinux performance improvement by RCU (Re: RCU issue with SELinux) Kaigai Kohei
2004-08-30 15:35 ` Stephen Smalley
2004-08-30 15:35 ` Stephen Smalley
2004-08-30 16:13 ` Paul E. McKenney
2004-08-30 16:13 ` Paul E. McKenney
2004-08-31 4:33 ` Kaigai Kohei
2004-08-31 4:33 ` Kaigai Kohei
2004-08-31 16:20 ` Paul E. McKenney
2004-08-31 16:20 ` Paul E. McKenney
2004-08-31 15:33 ` James Morris
2004-08-31 15:33 ` James Morris
2004-08-24 23:02 ` RCU issue with SELinux (Re: SELINUX performance issues) Paul E. McKenney
2004-08-24 23:02 ` Paul E. McKenney
2004-08-25 9:51 ` Kaigai Kohei
2004-08-25 9:51 ` Kaigai Kohei
2004-08-25 17:34 ` Paul E. McKenney
2004-08-25 17:34 ` Paul E. McKenney
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to='042b01c489ab$8a871ce0$f97d220a@linux.bs1.fc.nec.co.jp' \
--to=kaigai@ak.jp.nec.com \
--cc=jmorris@redhat.com \
--cc=linux-kernel@vger.kernel.org \
--cc=sds@epoch.ncsc.mil \
--cc=selinux@tycho.nsa.gov \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.