Commit a639e7ca authored by Paul Moore, committed by James Morris

SELinux: Made netnode cache adds faster

When adding new entries to the network node cache we would walk the entire
hash bucket to make sure we didn't cross a threshold (done to bound the
cache size).  This isn't a very quick or elegant solution for something
which is supposed to be quick-ish, so add a counter to each hash bucket to
track the size of the bucket and eliminate the need to walk the entire
bucket list on each add.
Signed-off-by: Paul Moore <paul.moore@hp.com>
Signed-off-by: James Morris <jmorris@namei.org>
parent 7b41b173
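
The heart of the change described above is the per-bucket size counter: instead of walking a bucket to count its entries on every insert, the insert path bumps a counter and, once the bucket is already at the limit, drops the oldest entry. Below is a minimal, self-contained userspace sketch of that idea, for illustration only; it is not the kernel code (no RCU, no locking, no SELinux types), and every name in it (struct bucket, hash_tbl, insert(), BKT_LIMIT, and so on) is hypothetical.

/* Illustrative sketch of a size-bounded hash bucket (userspace, no RCU). */
#include <stdio.h>
#include <stdlib.h>

#define HASH_SIZE 256
#define BKT_LIMIT 16

struct node {
	unsigned int addr;	/* stand-in for the cached network address */
	struct node *prev;
	struct node *next;
};

struct bucket {
	unsigned int size;	/* the per-bucket counter */
	struct node *head;
	struct node *tail;
};

static struct bucket hash_tbl[HASH_SIZE];

static unsigned int hashfn(unsigned int addr)
{
	return addr & (HASH_SIZE - 1);
}

static void insert(unsigned int addr)
{
	struct bucket *bkt = &hash_tbl[hashfn(addr)];
	struct node *node = calloc(1, sizeof(*node));

	if (node == NULL)
		return;
	node->addr = addr;

	/* new entries go to the front of the bucket */
	node->next = bkt->head;
	if (bkt->head != NULL)
		bkt->head->prev = node;
	bkt->head = node;
	if (bkt->tail == NULL)
		bkt->tail = node;

	/* the counter replaces the per-insert walk: if the bucket was
	 * already at the limit, evict the oldest (tail) entry and keep
	 * the count unchanged; otherwise just bump the count */
	if (bkt->size == BKT_LIMIT) {
		struct node *tail = bkt->tail;

		bkt->tail = tail->prev;
		if (bkt->tail != NULL)
			bkt->tail->next = NULL;
		free(tail);
	} else {
		bkt->size++;
	}
}

int main(void)
{
	unsigned int i;

	/* force collisions: every address below hashes to bucket 0 */
	for (i = 0; i < 100; i++)
		insert(i * HASH_SIZE);
	printf("bucket 0 holds %u entries (limit %d)\n",
	       hash_tbl[0].size, BKT_LIMIT);
	return 0;
}

The patch below does the same thing with the RCU-protected list primitives: list_add_rcu() puts the new node at the head, and once sel_netnode_hash[idx].size reaches SEL_NETNODE_HASH_BKT_LIMIT the tail entry is removed with list_del_rcu() and freed via call_rcu().
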
--- a/security/selinux/netnode.c
+++ b/security/selinux/netnode.c
@@ -40,11 +40,17 @@
 #include <net/ipv6.h>
 #include <asm/bug.h>
 
+#include "netnode.h"
 #include "objsec.h"
 
 #define SEL_NETNODE_HASH_SIZE 256
 #define SEL_NETNODE_HASH_BKT_LIMIT 16
 
+struct sel_netnode_bkt {
+	unsigned int size;
+	struct list_head list;
+};
+
 struct sel_netnode {
 	struct netnode_security_struct nsec;
 
@@ -60,7 +66,7 @@ struct sel_netnode {
 
 static LIST_HEAD(sel_netnode_list);
 static DEFINE_SPINLOCK(sel_netnode_lock);
-static struct list_head sel_netnode_hash[SEL_NETNODE_HASH_SIZE];
+static struct sel_netnode_bkt sel_netnode_hash[SEL_NETNODE_HASH_SIZE];
 
 /**
  * sel_netnode_free - Frees a node entry
@@ -87,7 +93,7 @@ static void sel_netnode_free(struct rcu_head *p)
  * the bucket number for the given IP address.
  *
  */
-static u32 sel_netnode_hashfn_ipv4(__be32 addr)
+static unsigned int sel_netnode_hashfn_ipv4(__be32 addr)
 {
 	/* at some point we should determine if the mismatch in byte order
 	 * affects the hash function dramatically */
@@ -103,7 +109,7 @@ static u32 sel_netnode_hashfn_ipv4(__be32 addr)
  * the bucket number for the given IP address.
  *
  */
-static u32 sel_netnode_hashfn_ipv6(const struct in6_addr *addr)
+static unsigned int sel_netnode_hashfn_ipv6(const struct in6_addr *addr)
 {
 	/* just hash the least significant 32 bits to keep things fast (they
 	 * are the most likely to be different anyway), we can revisit this
@@ -123,7 +129,7 @@ static u32 sel_netnode_hashfn_ipv6(const struct in6_addr *addr)
  */
 static struct sel_netnode *sel_netnode_find(const void *addr, u16 family)
 {
-	u32 idx;
+	unsigned int idx;
 	struct sel_netnode *node;
 
 	switch (family) {
@@ -137,7 +143,7 @@ static struct sel_netnode *sel_netnode_find(const void *addr, u16 family)
 		BUG();
 	}
 
-	list_for_each_entry_rcu(node, &sel_netnode_hash[idx], list)
+	list_for_each_entry_rcu(node, &sel_netnode_hash[idx].list, list)
 		if (node->nsec.family == family)
 			switch (family) {
 			case PF_INET:
@@ -159,15 +165,12 @@ static struct sel_netnode *sel_netnode_find(const void *addr, u16 family)
  * @node: the new node record
  *
  * Description:
- * Add a new node record to the network address hash table.  Returns zero on
- * success, negative values on failure.
+ * Add a new node record to the network address hash table.
  *
  */
-static int sel_netnode_insert(struct sel_netnode *node)
+static void sel_netnode_insert(struct sel_netnode *node)
 {
-	u32 idx;
-	u32 count = 0;
-	struct sel_netnode *iter;
+	unsigned int idx;
 
 	switch (node->nsec.family) {
 	case PF_INET:
@@ -179,32 +182,21 @@ static int sel_netnode_insert(struct sel_netnode *node)
 	default:
 		BUG();
 	}
-	list_add_rcu(&node->list, &sel_netnode_hash[idx]);
+
+	INIT_RCU_HEAD(&node->rcu);
 
 	/* we need to impose a limit on the growth of the hash table so check
 	 * this bucket to make sure it is within the specified bounds */
-	list_for_each_entry(iter, &sel_netnode_hash[idx], list)
-		if (++count > SEL_NETNODE_HASH_BKT_LIMIT) {
-			list_del_rcu(&iter->list);
-			call_rcu(&iter->rcu, sel_netnode_free);
-			break;
-		}
-
-	return 0;
-}
-
-/**
- * sel_netnode_destroy - Remove a node record from the table
- * @node: the existing node record
- *
- * Description:
- * Remove an existing node record from the network address table.
- *
- */
-static void sel_netnode_destroy(struct sel_netnode *node)
-{
-	list_del_rcu(&node->list);
-	call_rcu(&node->rcu, sel_netnode_free);
+	list_add_rcu(&node->list, &sel_netnode_hash[idx].list);
+	if (sel_netnode_hash[idx].size == SEL_NETNODE_HASH_BKT_LIMIT) {
+		struct sel_netnode *tail;
+		tail = list_entry(
+			rcu_dereference(sel_netnode_hash[idx].list.prev),
+			struct sel_netnode, list);
+		list_del_rcu(&tail->list);
+		call_rcu(&tail->rcu, sel_netnode_free);
+	} else
+		sel_netnode_hash[idx].size++;
 }
 
 /**
@@ -222,7 +214,7 @@ static void sel_netnode_destroy(struct sel_netnode *node)
  */
 static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
 {
-	int ret;
+	int ret = -ENOMEM;
 	struct sel_netnode *node;
 	struct sel_netnode *new = NULL;
 
@@ -230,25 +222,21 @@ static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
 	node = sel_netnode_find(addr, family);
 	if (node != NULL) {
 		*sid = node->nsec.sid;
-		ret = 0;
-		goto out;
+		spin_unlock_bh(&sel_netnode_lock);
+		return 0;
 	}
 	new = kzalloc(sizeof(*new), GFP_ATOMIC);
-	if (new == NULL) {
-		ret = -ENOMEM;
+	if (new == NULL)
 		goto out;
-	}
 	switch (family) {
 	case PF_INET:
 		ret = security_node_sid(PF_INET,
-					addr, sizeof(struct in_addr),
-					&new->nsec.sid);
+					addr, sizeof(struct in_addr), sid);
 		new->nsec.addr.ipv4 = *(__be32 *)addr;
 		break;
 	case PF_INET6:
 		ret = security_node_sid(PF_INET6,
-					addr, sizeof(struct in6_addr),
-					&new->nsec.sid);
+					addr, sizeof(struct in6_addr), sid);
 		ipv6_addr_copy(&new->nsec.addr.ipv6, addr);
 		break;
 	default:
@@ -256,11 +244,10 @@ static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
 	}
 	if (ret != 0)
 		goto out;
+
 	new->nsec.family = family;
-	ret = sel_netnode_insert(new);
-	if (ret != 0)
-		goto out;
-	*sid = new->nsec.sid;
+	new->nsec.sid = *sid;
+	sel_netnode_insert(new);
 
 out:
 	spin_unlock_bh(&sel_netnode_lock);
@@ -312,13 +299,18 @@ int sel_netnode_sid(void *addr, u16 family, u32 *sid)
  */
 static void sel_netnode_flush(void)
 {
-	u32 idx;
-	struct sel_netnode *node;
+	unsigned int idx;
+	struct sel_netnode *node, *node_tmp;
 
 	spin_lock_bh(&sel_netnode_lock);
-	for (idx = 0; idx < SEL_NETNODE_HASH_SIZE; idx++)
-		list_for_each_entry(node, &sel_netnode_hash[idx], list)
-			sel_netnode_destroy(node);
+	for (idx = 0; idx < SEL_NETNODE_HASH_SIZE; idx++) {
+		list_for_each_entry_safe(node, node_tmp,
+					 &sel_netnode_hash[idx].list, list) {
+			list_del_rcu(&node->list);
+			call_rcu(&node->rcu, sel_netnode_free);
+		}
+		sel_netnode_hash[idx].size = 0;
+	}
 
 	spin_unlock_bh(&sel_netnode_lock);
 }
@@ -340,8 +332,10 @@ static __init int sel_netnode_init(void)
 	if (!selinux_enabled)
 		return 0;
 
-	for (iter = 0; iter < SEL_NETNODE_HASH_SIZE; iter++)
-		INIT_LIST_HEAD(&sel_netnode_hash[iter]);
+	for (iter = 0; iter < SEL_NETNODE_HASH_SIZE; iter++) {
+		INIT_LIST_HEAD(&sel_netnode_hash[iter].list);
+		sel_netnode_hash[iter].size = 0;
+	}
 
 	ret = avc_add_callback(sel_netnode_avc_callback, AVC_CALLBACK_RESET,
 			       SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0);