Commit 53dad6d3 authored by Davidlohr Bueso, committed by Linus Torvalds

ipc: fix race with LSMs

Currently, IPC mechanisms do security and auditing related checks under
RCU.  However, since security modules can free the security structure,
for example, through selinux_[sem,msg_queue,shm]_free_security(), we can
race if the structure is freed before other tasks are done with it,
creating a use-after-free condition.  Manfred illustrates this nicely,
for instance with shared mem and selinux:

 -> do_shmat calls rcu_read_lock()
 -> do_shmat calls shm_object_check().
     Checks that the object is still valid - but doesn't acquire any locks.
     Then it returns.
 -> do_shmat calls security_shm_shmat (e.g. selinux_shm_shmat)
 -> selinux_shm_shmat calls ipc_has_perm()
 -> ipc_has_perm accesses ipc_perms->security

shm_close()
 -> shm_close acquires rw_mutex & shm_lock
 -> shm_close calls shm_destroy
 -> shm_destroy calls security_shm_free (e.g. selinux_shm_free_security)
 -> selinux_shm_free_security calls ipc_free_security(&shp->shm_perm)
 -> ipc_free_security calls kfree(ipc_perms->security)

This patch delays the freeing of the security structures until after all
RCU readers are done.  Furthermore, it aligns the security life cycle with
that of the rest of IPC - freeing them based on the reference counter.
For situations where we need not free security, the current behavior is
kept.  Linus states:

 "... the old behavior was suspect for another reason too: having the
  security blob go away from under a user sounds like it could cause
  various other problems anyway, so I think the old code was at least
  _prone_ to bugs even if it didn't have catastrophic behavior."

I have tested this patch with IPC testcases from LTP on both my
quad-core laptop and a 64-core NUMA server.  In both cases selinux is
enabled, and tests pass for both voluntary and forced preemption models.
While the mentioned races are theoretical (at least no one has reported
them), I wanted to make sure that this new logic doesn't break anything
we weren't aware of.
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Davidlohr Bueso <davidlohr@hp.com>
Acked-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4a10c2ac
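
Before the per-file hunks, it may help to see the resulting convention in one place. Below is a kernel-style sketch written for this page rather than taken from the patch; my_obj and the other my_* names are hypothetical stand-ins for a real IPC object (msg queue, semaphore array, shm segment) and its LSM hooks:

#include <linux/ipc.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include "util.h"		/* struct ipc_rcu, ipc_rcu_to_struct(), ipc_rcu_* */

/* hypothetical payload; ipc_rcu_alloc() places a struct ipc_rcu header
 * immediately in front of it, in the same allocation */
struct my_obj {
	struct kern_ipc_perm perm;	/* perm.security is the LSM blob */
};

/* hypothetical stand-in for security_msg_queue_free() and friends */
static void my_obj_security_free(struct my_obj *obj)
{
	kfree(obj->perm.security);
}

/* destruction callback: runs after an RCU grace period and frees the
 * security blob together with the object's memory */
static void my_obj_rcu_free(struct rcu_head *head)
{
	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
	struct my_obj *obj = ipc_rcu_to_struct(p);

	my_obj_security_free(obj);
	ipc_rcu_free(head);	/* kfree() or vfree(), see the ipc/util.c hunk below */
}

/* final put, from a destruction path such as freeque()/freeary() */
static void my_obj_destroy(struct my_obj *obj)
{
	ipc_rcu_putref(obj, my_obj_rcu_free);
}

/* dropping a temporary reference: memory-only callback, because the
 * object is still live and still owns its security blob */
static void my_obj_put(struct my_obj *obj)
{
	ipc_rcu_putref(obj, ipc_rcu_free);
}

The point of the design is that a single RCU callback owns both the security blob and the memory, so neither can disappear while a reader inside rcu_read_lock() can still reach the object.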
@@ -165,6 +165,15 @@ static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
 	ipc_rmid(&msg_ids(ns), &s->q_perm);
 }
 
+static void msg_rcu_free(struct rcu_head *head)
+{
+	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+	struct msg_queue *msq = ipc_rcu_to_struct(p);
+
+	security_msg_queue_free(msq);
+	ipc_rcu_free(head);
+}
+
 /**
  * newque - Create a new msg queue
  * @ns: namespace
@@ -189,15 +198,14 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
 	msq->q_perm.security = NULL;
 	retval = security_msg_queue_alloc(msq);
 	if (retval) {
-		ipc_rcu_putref(msq);
+		ipc_rcu_putref(msq, ipc_rcu_free);
 		return retval;
 	}
 
 	/* ipc_addid() locks msq upon success. */
 	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
 	if (id < 0) {
-		security_msg_queue_free(msq);
-		ipc_rcu_putref(msq);
+		ipc_rcu_putref(msq, msg_rcu_free);
 		return id;
 	}
@@ -276,8 +284,7 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 		free_msg(msg);
 	}
 	atomic_sub(msq->q_cbytes, &ns->msg_bytes);
-	security_msg_queue_free(msq);
-	ipc_rcu_putref(msq);
+	ipc_rcu_putref(msq, msg_rcu_free);
 }
 
 /*
@@ -717,7 +724,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
 		rcu_read_lock();
 		ipc_lock_object(&msq->q_perm);
 
-		ipc_rcu_putref(msq);
+		ipc_rcu_putref(msq, ipc_rcu_free);
 		if (msq->q_perm.deleted) {
 			err = -EIDRM;
 			goto out_unlock0;
...
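
The do_msgsnd() hunk above shows the flip side of the convention: a sender that pinned the queue with ipc_rcu_getref() before sleeping drops that temporary pin with the memory-only ipc_rcu_free callback, while only real destruction (freeque()) passes msg_rcu_free. A minimal userspace model of the two put flavours, with made-up names and plain C11 atomics standing in for the kernel's refcount and RCU deferral:

#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

/* toy model: 'security' plays the LSM blob, hdr holds the refcount */
struct hdr { atomic_int ref; };
struct msq { struct hdr h; char *security; };

/* memory-only put, cf. ipc_rcu_putref(msq, ipc_rcu_free) */
static void put_mem_only(struct msq *q)
{
	if (atomic_fetch_sub(&q->h.ref, 1) == 1)
		free(q);	/* the destroy put must have freed the blob */
}

/* destruction put, cf. ipc_rcu_putref(msq, msg_rcu_free) */
static void put_destroy(struct msq *q)
{
	if (atomic_fetch_sub(&q->h.ref, 1) == 1) {
		free(q->security);	/* cf. security_msg_queue_free() */
		free(q);		/* cf. ipc_rcu_free() */
	}
}

int main(void)
{
	struct msq *q = calloc(1, sizeof(*q));

	if (!q)
		return 1;
	atomic_init(&q->h.ref, 1);	/* reference held by the IPC id */
	q->security = strdup("blob");

	atomic_fetch_add(&q->h.ref, 1);	/* cf. ipc_rcu_getref() before sleeping */
	put_mem_only(q);		/* wake up: drop the temporary pin */
	put_destroy(q);			/* cf. freeque(): final put frees the blob */
	return 0;
}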
@@ -243,6 +243,15 @@ static void merge_queues(struct sem_array *sma)
 	}
 }
 
+static void sem_rcu_free(struct rcu_head *head)
+{
+	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+	struct sem_array *sma = ipc_rcu_to_struct(p);
+
+	security_sem_free(sma);
+	ipc_rcu_free(head);
+}
+
 /*
  * If the request contains only one semaphore operation, and there are
  * no complex transactions pending, lock only the semaphore involved.
@@ -374,12 +383,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
 static inline void sem_lock_and_putref(struct sem_array *sma)
 {
 	sem_lock(sma, NULL, -1);
-	ipc_rcu_putref(sma);
-}
-
-static inline void sem_putref(struct sem_array *sma)
-{
-	ipc_rcu_putref(sma);
+	ipc_rcu_putref(sma, ipc_rcu_free);
 }
 
 static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -458,14 +462,13 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 	sma->sem_perm.security = NULL;
 	retval = security_sem_alloc(sma);
 	if (retval) {
-		ipc_rcu_putref(sma);
+		ipc_rcu_putref(sma, ipc_rcu_free);
 		return retval;
 	}
 
 	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
 	if (id < 0) {
-		security_sem_free(sma);
-		ipc_rcu_putref(sma);
+		ipc_rcu_putref(sma, sem_rcu_free);
 		return id;
 	}
 	ns->used_sems += nsems;
@@ -1047,8 +1050,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 	wake_up_sem_queue_do(&tasks);
 	ns->used_sems -= sma->sem_nsems;
-	security_sem_free(sma);
-	ipc_rcu_putref(sma);
+	ipc_rcu_putref(sma, sem_rcu_free);
 }
 
 static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
@@ -1292,7 +1294,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 			rcu_read_unlock();
 			sem_io = ipc_alloc(sizeof(ushort)*nsems);
 			if(sem_io == NULL) {
-				sem_putref(sma);
+				ipc_rcu_putref(sma, ipc_rcu_free);
 				return -ENOMEM;
 			}
@@ -1328,20 +1330,20 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		if(nsems > SEMMSL_FAST) {
 			sem_io = ipc_alloc(sizeof(ushort)*nsems);
 			if(sem_io == NULL) {
-				sem_putref(sma);
+				ipc_rcu_putref(sma, ipc_rcu_free);
 				return -ENOMEM;
 			}
 		}
 
 		if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) {
-			sem_putref(sma);
+			ipc_rcu_putref(sma, ipc_rcu_free);
 			err = -EFAULT;
 			goto out_free;
 		}
 
 		for (i = 0; i < nsems; i++) {
 			if (sem_io[i] > SEMVMX) {
-				sem_putref(sma);
+				ipc_rcu_putref(sma, ipc_rcu_free);
 				err = -ERANGE;
 				goto out_free;
 			}
@@ -1629,7 +1631,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 	/* step 2: allocate new undo structure */
 	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
 	if (!new) {
-		sem_putref(sma);
+		ipc_rcu_putref(sma, ipc_rcu_free);
 		return ERR_PTR(-ENOMEM);
 	}
...
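
All of the former sem_putref() sites in ipc/sem.c follow one pattern: the task must drop out of RCU to perform a sleeping allocation, so it pins the array by refcount instead. An annotated, abridged paraphrase of the flow around the semctl_main() hunks (the comments are added here; the real code also checks the return value of ipc_rcu_getref()):

	ipc_rcu_getref(sma);		/* pin the array while still under RCU */
	rcu_read_unlock();

	sem_io = ipc_alloc(sizeof(ushort)*nsems);	/* may sleep */
	if (sem_io == NULL) {
		/* drop the pin only; the array is still live and still
		 * owns its security blob, hence the memory-only callback */
		ipc_rcu_putref(sma, ipc_rcu_free);
		return -ENOMEM;
	}

	rcu_read_lock();
	sem_lock_and_putref(sma);	/* relock, then drop the pin */
	if (sma->sem_perm.deleted) {
		/* raced with RMID while we were not under RCU */
		...
	}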
@@ -167,6 +167,15 @@ static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
 	ipc_lock_object(&ipcp->shm_perm);
 }
 
+static void shm_rcu_free(struct rcu_head *head)
+{
+	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+	struct shmid_kernel *shp = ipc_rcu_to_struct(p);
+
+	security_shm_free(shp);
+	ipc_rcu_free(head);
+}
+
 static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
 {
 	ipc_rmid(&shm_ids(ns), &s->shm_perm);
@@ -208,8 +217,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
 		user_shm_unlock(file_inode(shp->shm_file)->i_size,
 						shp->mlock_user);
 	fput (shp->shm_file);
-	security_shm_free(shp);
-	ipc_rcu_putref(shp);
+	ipc_rcu_putref(shp, shm_rcu_free);
 }
 
 /*
@@ -497,7 +505,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 	shp->shm_perm.security = NULL;
 	error = security_shm_alloc(shp);
 	if (error) {
-		ipc_rcu_putref(shp);
+		ipc_rcu_putref(shp, ipc_rcu_free);
 		return error;
 	}
@@ -566,8 +574,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 		user_shm_unlock(size, shp->mlock_user);
 	fput(file);
 no_file:
-	security_shm_free(shp);
-	ipc_rcu_putref(shp);
+	ipc_rcu_putref(shp, shm_rcu_free);
 	return error;
 }
...
@@ -474,11 +474,6 @@ void ipc_free(void* ptr, int size)
 	kfree(ptr);
 }
 
-struct ipc_rcu {
-	struct rcu_head rcu;
-	atomic_t refcount;
-} ____cacheline_aligned_in_smp;
-
 /**
  * ipc_rcu_alloc - allocate ipc and rcu space
  * @size: size desired
@@ -505,27 +500,24 @@ int ipc_rcu_getref(void *ptr)
 	return atomic_inc_not_zero(&p->refcount);
 }
 
-/**
- * ipc_schedule_free - free ipc + rcu space
- * @head: RCU callback structure for queued work
- */
-static void ipc_schedule_free(struct rcu_head *head)
-{
-	vfree(container_of(head, struct ipc_rcu, rcu));
-}
-
-void ipc_rcu_putref(void *ptr)
+void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head))
 {
 	struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1;
 
 	if (!atomic_dec_and_test(&p->refcount))
 		return;
 
-	if (is_vmalloc_addr(ptr)) {
-		call_rcu(&p->rcu, ipc_schedule_free);
-	} else {
-		kfree_rcu(p, rcu);
-	}
+	call_rcu(&p->rcu, func);
+}
+
+void ipc_rcu_free(struct rcu_head *head)
+{
+	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+
+	if (is_vmalloc_addr(p))
+		vfree(p);
+	else
+		kfree(p);
 }
 
 /**
...
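
The ipc/util.c rework leans on the layout ipc_rcu_alloc() has always used: the struct ipc_rcu header sits immediately in front of the payload, in a single allocation. That is why ((struct ipc_rcu *)ptr) - 1 recovers the header from a payload pointer, why ipc_rcu_to_struct(p) is simply p + 1, and why ipc_rcu_free() can test is_vmalloc_addr() on the header rather than on the payload. A minimal userspace model of the trick, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

/* one allocation = [header][payload]; converting between the two
 * pointers is a matter of stepping over the header */
struct hdr { int refcount; };

#define payload_of(h)	((void *)((h) + 1))		/* cf. ipc_rcu_to_struct() */
#define hdr_of(p)	(((struct hdr *)(p)) - 1)	/* cf. ((struct ipc_rcu *)ptr) - 1 */

static void *alloc_with_hdr(size_t size)
{
	struct hdr *h = malloc(sizeof(*h) + size);

	if (!h)
		return NULL;
	h->refcount = 1;
	return payload_of(h);	/* callers see only the payload */
}

int main(void)
{
	void *obj = alloc_with_hdr(64);
	struct hdr *h;

	if (!obj)
		return 1;
	h = hdr_of(obj);
	printf("refcount=%d roundtrip=%d\n", h->refcount, payload_of(h) == obj);
	free(h);	/* the header is the start of the allocation */
	return 0;
}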
@@ -47,6 +47,13 @@ static inline void msg_exit_ns(struct ipc_namespace *ns) { }
 static inline void shm_exit_ns(struct ipc_namespace *ns) { }
 #endif
 
+struct ipc_rcu {
+	struct rcu_head rcu;
+	atomic_t refcount;
+} ____cacheline_aligned_in_smp;
+
+#define ipc_rcu_to_struct(p)	((void *)(p+1))
+
 /*
  * Structure that holds the parameters needed by the ipc operations
  * (see after)
@@ -120,7 +127,8 @@ void ipc_free(void* ptr, int size);
  */
 void* ipc_rcu_alloc(int size);
 int ipc_rcu_getref(void *ptr);
-void ipc_rcu_putref(void *ptr);
+void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head));
+void ipc_rcu_free(struct rcu_head *head);
 
 struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);
 struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id);
...
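
Finally, the reason call_rcu() closes the race: a callback queued while readers are inside an rcu_read_lock()/rcu_read_unlock() section cannot run until every such pre-existing reader has finished. The standalone demonstration below uses liburcu (userspace RCU), whose API deliberately mirrors the kernel's; it assumes liburcu is installed (build with -lurcu) and is an illustration only:

#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>		/* rcu_read_lock(), call_rcu(), ... */
#include <urcu/compiler.h>	/* caa_container_of() */

struct blob {
	struct rcu_head rcu;
	int data;		/* plays the security blob's contents */
};

static void blob_rcu_free(struct rcu_head *head)
{
	/* runs only after all pre-existing readers have finished */
	struct blob *b = caa_container_of(head, struct blob, rcu);

	printf("freeing blob after the grace period\n");
	free(b);
}

int main(void)
{
	struct blob *b = malloc(sizeof(*b));

	if (!b)
		return 1;
	b->data = 42;
	rcu_register_thread();

	rcu_read_lock();			/* a reader is using the blob... */
	call_rcu(&b->rcu, blob_rcu_free);	/* ...while an updater retires it */
	printf("still safe to read: %d\n", b->data);	/* no use-after-free */
	rcu_read_unlock();			/* reader done; callback may now run */

	rcu_barrier();				/* wait for pending callbacks */
	rcu_unregister_thread();
	return 0;
}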