Commit c377a2c8 authored by James Morris

Merge tag 'seccomp-next' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux into next

parents 5965453d 0b5fa229
@@ -13,7 +13,7 @@
  * of Berkeley Packet Filters/Linux Socket Filters.
  */
 
-#include <linux/atomic.h>
+#include <linux/refcount.h>
 #include <linux/audit.h>
 #include <linux/compat.h>
 #include <linux/coredump.h>
@@ -56,7 +56,7 @@
  * to a task_struct (other than @usage).
  */
 struct seccomp_filter {
-	atomic_t usage;
+	refcount_t usage;
 	struct seccomp_filter *prev;
 	struct bpf_prog *prog;
 };
@@ -378,7 +378,7 @@ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
 		return ERR_PTR(ret);
 	}
 
-	atomic_set(&sfilter->usage, 1);
+	refcount_set(&sfilter->usage, 1);
 
 	return sfilter;
 }
@@ -465,7 +465,7 @@ void get_seccomp_filter(struct task_struct *tsk)
 	if (!orig)
 		return;
 	/* Reference count is bounded by the number of total processes. */
-	atomic_inc(&orig->usage);
+	refcount_inc(&orig->usage);
 }
 
 static inline void seccomp_filter_free(struct seccomp_filter *filter)
@@ -481,7 +481,7 @@ void put_seccomp_filter(struct task_struct *tsk)
 {
 	struct seccomp_filter *orig = tsk->seccomp.filter;
 	/* Clean up single-reference branches iteratively. */
-	while (orig && atomic_dec_and_test(&orig->usage)) {
+	while (orig && refcount_dec_and_test(&orig->usage)) {
 		struct seccomp_filter *freeme = orig;
 		orig = orig->prev;
 		seccomp_filter_free(freeme);
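The conversion above is mechanical, but the payoff is in the semantics: refcount_t saturates instead of wrapping on overflow and warns on suspicious transitions such as incrementing from zero, so a reference-counting bug degrades into a warning rather than a use-after-free. The put_seccomp_filter() loop itself is worth a second look: each filter holds a reference on its ->prev, and the iterative walk frees single-reference branches without recursion. A rough userspace model of that walk, offered as a sketch only (plain C11 atomics stand in for refcount_t; every name below is invented for illustration):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for struct seccomp_filter: a refcounted chain. */
struct filter {
	atomic_int usage;
	struct filter *prev;
};

static struct filter *filter_new(struct filter *prev)
{
	struct filter *f = malloc(sizeof(*f));

	atomic_init(&f->usage, 1);	/* like refcount_set(&f->usage, 1) */
	f->prev = prev;
	return f;
}

/* Mirrors put_seccomp_filter(): keep freeing while counts hit zero. */
static void filter_put(struct filter *orig)
{
	while (orig && atomic_fetch_sub(&orig->usage, 1) == 1) {
		struct filter *freeme = orig;

		orig = orig->prev;
		free(freeme);
		printf("freed a filter\n");
	}
}

int main(void)
{
	/* Two-deep chain; each node holds exactly one reference. */
	struct filter *chain = filter_new(filter_new(NULL));

	filter_put(chain);	/* cascades: frees both nodes iteratively */
	return 0;
}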
@@ -641,11 +641,12 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
 		return 0;
 
 	case SECCOMP_RET_KILL:
-	default: {
-		siginfo_t info;
+	default:
 		audit_seccomp(this_syscall, SIGSYS, action);
 		/* Dump core only if this is the last remaining thread. */
 		if (get_nr_threads(current) == 1) {
+			siginfo_t info;
+
 			/* Show the original registers in the dump. */
 			syscall_rollback(current, task_pt_regs(current));
 			/* Trigger a manual coredump since do_exit skips it. */
@@ -654,7 +655,6 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
 		}
 		do_exit(SIGSYS);
 	}
-	}
 
 	unreachable();
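The other change in this hunk narrows siginfo_t to the only branch that uses it, which also lets the old braced default: block go away. Since the coredump now fires only for the last remaining thread, a single-threaded probe is the easiest way to watch this SECCOMP_RET_KILL path end to end. The following is illustrative only and not part of the patch: a classic-BPF filter that kills just getpid(), using only standard uapi headers:

#include <linux/filter.h>
#include <linux/seccomp.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct sock_filter insns[] = {
		/* Load the syscall number from seccomp_data. */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		/* Kill on getpid(), allow everything else. */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};

	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);

	syscall(__NR_getpid);	/* SIGSYS: the filter kills us here */
	puts("not reached");
	return 0;
}

Run single-threaded with core dumps enabled (ulimit -c unlimited), the SIGSYS death should now leave a core behind; a multi-threaded process taking the same path skips the dump by design.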
@@ -1822,6 +1822,23 @@ struct tsync_sibling {
 	struct __test_metadata *metadata;
 };
 
+/*
+ * To avoid joining joined threads (which is not allowed by Bionic),
+ * make sure we both successfully join and clear the tid to skip a
+ * later join attempt during fixture teardown. Any remaining threads
+ * will be directly killed during teardown.
+ */
+#define PTHREAD_JOIN(tid, status)					\
+	do {								\
+		int _rc = pthread_join(tid, status);			\
+		if (_rc) {						\
+			TH_LOG("pthread_join of tid %u failed: %d\n",	\
+				(unsigned int)tid, _rc);		\
+		} else {						\
+			tid = 0;					\
+		}							\
+	} while (0)
+
 FIXTURE_DATA(TSYNC) {
 	struct sock_fprog root_prog, apply_prog;
 	struct tsync_sibling sibling[TSYNC_SIBLINGS];
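One caveat worth noting: the macro uses 0 as an "already joined" sentinel, which assumes pthread_t is an integer-like handle where 0 is never a live thread id. POSIX does not promise that in general, but glibc and Bionic both satisfy it, and the fixture's `if (!s->tid)` check below relies on the same property. A minimal standalone sketch of the join-once idea (JOIN_ONCE and worker are invented names; build with -pthread):

#include <pthread.h>
#include <stdio.h>

/* Join at most once: a successful join clears the handle. */
#define JOIN_ONCE(tid, status)					\
	do {							\
		if ((tid) && !pthread_join(tid, status))	\
			(tid) = 0;				\
	} while (0)

static void *worker(void *arg)
{
	return arg;	/* thread's return value becomes the status */
}

int main(void)
{
	pthread_t tid;
	void *status = NULL;

	pthread_create(&tid, NULL, worker, (void *)42L);
	JOIN_ONCE(tid, &status);	/* joins and zeroes tid */
	JOIN_ONCE(tid, &status);	/* no-op: tid already cleared */
	printf("worker returned %ld\n", (long)status);
	return 0;
}

The second invocation is the point: once the first join clears the handle, a later teardown pass can sweep the same array of tids without ever double-joining.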
@@ -1890,14 +1907,14 @@ FIXTURE_TEARDOWN(TSYNC)
 
 	for ( ; sib < self->sibling_count; ++sib) {
 		struct tsync_sibling *s = &self->sibling[sib];
-		void *status;
 
 		if (!s->tid)
 			continue;
-		if (pthread_kill(s->tid, 0)) {
-			pthread_cancel(s->tid);
-			pthread_join(s->tid, &status);
-		}
+		/*
+		 * If a thread is still running, it may be stuck, so hit
+		 * it over the head really hard.
+		 */
+		pthread_kill(s->tid, 9);
 	}
 	pthread_mutex_destroy(&self->mutex);
 	pthread_cond_destroy(&self->cond);
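The probe that was dropped here, pthread_kill(s->tid, 0), is the classic liveness check: signal 0 performs only the validity checks without delivering anything, so a non-zero return means the thread is already gone. The new teardown can skip the probe because PTHREAD_JOIN has already zeroed the tids of cleanly joined threads, so anything left really is stuck. For contrast with the unconditional signal 9 now sent, a standalone illustration of the signal-0 probe (invented names; build with -pthread):

#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void *worker(void *arg)
{
	(void)arg;
	sleep(1);	/* stay alive long enough to be probed */
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, worker, NULL);
	/* Signal 0: error checking only, nothing is delivered. */
	printf("worker alive? %s\n",
	       pthread_kill(tid, 0) == 0 ? "yes" : "no");
	pthread_join(tid, NULL);
	return 0;
}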
@@ -1987,9 +2004,9 @@ TEST_F(TSYNC, siblings_fail_prctl)
 	pthread_mutex_unlock(&self->mutex);
 
 	/* Ensure diverging sibling failed to call prctl. */
-	pthread_join(self->sibling[0].tid, &status);
+	PTHREAD_JOIN(self->sibling[0].tid, &status);
 	EXPECT_EQ(SIBLING_EXIT_FAILURE, (long)status);
-	pthread_join(self->sibling[1].tid, &status);
+	PTHREAD_JOIN(self->sibling[1].tid, &status);
 	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
 }
@@ -2029,9 +2046,9 @@ TEST_F(TSYNC, two_siblings_with_ancestor)
 	}
 	pthread_mutex_unlock(&self->mutex);
 
 	/* Ensure they are both killed and don't exit cleanly. */
-	pthread_join(self->sibling[0].tid, &status);
+	PTHREAD_JOIN(self->sibling[0].tid, &status);
 	EXPECT_EQ(0x0, (long)status);
-	pthread_join(self->sibling[1].tid, &status);
+	PTHREAD_JOIN(self->sibling[1].tid, &status);
 	EXPECT_EQ(0x0, (long)status);
 }
@@ -2055,9 +2072,9 @@ TEST_F(TSYNC, two_sibling_want_nnp)
 	pthread_mutex_unlock(&self->mutex);
 
 	/* Ensure they are both upset about lacking nnp. */
-	pthread_join(self->sibling[0].tid, &status);
+	PTHREAD_JOIN(self->sibling[0].tid, &status);
 	EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status);
-	pthread_join(self->sibling[1].tid, &status);
+	PTHREAD_JOIN(self->sibling[1].tid, &status);
 	EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status);
 }
@@ -2095,9 +2112,9 @@ TEST_F(TSYNC, two_siblings_with_no_filter)
 	pthread_mutex_unlock(&self->mutex);
 
 	/* Ensure they are both killed and don't exit cleanly. */
-	pthread_join(self->sibling[0].tid, &status);
+	PTHREAD_JOIN(self->sibling[0].tid, &status);
 	EXPECT_EQ(0x0, (long)status);
-	pthread_join(self->sibling[1].tid, &status);
+	PTHREAD_JOIN(self->sibling[1].tid, &status);
 	EXPECT_EQ(0x0, (long)status);
 }
@@ -2140,9 +2157,9 @@ TEST_F(TSYNC, two_siblings_with_one_divergence)
 	pthread_mutex_unlock(&self->mutex);
 
 	/* Ensure they are both unkilled. */
-	pthread_join(self->sibling[0].tid, &status);
+	PTHREAD_JOIN(self->sibling[0].tid, &status);
 	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
-	pthread_join(self->sibling[1].tid, &status);
+	PTHREAD_JOIN(self->sibling[1].tid, &status);
 	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
 }
@@ -2199,7 +2216,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
 		TH_LOG("cond broadcast non-zero");
 	}
 	pthread_mutex_unlock(&self->mutex);
-	pthread_join(self->sibling[sib].tid, &status);
+	PTHREAD_JOIN(self->sibling[sib].tid, &status);
 	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
 	/* Poll for actual task death. pthread_join doesn't guarantee it. */
 	while (!kill(self->sibling[sib].system_tid, 0))
@@ -2224,7 +2241,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
 		TH_LOG("cond broadcast non-zero");
 	}
 	pthread_mutex_unlock(&self->mutex);
-	pthread_join(self->sibling[sib].tid, &status);
+	PTHREAD_JOIN(self->sibling[sib].tid, &status);
 	EXPECT_EQ(0, (long)status);
 	/* Poll for actual task death. pthread_join doesn't guarantee it. */
 	while (!kill(self->sibling[sib].system_tid, 0))