Commit 9eef0233 authored by Linus Torvalds

Merge tag 'locking-core-2021-02-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:
 "Core locking primitives updates:
    - Remove mutex_trylock_recursive() from the API - no users left
    - Simplify + constify the futex code a bit

  Lockdep updates:
    - Teach lockdep about local_lock_t
    - Add CONFIG_DEBUG_IRQFLAGS=y debug config option to check for
      potentially unsafe IRQ mask restoration patterns. (I.e.
      calling raw_local_irq_restore() with IRQs enabled.)
    - Add wait context self-tests
    - Fix graph lock corner case corrupting internal data structures
    - Fix noinstr annotations

  LKMM updates:
    - Simplify the litmus tests
    - Documentation fixes

  KCSAN updates:
    - Re-enable KCSAN instrumentation in lib/random32.c

  Misc fixes:
    - Don't branch-trace static label APIs
    - DocBook fix
    - Remove stale leftover empty file"
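
A minimal sketch of the unsafe pattern the DEBUG_IRQFLAGS item above is aimed at (do_something() is a made-up placeholder; only the raw_local_irq_*()/local_irq_enable() helpers are real kernel APIs):

	static void example_irq_misuse(void)
	{
		unsigned long flags;

		raw_local_irq_save(flags);	/* IRQs off, previous state saved   */

		do_something();			/* imagine this helper ...          */
		local_irq_enable();		/* ... wrongly re-enables IRQs      */

		/*
		 * IRQs are enabled at this point, so the restore below is
		 * bogus; with CONFIG_DEBUG_IRQFLAGS=y it now triggers
		 * warn_bogus_irq_restore() ("raw_local_irq_restore() called
		 * with IRQs enabled").
		 */
		raw_local_irq_restore(flags);
	}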

* tag 'locking-core-2021-02-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  checkpatch: Don't check for mutex_trylock_recursive()
  locking/mutex: Kill mutex_trylock_recursive()
  s390: Use arch_local_irq_{save,restore}() in early boot code
  lockdep: Noinstr annotate warn_bogus_irq_restore()
  locking/lockdep: Avoid unmatched unlock
  locking/rwsem: Remove empty rwsem.h
  locking/rtmutex: Add missing kernel-doc markup
  futex: Remove unneeded gotos
  futex: Change utime parameter to be 'const ... *'
  lockdep: report broken irq restoration
  jump_label: Do not profile branch annotations
  locking: Add Reviewers
  locking/selftests: Add local_lock inversion tests
  locking/lockdep: Exclude local_lock_t from IRQ inversions
  locking/lockdep: Clean up check_redundant() a bit
  locking/lockdep: Add a skip() function to __bfs()
  locking/lockdep: Mark local_lock_t
  locking/selftests: More granular debug_locks_verbose
  lockdep/selftest: Add wait context selftests
  tools/memory-model: Fix typo in klitmus7 compatibility table
  ...
parents d089f48f 3765d01b
......@@ -802,13 +802,14 @@
insecure, please do not use on production kernels.
debug_locks_verbose=
[KNL] verbose self-tests
Format=<0|1>
[KNL] verbose locking self-tests
Format: <int>
Print debugging info while doing the locking API
self-tests.
We default to 0 (no extra messages), setting it to
1 will print _a lot_ more information - normally
only useful to kernel developers.
Bitmask for the various LOCKTYPE_ tests. Defaults to 0
(no extra messages), setting it to -1 (all bits set)
will print _a_lot_ more information - normally only
useful to lockdep developers.
debug_objects [KNL] Enable object debugging
......
......@@ -10342,6 +10342,8 @@ LOCKING PRIMITIVES
M: Peter Zijlstra <peterz@infradead.org>
M: Ingo Molnar <mingo@redhat.com>
M: Will Deacon <will@kernel.org>
R: Waiman Long <longman@redhat.com>
R: Boqun Feng <boqun.feng@gmail.com> (LOCKDEP)
L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
......
......@@ -66,13 +66,13 @@ int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb)
unsigned long flags;
int rc;
raw_local_irq_save(flags);
flags = arch_local_irq_save();
rc = sclp_service_call(cmd, sccb);
if (rc)
goto out;
sclp_early_wait_irq();
out:
raw_local_irq_restore(flags);
arch_local_irq_restore(flags);
return rc;
}
......
......@@ -76,6 +76,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#else
# define likely(x) __builtin_expect(!!(x), 1)
# define unlikely(x) __builtin_expect(!!(x), 0)
# define likely_notrace(x) likely(x)
# define unlikely_notrace(x) unlikely(x)
#endif
/* Optimization barrier */
......
......@@ -149,6 +149,17 @@ do { \
# define start_critical_timings() do { } while (0)
#endif
#ifdef CONFIG_DEBUG_IRQFLAGS
extern void warn_bogus_irq_restore(void);
#define raw_check_bogus_irq_restore() \
do { \
if (unlikely(!arch_irqs_disabled())) \
warn_bogus_irq_restore(); \
} while (0)
#else
#define raw_check_bogus_irq_restore() do { } while (0)
#endif
/*
* Wrap the arch provided IRQ routines to provide appropriate checks.
*/
......@@ -162,6 +173,7 @@ do { \
#define raw_local_irq_restore(flags) \
do { \
typecheck(unsigned long, flags); \
raw_check_bogus_irq_restore(); \
arch_local_irq_restore(flags); \
} while (0)
#define raw_local_save_flags(flags) \
......
......@@ -261,14 +261,14 @@ static __always_inline void jump_label_init(void)
static __always_inline bool static_key_false(struct static_key *key)
{
if (unlikely(static_key_count(key) > 0))
if (unlikely_notrace(static_key_count(key) > 0))
return true;
return false;
}
static __always_inline bool static_key_true(struct static_key *key)
{
if (likely(static_key_count(key) > 0))
if (likely_notrace(static_key_count(key) > 0))
return true;
return false;
}
......@@ -460,7 +460,7 @@ extern bool ____wrong_branch_error(void);
branch = !arch_static_branch_jump(&(x)->key, true); \
else \
branch = ____wrong_branch_error(); \
likely(branch); \
likely_notrace(branch); \
})
#define static_branch_unlikely(x) \
......@@ -472,13 +472,13 @@ extern bool ____wrong_branch_error(void);
branch = arch_static_branch(&(x)->key, false); \
else \
branch = ____wrong_branch_error(); \
unlikely(branch); \
unlikely_notrace(branch); \
})
#else /* !CONFIG_JUMP_LABEL */
#define static_branch_likely(x) likely(static_key_enabled(&(x)->key))
#define static_branch_unlikely(x) unlikely(static_key_enabled(&(x)->key))
#define static_branch_likely(x) likely_notrace(static_key_enabled(&(x)->key))
#define static_branch_unlikely(x) unlikely_notrace(static_key_enabled(&(x)->key))
#endif /* CONFIG_JUMP_LABEL */
......
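A hedged usage sketch of what the jump_label change above affects (the key and function names are illustrative, not from this merge): with CONFIG_PROFILE_ANNOTATED_BRANCHES=y, plain likely()/unlikely() record branch statistics, which is noise for a patched static branch, so the static key APIs now use the _notrace variants.

	static DEFINE_STATIC_KEY_FALSE(example_feature_key);

	void example_hot_path(void)
	{
		/*
		 * The hint inside static_branch_unlikely() is now
		 * unlikely_notrace(), so the branch profiler no longer
		 * records an entry for this jump-label site.
		 */
		if (static_branch_unlikely(&example_feature_key))
			pr_info("example feature enabled\n");
	}

	void example_slow_path_setup(void)
	{
		static_branch_enable(&example_feature_key);
	}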
......@@ -18,6 +18,7 @@ typedef struct {
.dep_map = { \
.name = #lockname, \
.wait_type_inner = LD_WAIT_CONFIG, \
.lock_type = LD_LOCK_PERCPU, \
}
#else
# define LL_DEP_MAP_INIT(lockname)
......@@ -30,7 +31,9 @@ do { \
static struct lock_class_key __key; \
\
debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
lockdep_init_map_wait(&(lock)->dep_map, #lock, &__key, 0, LD_WAIT_CONFIG);\
lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, 0, \
LD_WAIT_CONFIG, LD_WAIT_INV, \
LD_LOCK_PERCPU); \
} while (0)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
......
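A hedged sketch of typical local_lock_t usage that the LD_LOCK_PERCPU marking above applies to (struct and function names are illustrative):

	struct example_pcpu_data {
		local_lock_t	lock;
		int		counter;
	};

	static DEFINE_PER_CPU(struct example_pcpu_data, example_data) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};

	static void example_update(void)
	{
		/*
		 * A local_lock_t only protects this CPU's instance; marking
		 * it LD_LOCK_PERCPU lets lockdep keep checking its wait
		 * context while excluding it from IRQ-inversion analysis.
		 */
		local_lock(&example_data.lock);
		this_cpu_inc(example_data.counter);
		local_unlock(&example_data.lock);
	}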
......@@ -185,12 +185,19 @@ extern void lockdep_unregister_key(struct lock_class_key *key);
* to lockdep:
*/
extern void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass, short inner, short outer);
extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);
static inline void
lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass, u8 inner, u8 outer)
{
lockdep_init_map_type(lock, name, key, subclass, inner, LD_WAIT_INV, LD_LOCK_NORMAL);
}
static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass, short inner)
struct lock_class_key *key, int subclass, u8 inner)
{
lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}
......@@ -340,6 +347,8 @@ static inline void lockdep_set_selftest_task(struct task_struct *task)
# define lock_set_class(l, n, k, s, i) do { } while (0)
# define lock_set_subclass(l, s, i) do { } while (0)
# define lockdep_init() do { } while (0)
# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
......
......@@ -30,6 +30,12 @@ enum lockdep_wait_type {
LD_WAIT_MAX, /* must be last */
};
enum lockdep_lock_type {
LD_LOCK_NORMAL = 0, /* normal, catch all */
LD_LOCK_PERCPU, /* percpu */
LD_LOCK_MAX,
};
#ifdef CONFIG_LOCKDEP
/*
......@@ -119,8 +125,10 @@ struct lock_class {
int name_version;
const char *name;
short wait_type_inner;
short wait_type_outer;
u8 wait_type_inner;
u8 wait_type_outer;
u8 lock_type;
/* u8 hole; */
#ifdef CONFIG_LOCK_STAT
unsigned long contention_point[LOCKSTAT_POINTS];
......@@ -169,8 +177,10 @@ struct lockdep_map {
struct lock_class_key *key;
struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
const char *name;
short wait_type_outer; /* can be taken in this context */
short wait_type_inner; /* presents this context */
u8 wait_type_outer; /* can be taken in this context */
u8 wait_type_inner; /* presents this context */
u8 lock_type;
/* u8 hole; */
#ifdef CONFIG_LOCK_STAT
int cpu;
unsigned long ip;
......
......@@ -199,29 +199,4 @@ extern void mutex_unlock(struct mutex *lock);
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
/*
* These values are chosen such that FAIL and SUCCESS match the
* values of the regular mutex_trylock().
*/
enum mutex_trylock_recursive_enum {
MUTEX_TRYLOCK_FAILED = 0,
MUTEX_TRYLOCK_SUCCESS = 1,
MUTEX_TRYLOCK_RECURSIVE,
};
/**
* mutex_trylock_recursive - trylock variant that allows recursive locking
* @lock: mutex to be locked
*
* This function should not be used, _ever_. It is purely for hysterical GEM
* raisins, and once those are gone this will be removed.
*
* Returns:
* - MUTEX_TRYLOCK_FAILED - trylock failed,
* - MUTEX_TRYLOCK_SUCCESS - lock acquired,
* - MUTEX_TRYLOCK_RECURSIVE - we already owned the lock.
*/
extern /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
mutex_trylock_recursive(struct mutex *lock);
#endif /* __LINUX_MUTEX_H */
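With mutex_trylock_recursive() removed, callers fall back to plain mutex_trylock() and must not try to re-acquire a mutex they already own; a minimal hedged sketch of that pattern (function name illustrative):

	static int example_try_work(struct mutex *lock)
	{
		if (!mutex_trylock(lock))
			return -EBUSY;		/* contended: back off, never recurse */

		/* ... critical section ... */

		mutex_unlock(lock);
		return 0;
	}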
......@@ -607,11 +607,11 @@ asmlinkage long sys_unshare(unsigned long unshare_flags);
/* kernel/futex.c */
asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
struct __kernel_timespec __user *utime, u32 __user *uaddr2,
u32 val3);
const struct __kernel_timespec __user *utime,
u32 __user *uaddr2, u32 val3);
asmlinkage long sys_futex_time32(u32 __user *uaddr, int op, u32 val,
struct old_timespec32 __user *utime, u32 __user *uaddr2,
u32 val3);
const struct old_timespec32 __user *utime,
u32 __user *uaddr2, u32 val3);
asmlinkage long sys_get_robust_list(int pid,
struct robust_list_head __user * __user *head_ptr,
size_t __user *len_ptr);
......
......@@ -3012,7 +3012,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
* Success, we're done! No tricky corner cases.
*/
if (!ret)
goto out_putkey;
return ret;
/*
* The atomic access to the futex value generated a
* pagefault, so retry the user-access and the wakeup:
......@@ -3029,7 +3029,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
* wake_futex_pi has detected invalid state. Tell user
* space.
*/
goto out_putkey;
return ret;
}
/*
......@@ -3050,7 +3050,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
default:
WARN_ON_ONCE(1);
goto out_putkey;
return ret;
}
}
......@@ -3061,7 +3061,6 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
out_unlock:
spin_unlock(&hb->lock);
out_putkey:
return ret;
pi_retry:
......@@ -3763,8 +3762,8 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
struct __kernel_timespec __user *, utime, u32 __user *, uaddr2,
u32, val3)
const struct __kernel_timespec __user *, utime,
u32 __user *, uaddr2, u32, val3)
{
struct timespec64 ts;
ktime_t t, *tp = NULL;
......@@ -3959,7 +3958,7 @@ COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
const struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
u32, val3)
{
struct timespec64 ts;
......
......@@ -12,7 +12,6 @@
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
......@@ -101,7 +100,7 @@ static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
static DEFINE_PER_CPU(long, kcsan_skip);
/* For kcsan_prandom_u32_max(). */
static DEFINE_PER_CPU(struct rnd_state, kcsan_rand_state);
static DEFINE_PER_CPU(u32, kcsan_rand_state);
static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
size_t size,
......@@ -275,20 +274,17 @@ should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *
}
/*
* Returns a pseudo-random number in interval [0, ep_ro). See prandom_u32_max()
* for more details.
*
* The open-coded version here is using only safe primitives for all contexts
* where we can have KCSAN instrumentation. In particular, we cannot use
* prandom_u32() directly, as its tracepoint could cause recursion.
* Returns a pseudo-random number in interval [0, ep_ro). Simple linear
* congruential generator, using constants from "Numerical Recipes".
*/
static u32 kcsan_prandom_u32_max(u32 ep_ro)
{
struct rnd_state *state = &get_cpu_var(kcsan_rand_state);
const u32 res = prandom_u32_state(state);
u32 state = this_cpu_read(kcsan_rand_state);
state = 1664525 * state + 1013904223;
this_cpu_write(kcsan_rand_state, state);
put_cpu_var(kcsan_rand_state);
return (u32)(((u64) res * ep_ro) >> 32);
return state % ep_ro;
}
static inline void reset_kcsan_skip(void)
......@@ -639,10 +635,14 @@ static __always_inline void check_access(const volatile void *ptr, size_t size,
void __init kcsan_init(void)
{
int cpu;
BUG_ON(!in_task());
kcsan_debugfs_init();
prandom_seed_full_state(&kcsan_rand_state);
for_each_possible_cpu(cpu)
per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();
/*
* We are in the init task, and no other tasks should be running;
......
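A stand-alone (userspace) rendering of the recurrence used by the new kcsan_prandom_u32_max() above, for illustration only; the seed here is arbitrary, whereas in the kernel the state is per-CPU and seeded from get_cycles():

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t lcg_state = 12345;	/* arbitrary seed for the demo */

	/* state' = 1664525 * state + 1013904223 (mod 2^32), "Numerical Recipes" LCG */
	static uint32_t sketch_prandom_u32_max(uint32_t ep_ro)
	{
		lcg_state = 1664525u * lcg_state + 1013904223u;
		return lcg_state % ep_ro;	/* plain modulo: slight bias unless ep_ro is a power of two */
	}

	int main(void)
	{
		for (int i = 0; i < 5; i++)
			printf("%u\n", sketch_prandom_u32_max(100));	/* values in [0, 100) */
		return 0;
	}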
......@@ -15,6 +15,7 @@ CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE)
endif
obj-$(CONFIG_DEBUG_IRQFLAGS) += irqflag-debug.o
obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
obj-$(CONFIG_LOCKDEP) += lockdep.o
ifeq ($(CONFIG_PROC_FS),y)
......
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/irqflags.h>
noinstr void warn_bogus_irq_restore(void)
{
instrumentation_begin();
WARN_ONCE(1, "raw_local_irq_restore() called with IRQs enabled\n");
instrumentation_end();
}
EXPORT_SYMBOL(warn_bogus_irq_restore);
This diff is collapsed.
......@@ -86,16 +86,6 @@ bool mutex_is_locked(struct mutex *lock)
}
EXPORT_SYMBOL(mutex_is_locked);
__must_check enum mutex_trylock_recursive_enum
mutex_trylock_recursive(struct mutex *lock)
{
if (unlikely(__mutex_owner(lock) == current))
return MUTEX_TRYLOCK_RECURSIVE;
return mutex_trylock(lock);
}
EXPORT_SYMBOL(mutex_trylock_recursive);
static inline unsigned long __owner_flags(unsigned long owner)
{
return owner & MUTEX_FLAGS;
......
......@@ -1604,8 +1604,11 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
/**
* Futex variant, that since futex variants do not use the fast-path, can be
* simple and will not need to retry.
* __rt_mutex_futex_unlock - Futex variant, that since futex variants
* do not use the fast-path, can be simple and will not need to retry.
*
* @lock: The rt_mutex to be unlocked
* @wake_q: The wake queue head from which to get the next lock waiter
*/
bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
struct wake_q_head *wake_q)
......@@ -1662,13 +1665,15 @@ void rt_mutex_destroy(struct rt_mutex *lock)
EXPORT_SYMBOL_GPL(rt_mutex_destroy);
/**
* __rt_mutex_init - initialize the rt lock
* __rt_mutex_init - initialize the rt_mutex
*
* @lock: the rt lock to be initialized
* @lock: The rt_mutex to be initialized
* @name: The lock name used for debugging
* @key: The lock class key used for debugging
*
* Initialize the rt lock to unlocked state.
* Initialize the rt_mutex to unlocked state.
*
* Initializing of a locked rt lock is not allowed
* Initializing of a locked rt_mutex is not allowed
*/
void __rt_mutex_init(struct rt_mutex *lock, const char *name,
struct lock_class_key *key)
......
......@@ -1335,6 +1335,7 @@ config LOCKDEP_SMALL
config DEBUG_LOCKDEP
bool "Lock dependency engine debugging"
depends on DEBUG_KERNEL && LOCKDEP
select DEBUG_IRQFLAGS
help
If you say Y here, the lock dependency engine will do
additional runtime checks to debug itself, at the price
......@@ -1423,6 +1424,13 @@ config TRACE_IRQFLAGS_NMI
depends on TRACE_IRQFLAGS
depends on TRACE_IRQFLAGS_NMI_SUPPORT
config DEBUG_IRQFLAGS
bool "Debug IRQ flag manipulation"
help
Enables checks for potentially unsafe enabling or disabling of
interrupts, such as calling raw_local_irq_restore() when interrupts
are enabled.
config STACKTRACE
bool "Stack backtrace support"
depends on STACKTRACE_SUPPORT
......
......@@ -27,9 +27,6 @@ KASAN_SANITIZE_string.o := n
CFLAGS_string.o += -fno-stack-protector
endif
# Used by KCSAN while enabled, avoid recursion.
KCSAN_SANITIZE_random32.o := n
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o timerqueue.o xarray.o \
idr.o extable.o sha1.o irq_regs.o argv_split.o \
......
This diff is collapsed.
......@@ -7062,12 +7062,6 @@ sub process {
}
}
# check for mutex_trylock_recursive usage
if ($line =~ /mutex_trylock_recursive/) {
ERROR("LOCKING",
"recursive locking is bad, do not use this ever.\n" . $herecurr);
}
# check for lockdep_set_novalidate_class
if ($line =~ /^.\s*lockdep_set_novalidate_class\s*\(/ ||
$line =~ /__lockdep_no_validate__\s*\)/ ) {
......
......@@ -33,10 +33,11 @@ Acquire: With respect to a lock, acquiring that lock, for example,
acquire loads.
When an acquire load returns the value stored by a release store
to that same variable, then all operations preceding that store
happen before any operations following that load acquire.
to that same variable, (in other words, the acquire load "reads
from" the release store), then all operations preceding that
store "happen before" any operations following that load acquire.
See also "Relaxed" and "Release".
See also "Happens-Before", "Reads-From", "Relaxed", and "Release".
Coherence (co): When one CPU's store to a given variable overwrites
either the value from another CPU's store or some later value,
......@@ -119,6 +120,11 @@ Fully Ordered: An operation such as smp_mb() that orders all of
that orders all of its CPU's prior accesses, itself, and
all of its CPU's subsequent accesses.
Happens-Before (hb): A relation between two accesses in which LKMM
guarantees the first access precedes the second. For more
detail, please see the "THE HAPPENS-BEFORE RELATION: hb"
section of explanation.txt.
Marked Access: An access to a variable that uses an special function or
macro such as "r1 = READ_ONCE(x)" or "smp_store_release(&a, 1)".
......
......@@ -51,7 +51,7 @@ klitmus7 Compatibility Table
============ ==========
target Linux herdtools7
------------ ----------
-- 4.18 7.48 --
-- 4.14 7.48 --
4.15 -- 4.19 7.49 --
4.20 -- 5.5 7.54 --
5.6 -- 7.56 --
......
......@@ -7,9 +7,7 @@ C CoRR+poonceonce+Once
* reads from the same variable are ordered.
*)
{
int x;
}
{}
P0(int *x)
{
......
......@@ -7,9 +7,7 @@ C CoRW+poonceonce+Once
* a given variable and a later write to that same variable are ordered.
*)
{
int x;
}
{}
P0(int *x)
{
......
......@@ -7,9 +7,7 @@ C CoWR+poonceonce+Once
* given variable and a later read from that same variable are ordered.
*)
{
int x;
}
{}
P0(int *x)
{
......
......@@ -7,9 +7,7 @@ C CoWW+poonceonce
* writes to the same variable are ordered.
*)
{
int x;
}
{}
P0(int *x)
{
......
......@@ -10,10 +10,7 @@ C IRIW+fencembonceonces+OnceOnce
* process? This litmus test exercises LKMM's "propagation" rule.
*)
{
int x;
int y;
}
{}
P0(int *x)
{
......
......@@ -10,10 +10,7 @@ C IRIW+poonceonces+OnceOnce
* different process?
*)
{
int x;
int y;
}
{}
P0(int *x)
{
......
......@@ -7,12 +7,7 @@ C ISA2+pooncelock+pooncelock+pombonce
* (in P0() and P1()) is visible to external process P2().
*)
{
spinlock_t mylock;
int x;
int y;
int z;
}
{}
P0(int *x, int *y, spinlock_t *mylock)
{
......
......@@ -9,11 +9,7 @@ C ISA2+poonceonces
* of the smp_load_acquire() invocations are replaced by READ_ONCE()?
*)
{
int x;
int y;
int z;
}
{}
P0(int *x, int *y)
{
......
......@@ -11,11 +11,7 @@ C ISA2+pooncerelease+poacquirerelease+poacquireonce
* (AKA non-rf) link, so release-acquire is all that is needed.
*)
{
int x;
int y;
int z;
}
{}
P0(int *x, int *y)
{
......
......@@ -11,10 +11,7 @@ C LB+fencembonceonce+ctrlonceonce
* another control dependency and order would still be maintained.)
*)
{
int x;
int y;
}
{}
P0(int *x, int *y)
{
......
......@@ -8,10 +8,7 @@ C LB+poacquireonce+pooncerelease
* to the other?
*)
{
int x;
int y;
}
{}
P0(int *x, int *y)
{
......
......@@ -7,10 +7,7 @@ C LB+poonceonces
* be prevented even with no explicit ordering?
*)
{
int x;
int y;
}
{}
P0(int *x, int *y)
{
......
......@@ -8,10 +8,7 @@ C MP+fencewmbonceonce+fencermbonceonce
* is usually better to use smp_store_release() and smp_load_acquire().
*)
{
int buf;
int flag;
}
{}
P0(int *buf, int *flag) // Producer
{
......
......@@ -10,9 +10,7 @@ C MP+onceassign+derefonce
*)
{
int *p=y;
int x;
int y=0;
p=y;
}
P0(int *x, int **p) // Producer
......
......@@ -10,10 +10,7 @@ C MP+polockmbonce+poacquiresilsil
* executed before the lock was acquired (loosely speaking).
*)
{
spinlock_t lo;
int x;
}
{}
P0(spinlock_t *lo, int *x) // Producer
{
......
......@@ -10,10 +10,7 @@ C MP+polockonce+poacquiresilsil
* speaking).
*)
{
spinlock_t lo;
int x;
}
{}
P0(spinlock_t *lo, int *x) // Producer
{
......
......@@ -11,11 +11,7 @@ C MP+polocks
* to see all prior accesses by those other CPUs.
*)
{
spinlock_t mylock;
int buf;
int flag;
}
{}
P0(int *buf, int *flag, spinlock_t *mylock) // Producer
{
......
......@@ -7,10 +7,7 @@ C MP+poonceonces
* no ordering at all?
*)
{
int buf;
int flag;
}
{}
P0(int *buf, int *flag) // Producer
{
......
......@@ -8,10 +8,7 @@ C MP+pooncerelease+poacquireonce
* pattern.
*)
{
int buf;
int flag;
}
{}
P0(int *buf, int *flag) // Producer
{
......
......@@ -11,11 +11,7 @@ C MP+porevlocks
* see all prior accesses by those other CPUs.
*)
{
spinlock_t mylock;
int buf;
int flag;
}
{}
P0(int *buf, int *flag, spinlock_t *mylock) // Consumer
{
......
......@@ -9,10 +9,7 @@ C R+fencembonceonces
* cause the resulting test to be allowed.
*)
{
int x;
int y;
}
{}
P0(int *x, int *y)
{
......
......@@ -8,10 +8,7 @@ C R+poonceonces
* store propagation delays.
*)
{
int x;
int y;
}
{}
P0(int *x, int *y)
{
......
......@@ -7,10 +7,7 @@ C S+fencewmbonceonce+poacquireonce
* store against a subsequent store?
*)
{
int x;
int y;
}
{}
P0(int *x, int *y)
{
......
......@@ -9,10 +9,7 @@ C S+poonceonces
* READ_ONCE(), is ordering preserved?
*)
{
int x;
int y;
}
{}
P0(int *x, int *y)
{
......
......@@ -9,10 +9,7 @@ C SB+fencembonceonces
* suffice, but not much else.)
*)
{
int x;
int y;
}
{}
P0(int *x, int *y)
{
......
......@@ -8,10 +8,7 @@ C SB+poonceonces
* variable that the preceding process reads.
*)
{
int x;
int y;
}
{}
P0(int *x, int *y)
{
......
......@@ -6,10 +6,7 @@ C SB+rfionceonce-poonceonces
* This litmus test demonstrates that LKMM is not fully multicopy atomic.
*)
{
int x;
int y;
}
{}
P0(int *x, int *y)
{
......
......@@ -8,10 +8,7 @@ C WRC+poonceonces+Once
* test has no ordering at all.
*)
{
int x;
int y;
}
{}
P0(int *x)
{
......
......@@ -10,10 +10,7 @@ C WRC+pooncerelease+fencermbonceonce+Once
* is A-cumulative in LKMM.
*)
{
int x;
int y;
}
{}
P0(int *x)
{
......
......@@ -9,12 +9,7 @@ C Z6.0+pooncelock+poonceLock+pombonce
* by CPUs not holding that lock.
*)
{
spinlock_t mylock;
int x;
int y;
int z;
}
{}
P0(int *x, int *y, spinlock_t *mylock)
{
......
......@@ -8,12 +8,7 @@ C Z6.0+pooncelock+pooncelock+pombonce
* seen as ordered by a third process not holding that lock.
*)
{
spinlock_t mylock;
int x;
int y;
int z;
}
{}
P0(int *x, int *y, spinlock_t *mylock)
{
......
......@@ -14,11 +14,7 @@ C Z6.0+pooncerelease+poacquirerelease+fencembonceonce
* involving locking.)
*)
{
int x;
int y;
int z;
}
{}
P0(int *x, int *y)
{
......