Commit 026a14f0 authored by Ingo Molnar, committed by Linus Torvalds

[PATCH] Add a few might_sleep() checks

Add a whole bunch more might_sleep() checks.  We also enable might_sleep()
checking in copy_*_user().  This was non-trivial because the "copy_*_user()
in atomic regions" trick would generate false positives.  Fix that up by
adding a new __copy_*_user_inatomic(), which avoids the might_sleep() check.

Only i386 is supported in this patch.

With: Arjan van de Ven <arjanv@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent c46cde9d
......@@ -31,6 +31,7 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon
#define __do_strncpy_from_user(dst,src,count,res) \
do { \
int __d0, __d1, __d2; \
might_sleep(); \
__asm__ __volatile__( \
" testl %1,%1\n" \
" jz 2f\n" \
......@@ -119,6 +120,7 @@ strncpy_from_user(char *dst, const char __user *src, long count)
#define __do_clear_user(addr,size) \
do { \
int __d0; \
might_sleep(); \
__asm__ __volatile__( \
"0: rep; stosl\n" \
" movl %2,%0\n" \
......
......@@ -2535,6 +2535,7 @@ void generic_make_request(struct bio *bio)
sector_t maxsector;
int ret, nr_sectors = bio_sectors(bio);
might_sleep();
/* Test device or partition size, when known. */
maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
if (maxsector) {
......
......@@ -1551,6 +1551,7 @@ __getblk(struct block_device *bdev, sector_t block, int size)
{
struct buffer_head *bh = __find_get_block(bdev, block, size);
might_sleep();
if (bh == NULL)
bh = __getblk_slow(bdev, block, size);
return bh;
......@@ -1776,6 +1777,8 @@ void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
{
struct buffer_head *old_bh;
might_sleep();
old_bh = __find_get_block_slow(bdev, block, 0);
if (old_bh) {
clear_buffer_dirty(old_bh);
......
......@@ -144,6 +144,8 @@ void dput(struct dentry *dentry)
return;
repeat:
if (atomic_read(&dentry->d_count) == 1)
might_sleep();
if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
return;
......
......@@ -66,6 +66,8 @@ int ext3_forget(handle_t *handle, int is_metadata,
{
int err;
might_sleep();
BUFFER_TRACE(bh, "enter");
jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
......@@ -2966,6 +2968,7 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
struct ext3_iloc iloc;
int err;
might_sleep();
err = ext3_reserve_inode_write(handle, inode, &iloc);
if (!err)
err = ext3_mark_iloc_dirty(handle, inode, &iloc);
......
......@@ -123,6 +123,7 @@ void fastcall __fput(struct file *file)
struct vfsmount *mnt = file->f_vfsmnt;
struct inode *inode = dentry->d_inode;
might_sleep();
/*
* The function eventpoll_release() should be the first called
* in the file cleanup chain.
......
......@@ -398,6 +398,7 @@ writeback_inodes(struct writeback_control *wbc)
{
struct super_block *sb;
might_sleep();
spin_lock(&inode_lock);
spin_lock(&sb_lock);
restart:
......@@ -553,6 +554,7 @@ void write_inode_now(struct inode *inode, int sync)
if (inode->i_mapping->backing_dev_info->memory_backed)
return;
might_sleep();
spin_lock(&inode_lock);
__writeback_single_inode(inode, &wbc);
spin_unlock(&inode_lock);
......
......@@ -243,6 +243,7 @@ void __iget(struct inode * inode)
*/
void clear_inode(struct inode *inode)
{
might_sleep();
invalidate_inode_buffers(inode);
if (inode->i_data.nrpages)
......
......@@ -332,6 +332,7 @@ int journal_revoke(handle_t *handle, unsigned long blocknr,
struct block_device *bdev;
int err;
might_sleep();
if (bh_in)
BUFFER_TRACE(bh_in, "enter");
......
......@@ -1529,6 +1529,7 @@ struct dentry *proc_pid_unhash(struct task_struct *p)
void proc_pid_flush(struct dentry *proc_dentry)
{
might_sleep();
if(proc_dentry != NULL) {
shrink_dcache_parent(proc_dentry);
dput(proc_dentry);
......
......@@ -395,6 +395,10 @@ __copy_tofrom_user(void *to, const void *from, long len, const void __user *vali
__copy_tofrom_user_nocheck((to),(__force void *)(from),(n)); \
})
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
extern inline long
copy_to_user(void __user *to, const void *from, long n)
{
......
......@@ -394,6 +394,9 @@ static inline unsigned long __copy_to_user(void __user *to, const void *from, un
return __arch_copy_to_user(to, from, n);
}
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
static inline unsigned long clear_user (void __user *to, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
......
......@@ -217,6 +217,9 @@ static __inline__ unsigned long __copy_to_user(void *to, const void *from, unsig
return n;
}
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
static __inline__ unsigned long clear_user (void *to, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
......
......@@ -434,6 +434,8 @@ __generic_clear_user_nocheck(void *to, unsigned long n)
#define __copy_to_user(to,from,n) __generic_copy_to_user_nocheck((to),(from),(n))
#define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n))
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
#define __clear_user(to,n) __generic_clear_user_nocheck((to),(n))
#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
......
......@@ -123,6 +123,8 @@ extern int __get_user_bad(void);
#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
#define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n)) return retval; })
......
......@@ -43,10 +43,12 @@ unsigned int csum_partial_copy_nocheck ( const char *src, char *dst,
}
static __inline__
unsigned int csum_partial_copy_from_user ( const char __user *src, char *dst,
unsigned int csum_partial_copy_from_user(const char __user *src, char *dst,
int len, int sum, int *err_ptr)
{
return csum_partial_copy_generic ( (__force char *)src, dst, len, sum, err_ptr, NULL);
might_sleep();
return csum_partial_copy_generic((__force char *)src, dst,
len, sum, err_ptr, NULL);
}
/*
......@@ -177,6 +179,7 @@ static __inline__ unsigned int csum_and_copy_to_user(const char *src,
int len, int sum,
int *err_ptr)
{
might_sleep();
if (access_ok(VERIFY_WRITE, dst, len))
return csum_partial_copy_generic(src, (__force char *)dst, len, sum, NULL, err_ptr);
......
......@@ -400,7 +400,7 @@ unsigned long __copy_from_user_ll(void *to, const void __user *from, unsigned lo
* On success, this will be zero.
*/
static inline unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
unsigned long ret;
......@@ -420,6 +420,13 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
return __copy_to_user_ll(to, from, n);
}
static inline unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_sleep();
return __copy_to_user_inatomic(to, from, n);
}
/**
* __copy_from_user: - Copy a block of data from user space, with less checking.
* @to: Destination address, in kernel space.
......@@ -438,7 +445,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
* data to the requested size using zero bytes.
*/
static inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
unsigned long ret;
......@@ -458,6 +465,12 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
return __copy_from_user_ll(to, from, n);
}
static inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_sleep();
return __copy_from_user_inatomic(to, from, n);
}
unsigned long copy_to_user(void __user *to, const void *from, unsigned long n);
unsigned long copy_from_user(void *to,
const void __user *from, unsigned long n);
......
......@@ -202,7 +202,8 @@ extern unsigned long __copy_user (void *to, const void *from, unsigned long coun
#define __copy_to_user(to, from, n) __copy_user((to), (from), (n))
#define __copy_from_user(to, from, n) __copy_user((to), (from), (n))
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
#define copy_to_user(to, from, n) __copy_tofrom_user((to), (from), (n), 1)
#define copy_from_user(to, from, n) __copy_tofrom_user((to), (from), (n), 0)
......
......@@ -521,6 +521,9 @@ __constant_copy_from_user(void *to, const void *from, unsigned long n)
: "0"(to), "1"(from), "2"(n/4) \
: "d0", "memory")
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
static inline unsigned long
__constant_copy_to_user(void *to, const void *from, unsigned long n)
{
......
......@@ -134,6 +134,8 @@ extern int __get_user_bad(void);
#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
#define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n)) return retval; })
......
......@@ -463,6 +463,9 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
__cu_len; \
})
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
/*
* copy_to_user: - Copy a block of data into user space.
* @to: Destination address, in user space.
......
......@@ -279,5 +279,7 @@ extern long lstrnlen_user(const char __user *,long);
#define __copy_to_user lcopy_to_user
#define copy_in_user lcopy_in_user
#define __copy_in_user lcopy_in_user
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
#endif /* __PARISC_UACCESS_H */
......@@ -331,6 +331,8 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
__copy_tofrom_user((void __user *)(to), (from), (size))
#define __copy_to_user(to, from, size) \
__copy_tofrom_user((to), (void __user *)(from), (size))
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
extern unsigned long __clear_user(void __user *addr, unsigned long size);
......
......@@ -281,6 +281,9 @@ extern unsigned long copy_in_user(void __user *to, const void __user *from,
extern unsigned long __clear_user(void __user *addr, unsigned long size);
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
static inline unsigned long
clear_user(void __user *addr, unsigned long size)
{
......
......@@ -272,6 +272,9 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
return __copy_to_user_asm(from, n, to);
}
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
/**
* copy_to_user: - Copy a block of data into user space.
* @to: Destination address, in user space.
......
......@@ -446,6 +446,10 @@ __copy_res; })
__copy_user((void *)(to), \
(void *)(from), n)
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
#define copy_from_user(to,from,n) ({ \
void *__copy_to = (void *) (to); \
void *__copy_from = (void *) (from); \
......
......@@ -261,6 +261,9 @@ if (__copy_from_user(to,from,n)) \
return retval; \
})
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
/* XXX: Not sure it works well..
should be such that: 4byte clear and the rest. */
extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size);
......
......@@ -322,6 +322,9 @@ static inline unsigned long __copy_from_user(void *to, const void __user *from,
return __copy_user((void __user *) to, from, n);
}
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
unsigned long ret;
......
......@@ -316,6 +316,8 @@ extern long __strnlen_user(const char __user *, long len);
#define strlen_user __strlen_user
#define strnlen_user __strnlen_user
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
#endif /* __ASSEMBLY__ */
......
......@@ -36,6 +36,9 @@
#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
#define __get_user(x, ptr) \
({ \
const __typeof__(ptr) __private_ptr = ptr; \
......
......@@ -112,6 +112,9 @@ extern int bad_user_access_length (void);
#define __copy_from_user(to, from, n) (memcpy (to, from, n), 0)
#define __copy_to_user(to, from, n) (memcpy(to, from, n), 0)
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
#define copy_from_user(to, from, n) __copy_from_user (to, from, n)
#define copy_to_user(to, from, n) __copy_to_user(to, from, n)
......
......@@ -351,4 +351,7 @@ long strlen_user(const char __user *str);
unsigned long clear_user(void __user *mem, unsigned long len);
unsigned long __clear_user(void __user *mem, unsigned long len);
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
#endif /* __X86_64_UACCESS_H */
......@@ -272,12 +272,14 @@ map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
*/
static inline void wait_on_buffer(struct buffer_head *bh)
{
might_sleep();
if (buffer_locked(bh) || atomic_read(&bh->b_count) == 0)
__wait_on_buffer(bh);
}
static inline void lock_buffer(struct buffer_head *bh)
{
might_sleep();
if (test_set_buffer_locked(bh))
__lock_buffer(bh);
}
......
......@@ -156,6 +156,7 @@ extern void FASTCALL(unlock_page(struct page *page));
static inline void lock_page(struct page *page)
{
might_sleep();
if (TestSetPageLocked(page))
__lock_page(page);
}
......
......@@ -75,6 +75,7 @@ void sync_inodes(int wait);
/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
{
might_sleep();
if (inode->i_state & I_LOCK)
__wait_on_inode(inode);
}
......
......@@ -888,7 +888,8 @@ int file_read_actor(read_descriptor_t *desc, struct page *page,
*/
if (!fault_in_pages_writeable(desc->arg.buf, size)) {
kaddr = kmap_atomic(page, KM_USER0);
left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
left = __copy_to_user_inatomic(desc->arg.buf,
kaddr + offset, size);
kunmap_atomic(kaddr, KM_USER0);
if (left == 0)
goto success;
......@@ -1685,7 +1686,7 @@ filemap_copy_from_user(struct page *page, unsigned long offset,
int left;
kaddr = kmap_atomic(page, KM_USER0);
left = __copy_from_user(kaddr + offset, buf, bytes);
left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
kunmap_atomic(kaddr, KM_USER0);
if (left != 0) {
......@@ -1708,7 +1709,7 @@ __filemap_copy_from_user_iovec(char *vaddr,
int copy = min(bytes, iov->iov_len - base);
base = 0;
left = __copy_from_user(vaddr, buf, copy);
left = __copy_from_user_inatomic(vaddr, buf, copy);
copied += copy;
bytes -= copy;
vaddr += copy;
......
......@@ -194,6 +194,7 @@ void * mempool_alloc(mempool_t *pool, int gfp_mask)
DEFINE_WAIT(wait);
int gfp_nowait = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
might_sleep_if(gfp_mask & __GFP_WAIT);
repeat_alloc:
element = pool->alloc(gfp_nowait|__GFP_NOWARN, pool->pool_data);
if (likely(element != NULL))
......
......@@ -90,6 +90,7 @@ static void remove_vm_struct(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
might_sleep();
if (file) {
struct address_space *mapping = file->f_mapping;
spin_lock(&mapping->i_mmap_lock);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment