Commit 56d8b39d authored by Patrick Mochel

Merge bk://ldm@bkbits.net/linux-2.5

into osdl.org:/home/mochel/src/kernel/devel/linux-2.5
parents abe2e064 4a99b33d
@@ -380,18 +380,21 @@ static ssize_t mousedev_read(struct file * file, char * buffer, size_t count, lo
 	if (!list->ready && !list->buffer) {
 		add_wait_queue(&list->mousedev->wait, &wait);
-		set_current_state(TASK_INTERRUPTIBLE);
-		while (!list->ready) {
-			if (file->f_flags & O_NONBLOCK) {
-				retval = -EAGAIN;
-				break;
-			}
-			if (signal_pending(current)) {
-				retval = -ERESTARTSYS;
-				break;
-			}
+		for (;;) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			retval = 0;
+			if (list->ready || list->buffer)
+				break;
+			retval = -EAGAIN;
+			if (file->f_flags & O_NONBLOCK)
+				break;
+			retval = -ERESTARTSYS;
+			if (signal_pending(current))
+				break;
 			schedule();
 		}
......
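Note: this hunk converts the mousedev_read() wait path to the standard interruptible-wait idiom: the task state is set inside the loop on every iteration, the wakeup condition is re-tested after the state is set, and only then does the task call schedule(). A minimal sketch of the same idiom, where "cond" and "my_waitq" are placeholder names and not symbols from this commit:

/* Sketch of the wait-loop idiom used above; "cond" and "my_waitq" are
 * placeholders, not part of this commit. */
DECLARE_WAITQUEUE(wait, current);
int retval;

add_wait_queue(&my_waitq, &wait);
for (;;) {
	set_current_state(TASK_INTERRUPTIBLE);	/* mark ourselves sleeping first */

	retval = 0;
	if (cond)				/* then test the wakeup condition */
		break;

	retval = -ERESTARTSYS;
	if (signal_pending(current))
		break;

	schedule();				/* actually sleep until woken */
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&my_waitq, &wait);

Setting TASK_INTERRUPTIBLE before the test closes the race where the wakeup arrives between the check and schedule(): wake_up() flips the task back to TASK_RUNNING, so schedule() returns promptly instead of sleeping past the event.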
@@ -648,7 +648,7 @@ int dbNextAG(struct inode *ipbmap)
 	agpref = bmp->db_agpref;
 	if ((atomic_read(&bmp->db_active[agpref]) == 0) &&
 	    (bmp->db_agfree[agpref] >= avgfree))
-		goto found;
+		goto unlock;
 	/* From the last preferred ag, find the next one with at least
 	 * average free space.
@@ -660,9 +660,12 @@ int dbNextAG(struct inode *ipbmap)
 		if (atomic_read(&bmp->db_active[agpref]))
 			/* open file is currently growing in this ag */
 			continue;
-		if (bmp->db_agfree[agpref] >= avgfree)
-			goto found;
-		else if (bmp->db_agfree[agpref] > hwm) {
+		if (bmp->db_agfree[agpref] >= avgfree) {
+			/* Return this one */
+			bmp->db_agpref = agpref;
+			goto unlock;
+		} else if (bmp->db_agfree[agpref] > hwm) {
 			/* Less than avg. freespace, but best so far */
 			hwm = bmp->db_agfree[agpref];
 			next_best = agpref;
 		}
@@ -673,12 +676,9 @@ int dbNextAG(struct inode *ipbmap)
 	 * next best
 	 */
 	if (next_best != -1)
-		agpref = next_best;
-	/* else agpref should be back to its original value */
-found:
-	bmp->db_agpref = agpref;
+		bmp->db_agpref = next_best;
+	/* else leave db_agpref unchanged */
+unlock:
 	BMAP_UNLOCK(bmp);
 	/* return the preferred group.
......
@@ -253,7 +253,7 @@ static int flock_make_lock(struct file *filp,
 	fl->fl_file = filp;
 	fl->fl_pid = current->pid;
-	fl->fl_flags = FL_FLOCK;
+	fl->fl_flags = (cmd & LOCK_NB) ? FL_FLOCK : FL_FLOCK | FL_SLEEP;
 	fl->fl_type = type;
 	fl->fl_end = OFFSET_MAX;
......
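Note: flock_make_lock() now derives FL_SLEEP from the caller's LOCK_NB bit, so a plain flock(fd, LOCK_EX) builds a lock that may block, while LOCK_EX | LOCK_NB still fails immediately. A minimal user-space sketch of the two behaviours (illustrative only, not part of this commit):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/file.h>

int main(void)
{
	int fd = open("/tmp/example.lock", O_CREAT | O_RDWR, 0644);
	if (fd < 0)
		return 1;

	/* Non-blocking attempt: fails with EWOULDBLOCK if another process
	 * already holds a conflicting lock. */
	if (flock(fd, LOCK_EX | LOCK_NB) < 0 && errno == EWOULDBLOCK)
		printf("busy, not waiting\n");

	/* Blocking attempt: sleeps (FL_SLEEP in the kernel) until granted. */
	if (flock(fd, LOCK_EX) == 0)
		printf("lock acquired\n");

	flock(fd, LOCK_UN);
	return 0;
}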
@@ -85,7 +85,8 @@ static inline void br_read_lock (enum brlock_indices idx)
 	if (idx >= __BR_END)
 		__br_lock_usage_bug();
-	read_lock(&__brlock_array[smp_processor_id()][idx]);
+	preempt_disable();
+	_raw_read_lock(&__brlock_array[smp_processor_id()][idx]);
 }
 static inline void br_read_unlock (enum brlock_indices idx)
@@ -109,6 +110,7 @@ static inline void br_read_lock (enum brlock_indices idx)
 	if (idx >= __BR_END)
 		__br_lock_usage_bug();
+	preempt_disable();
 	ctr = &__brlock_array[smp_processor_id()][idx];
 	lock = &__br_write_locks[idx].lock;
 again:
@@ -147,6 +149,7 @@ static inline void br_read_unlock (enum brlock_indices idx)
 	wmb();
 	(*ctr)--;
+	preempt_enable();
 }
 #endif /* __BRLOCK_USE_ATOMICS */
......
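Note: both brlock variants get the same treatment: preemption is disabled before smp_processor_id() selects the per-CPU slot. With the preemption-aware read_lock(), the array index is computed while the task can still migrate, so the unlock path could later touch a different CPU's entry. A rough sketch of the hazard and the fix (per_cpu_lock is a placeholder array, not a symbol from this commit):

static rwlock_t per_cpu_lock[NR_CPUS];

static void take_my_cpu_lock(void)
{
	/* Racy on a preemptible kernel: smp_processor_id() is evaluated as
	 * the argument before read_lock() disables preemption, so the task
	 * may migrate after choosing its slot:
	 *
	 *	read_lock(&per_cpu_lock[smp_processor_id()]);
	 *
	 * Safe: pin the task to this CPU first, then take the raw lock,
	 * which does not touch the preempt count again. */
	preempt_disable();
	_raw_read_lock(&per_cpu_lock[smp_processor_id()]);
	/* ... read-side critical section ... */
	_raw_read_unlock(&per_cpu_lock[smp_processor_id()]);
	preempt_enable();
}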
@@ -514,9 +514,10 @@ static inline void __netif_schedule(struct net_device *dev)
 {
 	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
 		unsigned long flags;
-		int cpu = smp_processor_id();
+		int cpu;
 		local_irq_save(flags);
+		cpu = smp_processor_id();
 		dev->next_sched = softnet_data[cpu].output_queue;
 		softnet_data[cpu].output_queue = dev;
 		cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
@@ -563,10 +564,11 @@ static inline int netif_running(struct net_device *dev)
 static inline void dev_kfree_skb_irq(struct sk_buff *skb)
 {
 	if (atomic_dec_and_test(&skb->users)) {
-		int cpu =smp_processor_id();
+		int cpu;
 		unsigned long flags;
 		local_irq_save(flags);
+		cpu = smp_processor_id();
 		skb->next = softnet_data[cpu].completion_queue;
 		softnet_data[cpu].completion_queue = skb;
 		cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
@@ -726,9 +728,10 @@ static inline int netif_rx_schedule_prep(struct net_device *dev)
 static inline void __netif_rx_schedule(struct net_device *dev)
 {
 	unsigned long flags;
-	int cpu = smp_processor_id();
+	int cpu;
 	local_irq_save(flags);
+	cpu = smp_processor_id();
 	dev_hold(dev);
 	list_add_tail(&dev->poll_list, &softnet_data[cpu].poll_list);
 	if (dev->quota < 0)
@@ -754,11 +757,12 @@ static inline int netif_rx_reschedule(struct net_device *dev, int undo)
 {
 	if (netif_rx_schedule_prep(dev)) {
 		unsigned long flags;
-		int cpu = smp_processor_id();
+		int cpu;
 		dev->quota += undo;
 		local_irq_save(flags);
+		cpu = smp_processor_id();
 		list_add_tail(&dev->poll_list, &softnet_data[cpu].poll_list);
 		__cpu_raise_softirq(cpu, NET_RX_SOFTIRQ);
 		local_irq_restore(flags);
......
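Note: all four netdevice.h helpers get the same fix: the CPU number is read only after local_irq_save(). On a preemptible kernel a CPU id captured at declaration time may already be stale when the per-CPU softnet_data slot is written; with local interrupts disabled the task cannot be preempted or migrated for the rest of the access. The general shape (per_cpu_stat is a placeholder, not a symbol from this commit):

static struct { unsigned long packets; } per_cpu_stat[NR_CPUS];

static inline void bump_stat(void)
{
	unsigned long flags;
	int cpu;

	local_irq_save(flags);		/* no interrupts, hence no preemption */
	cpu = smp_processor_id();	/* now safe to read the CPU id */
	per_cpu_stat[cpu].packets++;
	local_irq_restore(flags);
}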
@@ -86,9 +86,9 @@ extern void get_page_state(struct page_state *ret);
 #define mod_page_state(member, delta)	\
 	do {	\
-		preempt_disable();	\
-		page_states[smp_processor_id()].member += (delta);	\
-		preempt_enable();	\
+		int cpu = get_cpu();	\
+		page_states[cpu].member += (delta);	\
+		put_cpu();	\
 	} while (0)
 #define inc_page_state(member)	mod_page_state(member, 1UL)
......
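Note: get_cpu()/put_cpu() combine "disable preemption and return the current CPU number" with the matching "re-enable preemption", which also gives the macro a natural place to declare the cpu variable it indexes with. The same pattern in ordinary code (hit_count is an illustrative array, not from this commit):

static unsigned long hit_count[NR_CPUS];

static inline void count_hit(void)
{
	int cpu = get_cpu();	/* preempt_disable() + smp_processor_id() */

	hit_count[cpu]++;	/* cannot be migrated until put_cpu() */
	put_cpu();		/* preempt_enable() */
}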
@@ -626,7 +626,7 @@ NORET_TYPE void do_exit(long code)
 	tsk->flags |= PF_EXITING;
 	del_timer_sync(&tsk->real_timer);
-	if (unlikely(preempt_count()))
+	if (unlikely(in_atomic()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
 				current->comm, current->pid,
 				preempt_count());
......
@@ -472,6 +472,7 @@ void check_highmem_ptes(void)
 {
 	int idx, type;
+	preempt_disable();
 	for (type = 0; type < KM_TYPE_NR; type++) {
 		idx = type + KM_TYPE_NR*smp_processor_id();
 		if (!pte_none(*(kmap_pte-idx))) {
@@ -479,6 +480,7 @@ void check_highmem_ptes(void)
 			BUG();
 		}
 	}
+	preempt_enable();
 }
 #endif
@@ -193,6 +193,11 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 		if (error)
 			goto fail;
 	}
+	/*
+	 * Unless it returns an error, this function always sets *pprev to
+	 * the first vma for which vma->vm_end >= end.
+	 */
 	*pprev = vma;
 	if (end != vma->vm_end) {
 		error = split_vma(mm, vma, end, 0);
......
@@ -1357,11 +1357,7 @@ void* kmem_cache_alloc_batch(kmem_cache_t* cachep, int flags)
 		cc_entry(cc)[cc->avail++] =
 				kmem_cache_alloc_one_tail(cachep, slabp);
 	}
-	/*
-	 * CAREFUL: do not enable preemption yet, the per-CPU
-	 * entries rely on us being atomic.
-	 */
-	_raw_spin_unlock(&cachep->spinlock);
+	spin_unlock(&cachep->spinlock);
 	if (cc->avail)
 		return cc_entry(cc)[--cc->avail];
@@ -1389,8 +1385,6 @@ static inline void * __kmem_cache_alloc (kmem_cache_t *cachep, int flags)
 			STATS_INC_ALLOCMISS(cachep);
 			objp = kmem_cache_alloc_batch(cachep,flags);
 			local_irq_restore(save_flags);
-			/* end of non-preemptible region */
-			preempt_enable();
 			if (!objp)
 				goto alloc_new_slab_nolock;
 			return objp;
......
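Note: kmem_cache_alloc_batch() runs with local interrupts disabled by its caller (__kmem_cache_alloc() brackets the call with local_irq_save()/local_irq_restore(), visible in the second hunk), so nothing can preempt the per-CPU cc accesses once the cache spinlock is dropped. A plain spin_unlock() is therefore sufficient, and the caller's explicit preempt_enable() goes away with it. For reference, on CONFIG_PREEMPT kernels the regular primitives pair preemption control with the raw lock operation, roughly like this (simplified, not the exact kernel definitions):

#define spin_lock(lp)			\
	do {				\
		preempt_disable();	\
		_raw_spin_lock(lp);	\
	} while (0)

#define spin_unlock(lp)			\
	do {				\
		_raw_spin_unlock(lp);	\
		preempt_enable();	\
	} while (0)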