Commit 1ddc6dd8 authored by Linus Torvalds

Merge tag 'for-linus-4.2-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen bug fixes from David Vrabel:

 - don't lose interrupts when offlining CPUs

 - fix gntdev oops during unmap

 - drop the balloon lock occasionally to allow domain create/destroy

* tag 'for-linus-4.2-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/events/fifo: Handle linked events when closing a port
  xen: release lock occasionally during ballooning
  xen/gntdevt: Fix race condition in gntdev_release()
parents ed8bbba0 fcdf31a7
...@@ -472,7 +472,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) ...@@ -472,7 +472,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
} }
/* /*
* We avoid multiple worker processes conflicting via the balloon mutex. * As this is a work item it is guaranteed to run as a single instance only.
* We may of course race updates of the target counts (which are protected * We may of course race updates of the target counts (which are protected
* by the balloon lock), or with changes to the Xen hard limit, but we will * by the balloon lock), or with changes to the Xen hard limit, but we will
* recover from these in time. * recover from these in time.
...@@ -482,9 +482,10 @@ static void balloon_process(struct work_struct *work) ...@@ -482,9 +482,10 @@ static void balloon_process(struct work_struct *work)
enum bp_state state = BP_DONE; enum bp_state state = BP_DONE;
long credit; long credit;
mutex_lock(&balloon_mutex);
do { do {
mutex_lock(&balloon_mutex);
credit = current_credit(); credit = current_credit();
if (credit > 0) { if (credit > 0) {
...@@ -499,17 +500,15 @@ static void balloon_process(struct work_struct *work) ...@@ -499,17 +500,15 @@ static void balloon_process(struct work_struct *work)
state = update_schedule(state); state = update_schedule(state);
#ifndef CONFIG_PREEMPT mutex_unlock(&balloon_mutex);
if (need_resched())
schedule(); cond_resched();
#endif
} while (credit && state == BP_DONE); } while (credit && state == BP_DONE);
/* Schedule more work if there is some still to be done. */ /* Schedule more work if there is some still to be done. */
if (state == BP_EAGAIN) if (state == BP_EAGAIN)
schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ); schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
mutex_unlock(&balloon_mutex);
} }
/* Resets the Xen limit, sets new target, and kicks off processing. */ /* Resets the Xen limit, sets new target, and kicks off processing. */
......
...@@ -452,10 +452,12 @@ static void xen_free_irq(unsigned irq) ...@@ -452,10 +452,12 @@ static void xen_free_irq(unsigned irq)
irq_free_desc(irq); irq_free_desc(irq);
} }
static void xen_evtchn_close(unsigned int port) static void xen_evtchn_close(unsigned int port, unsigned int cpu)
{ {
struct evtchn_close close; struct evtchn_close close;
xen_evtchn_op_close(port, cpu);
close.port = port; close.port = port;
if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
BUG(); BUG();
...@@ -544,7 +546,7 @@ static unsigned int __startup_pirq(unsigned int irq) ...@@ -544,7 +546,7 @@ static unsigned int __startup_pirq(unsigned int irq)
err: err:
pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc); pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
xen_evtchn_close(evtchn); xen_evtchn_close(evtchn, NR_CPUS);
return 0; return 0;
} }
...@@ -565,7 +567,7 @@ static void shutdown_pirq(struct irq_data *data) ...@@ -565,7 +567,7 @@ static void shutdown_pirq(struct irq_data *data)
return; return;
mask_evtchn(evtchn); mask_evtchn(evtchn);
xen_evtchn_close(evtchn); xen_evtchn_close(evtchn, cpu_from_evtchn(evtchn));
xen_irq_info_cleanup(info); xen_irq_info_cleanup(info);
} }
...@@ -609,7 +611,7 @@ static void __unbind_from_irq(unsigned int irq) ...@@ -609,7 +611,7 @@ static void __unbind_from_irq(unsigned int irq)
if (VALID_EVTCHN(evtchn)) { if (VALID_EVTCHN(evtchn)) {
unsigned int cpu = cpu_from_irq(irq); unsigned int cpu = cpu_from_irq(irq);
xen_evtchn_close(evtchn); xen_evtchn_close(evtchn, cpu);
switch (type_from_irq(irq)) { switch (type_from_irq(irq)) {
case IRQT_VIRQ: case IRQT_VIRQ:
......
...@@ -255,6 +255,12 @@ static void evtchn_fifo_unmask(unsigned port) ...@@ -255,6 +255,12 @@ static void evtchn_fifo_unmask(unsigned port)
} }
} }
/*
 * Test the LINKED bit of the event word for @port.
 *
 * While LINKED is set the event is still on some CPU's event queue;
 * evtchn_fifo_close() spins on this to wait until an in-flight event
 * has been consumed before the port is torn down.
 */
static bool evtchn_fifo_is_linked(unsigned port)
{
	event_word_t *word = event_word_from_port(port);
	return sync_test_bit(EVTCHN_FIFO_BIT(LINKED, word), BM(word));
}
static uint32_t clear_linked(volatile event_word_t *word) static uint32_t clear_linked(volatile event_word_t *word)
{ {
event_word_t new, old, w; event_word_t new, old, w;
...@@ -281,7 +287,8 @@ static void handle_irq_for_port(unsigned port) ...@@ -281,7 +287,8 @@ static void handle_irq_for_port(unsigned port)
static void consume_one_event(unsigned cpu, static void consume_one_event(unsigned cpu,
struct evtchn_fifo_control_block *control_block, struct evtchn_fifo_control_block *control_block,
unsigned priority, unsigned long *ready) unsigned priority, unsigned long *ready,
bool drop)
{ {
struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
uint32_t head; uint32_t head;
...@@ -313,13 +320,15 @@ static void consume_one_event(unsigned cpu, ...@@ -313,13 +320,15 @@ static void consume_one_event(unsigned cpu,
if (head == 0) if (head == 0)
clear_bit(priority, ready); clear_bit(priority, ready);
if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) {
handle_irq_for_port(port); if (likely(!drop))
handle_irq_for_port(port);
}
q->head[priority] = head; q->head[priority] = head;
} }
static void evtchn_fifo_handle_events(unsigned cpu) static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
{ {
struct evtchn_fifo_control_block *control_block; struct evtchn_fifo_control_block *control_block;
unsigned long ready; unsigned long ready;
...@@ -331,11 +340,16 @@ static void evtchn_fifo_handle_events(unsigned cpu) ...@@ -331,11 +340,16 @@ static void evtchn_fifo_handle_events(unsigned cpu)
while (ready) { while (ready) {
q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES); q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
consume_one_event(cpu, control_block, q, &ready); consume_one_event(cpu, control_block, q, &ready, drop);
ready |= xchg(&control_block->ready, 0); ready |= xchg(&control_block->ready, 0);
} }
} }
/*
 * Normal event delivery path for @cpu: consume and handle all ready
 * events (drop == false, i.e. each pending unmasked event is passed
 * to handle_irq_for_port()).
 */
static void evtchn_fifo_handle_events(unsigned cpu)
{
	__evtchn_fifo_handle_events(cpu, false);
}
static void evtchn_fifo_resume(void) static void evtchn_fifo_resume(void)
{ {
unsigned cpu; unsigned cpu;
...@@ -371,6 +385,26 @@ static void evtchn_fifo_resume(void) ...@@ -371,6 +385,26 @@ static void evtchn_fifo_resume(void)
event_array_pages = 0; event_array_pages = 0;
} }
/*
 * Per-ABI close hook: make sure no event for @port is still linked on
 * @cpu's queue before the channel is closed with the hypervisor.
 *
 * @cpu == NR_CPUS means the caller has no bound CPU (e.g. the
 * __startup_pirq() error path) and there is nothing to drain.
 */
static void evtchn_fifo_close(unsigned port, unsigned int cpu)
{
	if (cpu == NR_CPUS)
		return;

	/* Hold the CPU online/offline state stable while we drain. */
	get_online_cpus();
	if (cpu_online(cpu)) {
		/*
		 * Busy-waiting for the event to be handled requires
		 * interrupts enabled on this CPU; bail out (with a
		 * warning) rather than deadlock if they are off.
		 */
		if (WARN_ON(irqs_disabled()))
			goto out;
		/* Wait for the online CPU to consume the linked event. */
		while (evtchn_fifo_is_linked(port))
			cpu_relax();
	} else {
		/*
		 * The CPU is offline and cannot process its queue;
		 * drain it here, dropping the events (drop == true).
		 */
		__evtchn_fifo_handle_events(cpu, true);
	}
out:
	put_online_cpus();
}
static const struct evtchn_ops evtchn_ops_fifo = { static const struct evtchn_ops evtchn_ops_fifo = {
.max_channels = evtchn_fifo_max_channels, .max_channels = evtchn_fifo_max_channels,
.nr_channels = evtchn_fifo_nr_channels, .nr_channels = evtchn_fifo_nr_channels,
...@@ -384,6 +418,7 @@ static const struct evtchn_ops evtchn_ops_fifo = { ...@@ -384,6 +418,7 @@ static const struct evtchn_ops evtchn_ops_fifo = {
.unmask = evtchn_fifo_unmask, .unmask = evtchn_fifo_unmask,
.handle_events = evtchn_fifo_handle_events, .handle_events = evtchn_fifo_handle_events,
.resume = evtchn_fifo_resume, .resume = evtchn_fifo_resume,
.close = evtchn_fifo_close,
}; };
static int evtchn_fifo_alloc_control_block(unsigned cpu) static int evtchn_fifo_alloc_control_block(unsigned cpu)
......
...@@ -68,6 +68,7 @@ struct evtchn_ops { ...@@ -68,6 +68,7 @@ struct evtchn_ops {
bool (*test_and_set_mask)(unsigned port); bool (*test_and_set_mask)(unsigned port);
void (*mask)(unsigned port); void (*mask)(unsigned port);
void (*unmask)(unsigned port); void (*unmask)(unsigned port);
void (*close)(unsigned port, unsigned cpu);
void (*handle_events)(unsigned cpu); void (*handle_events)(unsigned cpu);
void (*resume)(void); void (*resume)(void);
...@@ -145,6 +146,12 @@ static inline void xen_evtchn_resume(void) ...@@ -145,6 +146,12 @@ static inline void xen_evtchn_resume(void)
evtchn_ops->resume(); evtchn_ops->resume();
} }
/*
 * Notify the active event-channel ABI backend that @port (bound to
 * @cpu) is about to be closed, so it can release any per-port/per-CPU
 * queue state.  Backends that need no such cleanup leave ->close NULL,
 * in which case this is a no-op.
 */
static inline void xen_evtchn_op_close(unsigned port, unsigned cpu)
{
	/*
	 * Plain call, not "return expr;": returning an expression from
	 * a void function is an ISO C constraint violation (C11 6.8.6.4)
	 * and only compiles as a GCC extension.
	 */
	if (evtchn_ops->close)
		evtchn_ops->close(port, cpu);
}
void xen_evtchn_2l_init(void); void xen_evtchn_2l_init(void);
int xen_evtchn_fifo_init(void); int xen_evtchn_fifo_init(void);
......
...@@ -568,12 +568,14 @@ static int gntdev_release(struct inode *inode, struct file *flip) ...@@ -568,12 +568,14 @@ static int gntdev_release(struct inode *inode, struct file *flip)
pr_debug("priv %p\n", priv); pr_debug("priv %p\n", priv);
mutex_lock(&priv->lock);
while (!list_empty(&priv->maps)) { while (!list_empty(&priv->maps)) {
map = list_entry(priv->maps.next, struct grant_map, next); map = list_entry(priv->maps.next, struct grant_map, next);
list_del(&map->next); list_del(&map->next);
gntdev_put_map(NULL /* already removed */, map); gntdev_put_map(NULL /* already removed */, map);
} }
WARN_ON(!list_empty(&priv->freeable_maps)); WARN_ON(!list_empty(&priv->freeable_maps));
mutex_unlock(&priv->lock);
if (use_ptemod) if (use_ptemod)
mmu_notifier_unregister(&priv->mn, priv->mm); mmu_notifier_unregister(&priv->mn, priv->mm);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment