Commit e33ab8f2 authored by Linus Torvalds

Merge branches 'stable/irq', 'stable/p2m.bugfixes', 'stable/e820.bugfixes' and 'stable/mmu.bugfixes' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen

Merge branches 'stable/irq', 'stable/p2m.bugfixes', 'stable/e820.bugfixes' and 'stable/mmu.bugfixes' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen

* 'stable/irq' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  xen: do not clear and mask evtchns in __xen_evtchn_do_upcall

* 'stable/p2m.bugfixes' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  xen/p2m: Create entries in the P2M_MFN trees's to track 1-1 mappings

* 'stable/e820.bugfixes' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  xen/setup: Fix for incorrect xen_extra_mem_start initialization under 32-bit
  xen/setup: Ignore E820_UNUSABLE when setting 1-1 mappings.

* 'stable/mmu.bugfixes' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  xen mmu: fix a race window causing leave_mm BUG()
...@@ -1187,7 +1187,7 @@ static void drop_other_mm_ref(void *info) ...@@ -1187,7 +1187,7 @@ static void drop_other_mm_ref(void *info)
active_mm = percpu_read(cpu_tlbstate.active_mm); active_mm = percpu_read(cpu_tlbstate.active_mm);
if (active_mm == mm) if (active_mm == mm && percpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
leave_mm(smp_processor_id()); leave_mm(smp_processor_id());
/* If this cpu still has a stale cr3 reference, then make sure /* If this cpu still has a stale cr3 reference, then make sure
......
...@@ -522,11 +522,20 @@ static bool __init __early_alloc_p2m(unsigned long pfn) ...@@ -522,11 +522,20 @@ static bool __init __early_alloc_p2m(unsigned long pfn)
/* Boundary cross-over for the edges: */ /* Boundary cross-over for the edges: */
if (idx) { if (idx) {
unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE); unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
unsigned long *mid_mfn_p;
p2m_init(p2m); p2m_init(p2m);
p2m_top[topidx][mididx] = p2m; p2m_top[topidx][mididx] = p2m;
/* For save/restore we need to MFN of the P2M saved */
mid_mfn_p = p2m_top_mfn_p[topidx];
WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
"P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
topidx, mididx);
mid_mfn_p[mididx] = virt_to_mfn(p2m);
} }
return idx != 0; return idx != 0;
} }
...@@ -549,12 +558,29 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s, ...@@ -549,12 +558,29 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s,
pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE) pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE)
{ {
unsigned topidx = p2m_top_index(pfn); unsigned topidx = p2m_top_index(pfn);
if (p2m_top[topidx] == p2m_mid_missing) { unsigned long *mid_mfn_p;
unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE); unsigned long **mid;
mid = p2m_top[topidx];
mid_mfn_p = p2m_top_mfn_p[topidx];
if (mid == p2m_mid_missing) {
mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
p2m_mid_init(mid); p2m_mid_init(mid);
p2m_top[topidx] = mid; p2m_top[topidx] = mid;
BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
}
/* And the save/restore P2M tables.. */
if (mid_mfn_p == p2m_mid_missing_mfn) {
mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
p2m_mid_mfn_init(mid_mfn_p);
p2m_top_mfn_p[topidx] = mid_mfn_p;
p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
/* Note: we don't set mid_mfn_p[midix] here,
* look in __early_alloc_p2m */
} }
} }
......
...@@ -166,7 +166,7 @@ static unsigned long __init xen_set_identity(const struct e820entry *list, ...@@ -166,7 +166,7 @@ static unsigned long __init xen_set_identity(const struct e820entry *list,
if (last > end) if (last > end)
continue; continue;
if (entry->type == E820_RAM) { if ((entry->type == E820_RAM) || (entry->type == E820_UNUSABLE)) {
if (start > start_pci) if (start > start_pci)
identity += set_phys_range_identity( identity += set_phys_range_identity(
PFN_UP(start_pci), PFN_DOWN(start)); PFN_UP(start_pci), PFN_DOWN(start));
...@@ -227,7 +227,11 @@ char * __init xen_memory_setup(void) ...@@ -227,7 +227,11 @@ char * __init xen_memory_setup(void)
memcpy(map_raw, map, sizeof(map)); memcpy(map_raw, map, sizeof(map));
e820.nr_map = 0; e820.nr_map = 0;
#ifdef CONFIG_X86_32
xen_extra_mem_start = mem_end;
#else
xen_extra_mem_start = max((1ULL << 32), mem_end); xen_extra_mem_start = max((1ULL << 32), mem_end);
#endif
for (i = 0; i < memmap.nr_entries; i++) { for (i = 0; i < memmap.nr_entries; i++) {
unsigned long long end; unsigned long long end;
......
...@@ -119,6 +119,8 @@ static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG], ...@@ -119,6 +119,8 @@ static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
static struct irq_chip xen_dynamic_chip; static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip; static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip; static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);
/* Get info for IRQ */ /* Get info for IRQ */
static struct irq_info *info_for_irq(unsigned irq) static struct irq_info *info_for_irq(unsigned irq)
...@@ -476,16 +478,6 @@ static void xen_free_irq(unsigned irq) ...@@ -476,16 +478,6 @@ static void xen_free_irq(unsigned irq)
irq_free_desc(irq); irq_free_desc(irq);
} }
static void pirq_unmask_notify(int irq)
{
struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) };
if (unlikely(pirq_needs_eoi(irq))) {
int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
WARN_ON(rc);
}
}
static void pirq_query_unmask(int irq) static void pirq_query_unmask(int irq)
{ {
struct physdev_irq_status_query irq_status; struct physdev_irq_status_query irq_status;
...@@ -509,6 +501,29 @@ static bool probing_irq(int irq) ...@@ -509,6 +501,29 @@ static bool probing_irq(int irq)
return desc && desc->action == NULL; return desc && desc->action == NULL;
} }
static void eoi_pirq(struct irq_data *data)
{
int evtchn = evtchn_from_irq(data->irq);
struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
int rc = 0;
irq_move_irq(data);
if (VALID_EVTCHN(evtchn))
clear_evtchn(evtchn);
if (pirq_needs_eoi(data->irq)) {
rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
WARN_ON(rc);
}
}
static void mask_ack_pirq(struct irq_data *data)
{
disable_dynirq(data);
eoi_pirq(data);
}
static unsigned int __startup_pirq(unsigned int irq) static unsigned int __startup_pirq(unsigned int irq)
{ {
struct evtchn_bind_pirq bind_pirq; struct evtchn_bind_pirq bind_pirq;
...@@ -542,7 +557,7 @@ static unsigned int __startup_pirq(unsigned int irq) ...@@ -542,7 +557,7 @@ static unsigned int __startup_pirq(unsigned int irq)
out: out:
unmask_evtchn(evtchn); unmask_evtchn(evtchn);
pirq_unmask_notify(irq); eoi_pirq(irq_get_irq_data(irq));
return 0; return 0;
} }
...@@ -582,18 +597,7 @@ static void enable_pirq(struct irq_data *data) ...@@ -582,18 +597,7 @@ static void enable_pirq(struct irq_data *data)
static void disable_pirq(struct irq_data *data) static void disable_pirq(struct irq_data *data)
{ {
} disable_dynirq(data);
static void ack_pirq(struct irq_data *data)
{
int evtchn = evtchn_from_irq(data->irq);
irq_move_irq(data);
if (VALID_EVTCHN(evtchn)) {
mask_evtchn(evtchn);
clear_evtchn(evtchn);
}
} }
static int find_irq_by_gsi(unsigned gsi) static int find_irq_by_gsi(unsigned gsi)
...@@ -642,9 +646,6 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, ...@@ -642,9 +646,6 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
if (irq < 0) if (irq < 0)
goto out; goto out;
irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq,
name);
irq_op.irq = irq; irq_op.irq = irq;
irq_op.vector = 0; irq_op.vector = 0;
...@@ -661,6 +662,32 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, ...@@ -661,6 +662,32 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, DOMID_SELF, xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, DOMID_SELF,
shareable ? PIRQ_SHAREABLE : 0); shareable ? PIRQ_SHAREABLE : 0);
pirq_query_unmask(irq);
/* We try to use the handler with the appropriate semantic for the
* type of interrupt: if the interrupt doesn't need an eoi
* (pirq_needs_eoi returns false), we treat it like an edge
* triggered interrupt so we use handle_edge_irq.
* As a matter of fact this only happens when the corresponding
* physical interrupt is edge triggered or an msi.
*
* On the other hand if the interrupt needs an eoi (pirq_needs_eoi
* returns true) we treat it like a level triggered interrupt so we
* use handle_fasteoi_irq like the native code does for this kind of
* interrupts.
* Depending on the Xen version, pirq_needs_eoi might return true
* not only for level triggered interrupts but for edge triggered
* interrupts too. In any case Xen always honors the eoi mechanism,
* not injecting any more pirqs of the same kind if the first one
* hasn't received an eoi yet. Therefore using the fasteoi handler
* is the right choice either way.
*/
if (pirq_needs_eoi(irq))
irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
handle_fasteoi_irq, name);
else
irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
handle_edge_irq, name);
out: out:
spin_unlock(&irq_mapping_update_lock); spin_unlock(&irq_mapping_update_lock);
...@@ -694,7 +721,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, ...@@ -694,7 +721,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
if (irq == -1) if (irq == -1)
goto out; goto out;
irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq, irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
name); name);
xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0); xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0);
...@@ -790,7 +817,7 @@ int bind_evtchn_to_irq(unsigned int evtchn) ...@@ -790,7 +817,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
goto out; goto out;
irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
handle_fasteoi_irq, "event"); handle_edge_irq, "event");
xen_irq_info_evtchn_init(irq, evtchn); xen_irq_info_evtchn_init(irq, evtchn);
} }
...@@ -1196,9 +1223,6 @@ static void __xen_evtchn_do_upcall(void) ...@@ -1196,9 +1223,6 @@ static void __xen_evtchn_do_upcall(void)
port = (word_idx * BITS_PER_LONG) + bit_idx; port = (word_idx * BITS_PER_LONG) + bit_idx;
irq = evtchn_to_irq[port]; irq = evtchn_to_irq[port];
mask_evtchn(port);
clear_evtchn(port);
if (irq != -1) { if (irq != -1) {
desc = irq_to_desc(irq); desc = irq_to_desc(irq);
if (desc) if (desc)
...@@ -1354,10 +1378,16 @@ static void ack_dynirq(struct irq_data *data) ...@@ -1354,10 +1378,16 @@ static void ack_dynirq(struct irq_data *data)
{ {
int evtchn = evtchn_from_irq(data->irq); int evtchn = evtchn_from_irq(data->irq);
irq_move_masked_irq(data); irq_move_irq(data);
if (VALID_EVTCHN(evtchn)) if (VALID_EVTCHN(evtchn))
unmask_evtchn(evtchn); clear_evtchn(evtchn);
}
static void mask_ack_dynirq(struct irq_data *data)
{
disable_dynirq(data);
ack_dynirq(data);
} }
static int retrigger_dynirq(struct irq_data *data) static int retrigger_dynirq(struct irq_data *data)
...@@ -1564,7 +1594,9 @@ static struct irq_chip xen_dynamic_chip __read_mostly = { ...@@ -1564,7 +1594,9 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
.irq_mask = disable_dynirq, .irq_mask = disable_dynirq,
.irq_unmask = enable_dynirq, .irq_unmask = enable_dynirq,
.irq_eoi = ack_dynirq, .irq_ack = ack_dynirq,
.irq_mask_ack = mask_ack_dynirq,
.irq_set_affinity = set_affinity_irq, .irq_set_affinity = set_affinity_irq,
.irq_retrigger = retrigger_dynirq, .irq_retrigger = retrigger_dynirq,
}; };
...@@ -1574,14 +1606,15 @@ static struct irq_chip xen_pirq_chip __read_mostly = { ...@@ -1574,14 +1606,15 @@ static struct irq_chip xen_pirq_chip __read_mostly = {
.irq_startup = startup_pirq, .irq_startup = startup_pirq,
.irq_shutdown = shutdown_pirq, .irq_shutdown = shutdown_pirq,
.irq_enable = enable_pirq, .irq_enable = enable_pirq,
.irq_unmask = enable_pirq,
.irq_disable = disable_pirq, .irq_disable = disable_pirq,
.irq_mask = disable_pirq,
.irq_ack = ack_pirq, .irq_mask = disable_dynirq,
.irq_unmask = enable_dynirq,
.irq_ack = eoi_pirq,
.irq_eoi = eoi_pirq,
.irq_mask_ack = mask_ack_pirq,
.irq_set_affinity = set_affinity_irq, .irq_set_affinity = set_affinity_irq,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment