Commit 6eb1acd9 authored by Linus Torvalds

Merge tag 'for-linus-6.7a-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:

 - A fix in the Xen events driver avoiding the use of RCU after
   the call to rcu_report_dead() when taking a CPU down (the caching
   pattern behind this fix is sketched below)

 - A fix for running as Xen dom0 to line up ACPI's idea of power
   management capabilities with that of Xen (the flow is sketched below)

 - A cleanup eliminating several kernel-doc warnings in Xen related
   code

 - A cleanup series for the Xen events driver (its central change,
   looking up struct irq_info directly instead of IRQ numbers, is
   sketched in the note after the diff)

* tag 'for-linus-6.7a-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/events: remove some info_for_irq() calls in pirq handling
  xen/events: modify internal [un]bind interfaces
  xen/events: drop xen_allocate_irqs_dynamic()
  xen/events: remove some simple helpers from events_base.c
  xen/events: reduce externally visible helper functions
  xen/events: remove unused functions
  xen/events: fix delayed eoi list handling
  xen/shbuf: eliminate 17 kernel-doc warnings
  acpi/processor: sanitize _OSC/_PDC capabilities for Xen dom0
  xen/events: avoid using info_for_irq() in xen_send_IPI_one()
parents 372bed5f cee96422
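
Note on the RCU fix: the series below avoids RCU in the IPI send path by
caching each IPI's event channel per CPU at bind time, so that
xen_send_IPI_one() no longer has to resolve the RCU-protected irq_info while
a CPU is being taken down. A minimal standalone sketch of that pattern, with
simplified types; xen_notify() is a hypothetical stand-in for the real
event-channel notification:

    #include <stdint.h>

    #define XEN_NR_IPIS 8
    #define NR_CPUS 64

    typedef uint32_t evtchn_port_t;

    extern void xen_notify(evtchn_port_t port); /* hypothetical hook */

    /* Per-CPU cache: written at bind time, read lock-free at send time. */
    static evtchn_port_t ipi_to_evtchn[NR_CPUS][XEN_NR_IPIS];

    void bind_ipi(unsigned int cpu, unsigned int ipi, evtchn_port_t port)
    {
            ipi_to_evtchn[cpu][ipi] = port; /* cached alongside ipi_to_irq */
    }

    void send_ipi(unsigned int cpu, unsigned int ipi)
    {
            /* No irq_info/RCU lookup here - safe after rcu_report_dead(). */
            xen_notify(ipi_to_evtchn[cpu][ipi]);
    }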
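
Note on the dom0 ACPI fix: the _PDC processor capability bits are routed
through Xen before being reported, keeping only what the hypervisor's power
management driver also supports. A rough sketch of that flow, where
ask_xen_to_filter() is a hypothetical stand-in for the
XENPF_set_processor_pminfo platform hypercall shown in the diff:

    #include <stdint.h>
    #include <stdbool.h>

    extern bool xen_initial_domain(void);          /* hypothetical stand-ins */
    extern int ask_xen_to_filter(uint32_t pdc[3]); /* for the hypercall */

    void set_proc_cap_bits(uint32_t *cap)
    {
            /* ... native capability masking happens first ... */

            if (xen_initial_domain()) {
                    /* _PDC layout: revision id, dword count, capabilities. */
                    uint32_t buf[3] = { 1 /* ACPI_PDC_REVISION_ID */, 1, *cap };

                    if (ask_xen_to_filter(buf) == 0)
                            *cap = buf[2]; /* keep what Xen also supports */
            }
    }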
@@ -16,6 +16,9 @@
 #include <asm/x86_init.h>
 #include <asm/cpufeature.h>
 #include <asm/irq_vectors.h>
+#include <asm/xen/hypervisor.h>
+#include <xen/xen.h>
 
 #ifdef CONFIG_ACPI_APEI
 # include <asm/pgtable_types.h>
@@ -127,6 +130,17 @@ static inline void arch_acpi_set_proc_cap_bits(u32 *cap)
         if (!cpu_has(c, X86_FEATURE_MWAIT) ||
             boot_option_idle_override == IDLE_NOMWAIT)
                 *cap &= ~(ACPI_PROC_CAP_C_C1_FFH | ACPI_PROC_CAP_C_C2C3_FFH);
+
+        if (xen_initial_domain()) {
+                /*
+                 * When Linux is running as Xen dom0, the hypervisor is the
+                 * entity in charge of the processor power management, and so
+                 * Xen needs to check the OS capabilities reported in the
+                 * processor capabilities buffer matches what the hypervisor
+                 * driver supports.
+                 */
+                xen_sanitize_proc_cap_bits(cap);
+        }
 }
 
 static inline bool acpi_has_cpu_in_madt(void)
...
@@ -100,4 +100,13 @@ static inline void leave_lazy(enum xen_lazy_mode mode)
 
 enum xen_lazy_mode xen_get_lazy_mode(void);
 
+#if defined(CONFIG_XEN_DOM0) && defined(CONFIG_ACPI)
+void xen_sanitize_proc_cap_bits(uint32_t *buf);
+#else
+static inline void xen_sanitize_proc_cap_bits(uint32_t *buf)
+{
+        BUG();
+}
+#endif
+
 #endif /* _ASM_X86_XEN_HYPERVISOR_H */
@@ -171,11 +171,11 @@ static void evtchn_2l_handle_events(unsigned cpu, struct evtchn_loop_ctrl *ctrl)
         int i;
         struct shared_info *s = HYPERVISOR_shared_info;
         struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
+        evtchn_port_t evtchn;
 
         /* Timer interrupt has highest priority. */
-        irq = irq_from_virq(cpu, VIRQ_TIMER);
+        irq = irq_evtchn_from_virq(cpu, VIRQ_TIMER, &evtchn);
         if (irq != -1) {
-                evtchn_port_t evtchn = evtchn_from_irq(irq);
                 word_idx = evtchn / BITS_PER_LONG;
                 bit_idx = evtchn % BITS_PER_LONG;
                 if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx))
@@ -328,9 +328,9 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
         for (i = 0; i < EVTCHN_2L_NR_CHANNELS; i++) {
                 if (sync_test_bit(i, BM(sh->evtchn_pending))) {
                         int word_idx = i / BITS_PER_EVTCHN_WORD;
-                        printk("  %d: event %d -> irq %d%s%s%s\n",
+                        printk("  %d: event %d -> irq %u%s%s%s\n",
                                cpu_from_evtchn(i), i,
-                               get_evtchn_to_irq(i),
+                               irq_from_evtchn(i),
                                sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
                                ? "" : " l2-clear",
                                !sync_test_bit(i, BM(sh->evtchn_mask))
...
@@ -164,6 +164,8 @@ static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
 /* IRQ <-> IPI mapping */
 static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
+/* Cache for IPI event channels - needed for hot cpu unplug (avoid RCU usage). */
+static DEFINE_PER_CPU(evtchn_port_t [XEN_NR_IPIS], ipi_to_evtchn) = {[0 ... XEN_NR_IPIS-1] = 0};
 
 /* Event channel distribution data */
 static atomic_t channels_on_cpu[NR_CPUS];
@@ -172,7 +174,7 @@ static int **evtchn_to_irq;
 #ifdef CONFIG_X86
 static unsigned long *pirq_eoi_map;
 #endif
-static bool (*pirq_needs_eoi)(unsigned irq);
+static bool (*pirq_needs_eoi)(struct irq_info *info);
 
 #define EVTCHN_ROW(e)  (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
 #define EVTCHN_COL(e)  (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
@@ -188,7 +190,6 @@ static struct irq_chip xen_lateeoi_chip;
 static struct irq_chip xen_percpu_chip;
 static struct irq_chip xen_pirq_chip;
 static void enable_dynirq(struct irq_data *data);
-static void disable_dynirq(struct irq_data *data);
 
 static DEFINE_PER_CPU(unsigned int, irq_epoch);
@@ -246,15 +247,6 @@ static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
         return 0;
 }
 
-int get_evtchn_to_irq(evtchn_port_t evtchn)
-{
-        if (evtchn >= xen_evtchn_max_channels())
-                return -1;
-        if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
-                return -1;
-        return READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]);
-}
-
 /* Get info for IRQ */
 static struct irq_info *info_for_irq(unsigned irq)
 {
@@ -272,6 +264,19 @@ static void set_info_for_irq(unsigned int irq, struct irq_info *info)
         irq_set_chip_data(irq, info);
 }
 
+static struct irq_info *evtchn_to_info(evtchn_port_t evtchn)
+{
+        int irq;
+
+        if (evtchn >= xen_evtchn_max_channels())
+                return NULL;
+        if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
+                return NULL;
+        irq = READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]);
+
+        return (irq < 0) ? NULL : info_for_irq(irq);
+}
+
 /* Per CPU channel accounting */
 static void channels_on_cpu_dec(struct irq_info *info)
 {
@@ -298,6 +303,13 @@ static void channels_on_cpu_inc(struct irq_info *info)
         info->is_accounted = 1;
 }
 
+static void xen_irq_free_desc(unsigned int irq)
+{
+        /* Legacy IRQ descriptors are managed by the arch. */
+        if (irq >= nr_legacy_irqs())
+                irq_free_desc(irq);
+}
+
 static void delayed_free_irq(struct work_struct *work)
 {
         struct irq_info *info = container_of(to_rcu_work(work), struct irq_info,
@@ -309,14 +321,11 @@ static void delayed_free_irq(struct work_struct *work)
 
         kfree(info);
 
-        /* Legacy IRQ descriptors are managed by the arch. */
-        if (irq >= nr_legacy_irqs())
-                irq_free_desc(irq);
+        xen_irq_free_desc(irq);
 }
 
 /* Constructors for packed IRQ information. */
 static int xen_irq_info_common_setup(struct irq_info *info,
-                                     unsigned irq,
                                      enum xen_irq_type type,
                                      evtchn_port_t evtchn,
                                      unsigned short cpu)
@@ -326,29 +335,27 @@ static int xen_irq_info_common_setup(struct irq_info *info,
         BUG_ON(info->type != IRQT_UNBOUND && info->type != type);
 
         info->type = type;
-        info->irq = irq;
         info->evtchn = evtchn;
         info->cpu = cpu;
         info->mask_reason = EVT_MASK_REASON_EXPLICIT;
         raw_spin_lock_init(&info->lock);
 
-        ret = set_evtchn_to_irq(evtchn, irq);
+        ret = set_evtchn_to_irq(evtchn, info->irq);
         if (ret < 0)
                 return ret;
 
-        irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
+        irq_clear_status_flags(info->irq, IRQ_NOREQUEST | IRQ_NOAUTOEN);
 
         return xen_evtchn_port_setup(evtchn);
 }
 
-static int xen_irq_info_evtchn_setup(unsigned irq,
+static int xen_irq_info_evtchn_setup(struct irq_info *info,
                                      evtchn_port_t evtchn,
                                      struct xenbus_device *dev)
 {
-        struct irq_info *info = info_for_irq(irq);
         int ret;
 
-        ret = xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0);
+        ret = xen_irq_info_common_setup(info, IRQT_EVTCHN, evtchn, 0);
         info->u.interdomain = dev;
         if (dev)
                 atomic_inc(&dev->event_channels);
@@ -356,49 +363,37 @@ static int xen_irq_info_evtchn_setup(unsigned irq,
         return ret;
 }
 
-static int xen_irq_info_ipi_setup(unsigned cpu,
-                                  unsigned irq,
-                                  evtchn_port_t evtchn,
-                                  enum ipi_vector ipi)
+static int xen_irq_info_ipi_setup(struct irq_info *info, unsigned int cpu,
+                                  evtchn_port_t evtchn, enum ipi_vector ipi)
 {
-        struct irq_info *info = info_for_irq(irq);
-
         info->u.ipi = ipi;
 
-        per_cpu(ipi_to_irq, cpu)[ipi] = irq;
+        per_cpu(ipi_to_irq, cpu)[ipi] = info->irq;
+        per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
 
-        return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
+        return xen_irq_info_common_setup(info, IRQT_IPI, evtchn, 0);
 }
 
-static int xen_irq_info_virq_setup(unsigned cpu,
-                                   unsigned irq,
-                                   evtchn_port_t evtchn,
-                                   unsigned virq)
+static int xen_irq_info_virq_setup(struct irq_info *info, unsigned int cpu,
                                   evtchn_port_t evtchn, unsigned int virq)
 {
-        struct irq_info *info = info_for_irq(irq);
-
         info->u.virq = virq;
 
-        per_cpu(virq_to_irq, cpu)[virq] = irq;
+        per_cpu(virq_to_irq, cpu)[virq] = info->irq;
 
-        return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0);
+        return xen_irq_info_common_setup(info, IRQT_VIRQ, evtchn, 0);
 }
 
-static int xen_irq_info_pirq_setup(unsigned irq,
-                                   evtchn_port_t evtchn,
-                                   unsigned pirq,
-                                   unsigned gsi,
-                                   uint16_t domid,
-                                   unsigned char flags)
+static int xen_irq_info_pirq_setup(struct irq_info *info, evtchn_port_t evtchn,
+                                   unsigned int pirq, unsigned int gsi,
+                                   uint16_t domid, unsigned char flags)
 {
-        struct irq_info *info = info_for_irq(irq);
-
         info->u.pirq.pirq = pirq;
         info->u.pirq.gsi = gsi;
         info->u.pirq.domid = domid;
         info->u.pirq.flags = flags;
 
-        return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0);
+        return xen_irq_info_common_setup(info, IRQT_PIRQ, evtchn, 0);
 }
 
 static void xen_irq_info_cleanup(struct irq_info *info)
@@ -412,7 +407,7 @@ static void xen_irq_info_cleanup(struct irq_info *info)
 /*
  * Accessors for packed IRQ information.
  */
-evtchn_port_t evtchn_from_irq(unsigned irq)
+static evtchn_port_t evtchn_from_irq(unsigned int irq)
 {
         const struct irq_info *info = NULL;
 
@@ -426,64 +421,51 @@ evtchn_port_t evtchn_from_irq(unsigned irq)
 unsigned int irq_from_evtchn(evtchn_port_t evtchn)
 {
-        return get_evtchn_to_irq(evtchn);
+        struct irq_info *info = evtchn_to_info(evtchn);
+
+        return info ? info->irq : -1;
 }
 EXPORT_SYMBOL_GPL(irq_from_evtchn);
 
-int irq_from_virq(unsigned int cpu, unsigned int virq)
+int irq_evtchn_from_virq(unsigned int cpu, unsigned int virq,
+                         evtchn_port_t *evtchn)
 {
-        return per_cpu(virq_to_irq, cpu)[virq];
+        int irq = per_cpu(virq_to_irq, cpu)[virq];
+
+        *evtchn = evtchn_from_irq(irq);
+
+        return irq;
 }
 
-static enum ipi_vector ipi_from_irq(unsigned irq)
+static enum ipi_vector ipi_from_irq(struct irq_info *info)
 {
-        struct irq_info *info = info_for_irq(irq);
-
         BUG_ON(info == NULL);
         BUG_ON(info->type != IRQT_IPI);
 
         return info->u.ipi;
 }
 
-static unsigned virq_from_irq(unsigned irq)
+static unsigned int virq_from_irq(struct irq_info *info)
 {
-        struct irq_info *info = info_for_irq(irq);
-
         BUG_ON(info == NULL);
         BUG_ON(info->type != IRQT_VIRQ);
 
         return info->u.virq;
 }
 
-static unsigned pirq_from_irq(unsigned irq)
+static unsigned int pirq_from_irq(struct irq_info *info)
 {
-        struct irq_info *info = info_for_irq(irq);
-
         BUG_ON(info == NULL);
         BUG_ON(info->type != IRQT_PIRQ);
 
         return info->u.pirq.pirq;
 }
 
-static enum xen_irq_type type_from_irq(unsigned irq)
-{
-        return info_for_irq(irq)->type;
-}
-
-static unsigned cpu_from_irq(unsigned irq)
-{
-        return info_for_irq(irq)->cpu;
-}
-
 unsigned int cpu_from_evtchn(evtchn_port_t evtchn)
 {
-        int irq = get_evtchn_to_irq(evtchn);
-        unsigned ret = 0;
-
-        if (irq != -1)
-                ret = cpu_from_irq(irq);
+        struct irq_info *info = evtchn_to_info(evtchn);
 
-        return ret;
+        return info ? info->cpu : 0;
 }
 
 static void do_mask(struct irq_info *info, u8 reason)
@@ -515,36 +497,30 @@ static void do_unmask(struct irq_info *info, u8 reason)
 }
 
 #ifdef CONFIG_X86
-static bool pirq_check_eoi_map(unsigned irq)
+static bool pirq_check_eoi_map(struct irq_info *info)
 {
-        return test_bit(pirq_from_irq(irq), pirq_eoi_map);
+        return test_bit(pirq_from_irq(info), pirq_eoi_map);
 }
 #endif
 
-static bool pirq_needs_eoi_flag(unsigned irq)
+static bool pirq_needs_eoi_flag(struct irq_info *info)
 {
-        struct irq_info *info = info_for_irq(irq);
         BUG_ON(info->type != IRQT_PIRQ);
 
         return info->u.pirq.flags & PIRQ_NEEDS_EOI;
 }
 
-static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
+static void bind_evtchn_to_cpu(struct irq_info *info, unsigned int cpu,
                                bool force_affinity)
 {
-        int irq = get_evtchn_to_irq(evtchn);
-        struct irq_info *info = info_for_irq(irq);
-
-        BUG_ON(irq == -1);
-
         if (IS_ENABLED(CONFIG_SMP) && force_affinity) {
-                struct irq_data *data = irq_get_irq_data(irq);
+                struct irq_data *data = irq_get_irq_data(info->irq);
 
                 irq_data_update_affinity(data, cpumask_of(cpu));
                 irq_data_update_effective_affinity(data, cpumask_of(cpu));
         }
 
-        xen_evtchn_port_bind_to_cpu(evtchn, cpu, info->cpu);
+        xen_evtchn_port_bind_to_cpu(info->evtchn, cpu, info->cpu);
 
         channels_on_cpu_dec(info);
         info->cpu = cpu;
@@ -601,7 +577,9 @@ static void lateeoi_list_add(struct irq_info *info)
 
         spin_lock_irqsave(&eoi->eoi_list_lock, flags);
 
-        if (list_empty(&eoi->eoi_list)) {
+        elem = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
                                         eoi_list);
+        if (!elem || info->eoi_time < elem->eoi_time) {
                 list_add(&info->eoi_list, &eoi->eoi_list);
                 mod_delayed_work_on(info->eoi_cpu, system_wq,
                                     &eoi->delayed, delay);
@@ -732,14 +710,13 @@ void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
 }
 EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
 
-static void xen_irq_init(unsigned irq)
+static struct irq_info *xen_irq_init(unsigned int irq)
 {
         struct irq_info *info;
 
         info = kzalloc(sizeof(*info), GFP_KERNEL);
-        if (info == NULL)
-                panic("Unable to allocate metadata for IRQ%d\n", irq);
+        if (info) {
+                info->irq = irq;
                 info->type = IRQT_UNBOUND;
                 info->refcnt = -1;
                 INIT_RCU_WORK(&info->rwork, delayed_free_irq);
@@ -753,29 +730,29 @@ static void xen_irq_init(unsigned irq)
                 INIT_LIST_HEAD(&info->eoi_list);
                 list_add_tail(&info->list, &xen_irq_list_head);
+        }
+
+        return info;
 }
 
-static int __must_check xen_allocate_irqs_dynamic(int nvec)
+static struct irq_info *xen_allocate_irq_dynamic(void)
 {
-        int i, irq = irq_alloc_descs(-1, 0, nvec, -1);
+        int irq = irq_alloc_desc_from(0, -1);
+        struct irq_info *info = NULL;
 
         if (irq >= 0) {
-                for (i = 0; i < nvec; i++)
-                        xen_irq_init(irq + i);
+                info = xen_irq_init(irq);
+                if (!info)
+                        xen_irq_free_desc(irq);
         }
 
-        return irq;
+        return info;
 }
 
-static inline int __must_check xen_allocate_irq_dynamic(void)
-{
-        return xen_allocate_irqs_dynamic(1);
-}
-
-static int __must_check xen_allocate_irq_gsi(unsigned gsi)
+static struct irq_info *xen_allocate_irq_gsi(unsigned int gsi)
 {
         int irq;
+        struct irq_info *info;
 
         /*
          * A PV guest has no concept of a GSI (since it has no ACPI
@@ -792,15 +769,15 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
         else
                 irq = irq_alloc_desc_at(gsi, -1);
 
-        xen_irq_init(irq);
+        info = xen_irq_init(irq);
+        if (!info)
+                xen_irq_free_desc(irq);
 
-        return irq;
+        return info;
 }
 
-static void xen_free_irq(unsigned irq)
+static void xen_free_irq(struct irq_info *info)
 {
-        struct irq_info *info = info_for_irq(irq);
-
         if (WARN_ON(!info))
                 return;
 
@@ -821,14 +798,11 @@ static void event_handler_exit(struct irq_info *info)
         clear_evtchn(info->evtchn);
 }
 
-static void pirq_query_unmask(int irq)
+static void pirq_query_unmask(struct irq_info *info)
 {
         struct physdev_irq_status_query irq_status;
-        struct irq_info *info = info_for_irq(irq);
-
-        BUG_ON(info->type != IRQT_PIRQ);
 
-        irq_status.irq = pirq_from_irq(irq);
+        irq_status.irq = pirq_from_irq(info);
         if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
                 irq_status.flags = 0;
 
@@ -837,61 +811,81 @@ static void pirq_query_unmask(int irq)
                 info->u.pirq.flags |= PIRQ_NEEDS_EOI;
 }
 
-static void eoi_pirq(struct irq_data *data)
+static void do_eoi_pirq(struct irq_info *info)
 {
-        struct irq_info *info = info_for_irq(data->irq);
-        evtchn_port_t evtchn = info ? info->evtchn : 0;
-        struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
+        struct physdev_eoi eoi = { .irq = pirq_from_irq(info) };
         int rc = 0;
 
-        if (!VALID_EVTCHN(evtchn))
+        if (!VALID_EVTCHN(info->evtchn))
                 return;
 
         event_handler_exit(info);
 
-        if (pirq_needs_eoi(data->irq)) {
+        if (pirq_needs_eoi(info)) {
                 rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
                 WARN_ON(rc);
         }
 }
+static void eoi_pirq(struct irq_data *data)
+{
+        struct irq_info *info = info_for_irq(data->irq);
+
+        do_eoi_pirq(info);
+}
+
+static void do_disable_dynirq(struct irq_info *info)
+{
+        if (VALID_EVTCHN(info->evtchn))
+                do_mask(info, EVT_MASK_REASON_EXPLICIT);
+}
+
+static void disable_dynirq(struct irq_data *data)
+{
+        struct irq_info *info = info_for_irq(data->irq);
+
+        if (info)
+                do_disable_dynirq(info);
+}
+
 static void mask_ack_pirq(struct irq_data *data)
 {
-        disable_dynirq(data);
-        eoi_pirq(data);
+        struct irq_info *info = info_for_irq(data->irq);
+
+        if (info) {
+                do_disable_dynirq(info);
+                do_eoi_pirq(info);
+        }
 }
 
-static unsigned int __startup_pirq(unsigned int irq)
+static unsigned int __startup_pirq(struct irq_info *info)
 {
         struct evtchn_bind_pirq bind_pirq;
-        struct irq_info *info = info_for_irq(irq);
-        evtchn_port_t evtchn = evtchn_from_irq(irq);
+        evtchn_port_t evtchn = info->evtchn;
         int rc;
 
-        BUG_ON(info->type != IRQT_PIRQ);
-
         if (VALID_EVTCHN(evtchn))
                 goto out;
 
-        bind_pirq.pirq = pirq_from_irq(irq);
+        bind_pirq.pirq = pirq_from_irq(info);
         /* NB. We are happy to share unless we are probing. */
         bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
                                         BIND_PIRQ__WILL_SHARE : 0;
         rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
         if (rc != 0) {
-                pr_warn("Failed to obtain physical IRQ %d\n", irq);
+                pr_warn("Failed to obtain physical IRQ %d\n", info->irq);
                 return 0;
         }
         evtchn = bind_pirq.port;
 
-        pirq_query_unmask(irq);
+        pirq_query_unmask(info);
 
-        rc = set_evtchn_to_irq(evtchn, irq);
+        rc = set_evtchn_to_irq(evtchn, info->irq);
         if (rc)
                 goto err;
 
         info->evtchn = evtchn;
-        bind_evtchn_to_cpu(evtchn, 0, false);
+        bind_evtchn_to_cpu(info, 0, false);
 
         rc = xen_evtchn_port_setup(evtchn);
         if (rc)
@@ -900,26 +894,28 @@ static unsigned int __startup_pirq(unsigned int irq)
 out:
         do_unmask(info, EVT_MASK_REASON_EXPLICIT);
 
-        eoi_pirq(irq_get_irq_data(irq));
+        do_eoi_pirq(info);
 
         return 0;
 
 err:
-        pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
+        pr_err("irq%d: Failed to set port to irq mapping (%d)\n", info->irq,
+               rc);
         xen_evtchn_close(evtchn);
         return 0;
 }
 
 static unsigned int startup_pirq(struct irq_data *data)
 {
-        return __startup_pirq(data->irq);
+        struct irq_info *info = info_for_irq(data->irq);
+
+        return __startup_pirq(info);
 }
 
 static void shutdown_pirq(struct irq_data *data)
 {
-        unsigned int irq = data->irq;
-        struct irq_info *info = info_for_irq(irq);
-        evtchn_port_t evtchn = evtchn_from_irq(irq);
+        struct irq_info *info = info_for_irq(data->irq);
+        evtchn_port_t evtchn = info->evtchn;
 
         BUG_ON(info->type != IRQT_PIRQ);
@@ -957,10 +953,14 @@ int xen_irq_from_gsi(unsigned gsi)
 }
 EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
 
-static void __unbind_from_irq(unsigned int irq)
+static void __unbind_from_irq(struct irq_info *info, unsigned int irq)
 {
-        evtchn_port_t evtchn = evtchn_from_irq(irq);
-        struct irq_info *info = info_for_irq(irq);
+        evtchn_port_t evtchn;
+
+        if (!info) {
+                xen_irq_free_desc(irq);
+                return;
+        }
 
         if (info->refcnt > 0) {
                 info->refcnt--;
@@ -968,19 +968,22 @@ static void __unbind_from_irq(unsigned int irq)
                 return;
         }
 
+        evtchn = info->evtchn;
+
         if (VALID_EVTCHN(evtchn)) {
-                unsigned int cpu = cpu_from_irq(irq);
+                unsigned int cpu = info->cpu;
                 struct xenbus_device *dev;
 
                 if (!info->is_static)
                         xen_evtchn_close(evtchn);
 
-                switch (type_from_irq(irq)) {
+                switch (info->type) {
                 case IRQT_VIRQ:
-                        per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
+                        per_cpu(virq_to_irq, cpu)[virq_from_irq(info)] = -1;
                         break;
                 case IRQT_IPI:
-                        per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
+                        per_cpu(ipi_to_irq, cpu)[ipi_from_irq(info)] = -1;
+                        per_cpu(ipi_to_evtchn, cpu)[ipi_from_irq(info)] = 0;
                         break;
                 case IRQT_EVTCHN:
                         dev = info->u.interdomain;
@@ -994,7 +997,7 @@ static void __unbind_from_irq(unsigned int irq)
                 xen_irq_info_cleanup(info);
         }
 
-        xen_free_irq(irq);
+        xen_free_irq(info);
 }
 /*
@@ -1010,24 +1013,24 @@ static void __unbind_from_irq(unsigned int irq)
 int xen_bind_pirq_gsi_to_irq(unsigned gsi,
                              unsigned pirq, int shareable, char *name)
 {
-        int irq;
+        struct irq_info *info;
         struct physdev_irq irq_op;
         int ret;
 
         mutex_lock(&irq_mapping_update_lock);
 
-        irq = xen_irq_from_gsi(gsi);
-        if (irq != -1) {
+        ret = xen_irq_from_gsi(gsi);
+        if (ret != -1) {
                 pr_info("%s: returning irq %d for gsi %u\n",
-                        __func__, irq, gsi);
+                        __func__, ret, gsi);
                 goto out;
         }
 
-        irq = xen_allocate_irq_gsi(gsi);
-        if (irq < 0)
+        info = xen_allocate_irq_gsi(gsi);
+        if (!info)
                 goto out;
 
-        irq_op.irq = irq;
+        irq_op.irq = info->irq;
         irq_op.vector = 0;
 
         /* Only the privileged domain can do this. For non-priv, the pcifront
@@ -1035,20 +1038,19 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
          * this in the priv domain. */
         if (xen_initial_domain() &&
             HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
-                xen_free_irq(irq);
-                irq = -ENOSPC;
+                xen_free_irq(info);
+                ret = -ENOSPC;
                 goto out;
         }
 
-        ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF,
+        ret = xen_irq_info_pirq_setup(info, 0, pirq, gsi, DOMID_SELF,
                                       shareable ? PIRQ_SHAREABLE : 0);
         if (ret < 0) {
-                __unbind_from_irq(irq);
-                irq = ret;
+                __unbind_from_irq(info, info->irq);
                 goto out;
         }
 
-        pirq_query_unmask(irq);
+        pirq_query_unmask(info);
         /* We try to use the handler with the appropriate semantic for the
          * type of interrupt: if the interrupt is an edge triggered
          * interrupt we use handle_edge_irq.
@@ -1065,16 +1067,18 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
          * is the right choice either way.
          */
         if (shareable)
-                irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
+                irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip,
                                               handle_fasteoi_irq, name);
         else
-                irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
+                irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip,
                                               handle_edge_irq, name);
 
+        ret = info->irq;
+
 out:
         mutex_unlock(&irq_mapping_update_lock);
 
-        return irq;
+        return ret;
 }
 
 #ifdef CONFIG_PCI_MSI
@@ -1096,17 +1100,22 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
                              int pirq, int nvec, const char *name, domid_t domid)
 {
         int i, irq, ret;
+        struct irq_info *info;
 
         mutex_lock(&irq_mapping_update_lock);
 
-        irq = xen_allocate_irqs_dynamic(nvec);
+        irq = irq_alloc_descs(-1, 0, nvec, -1);
         if (irq < 0)
                 goto out;
 
         for (i = 0; i < nvec; i++) {
+                info = xen_irq_init(irq + i);
+                if (!info)
+                        goto error_irq;
+
                 irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name);
 
-                ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid,
+                ret = xen_irq_info_pirq_setup(info, 0, pirq + i, 0, domid,
                                               i == 0 ? 0 : PIRQ_MSI_GROUP);
                 if (ret < 0)
                         goto error_irq;
@@ -1118,9 +1127,12 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
 out:
         mutex_unlock(&irq_mapping_update_lock);
         return irq;
+
 error_irq:
-        while (nvec--)
-                __unbind_from_irq(irq + nvec);
+        while (nvec--) {
+                info = info_for_irq(irq + nvec);
+                __unbind_from_irq(info, irq + nvec);
+        }
         mutex_unlock(&irq_mapping_update_lock);
         return ret;
 }
@@ -1156,67 +1168,45 @@ int xen_destroy_irq(int irq)
                 }
         }
 
-        xen_free_irq(irq);
+        xen_free_irq(info);
 
 out:
         mutex_unlock(&irq_mapping_update_lock);
         return rc;
 }
-int xen_irq_from_pirq(unsigned pirq)
-{
-        int irq;
-        struct irq_info *info;
-
-        mutex_lock(&irq_mapping_update_lock);
-
-        list_for_each_entry(info, &xen_irq_list_head, list) {
-                if (info->type != IRQT_PIRQ)
-                        continue;
-                irq = info->irq;
-                if (info->u.pirq.pirq == pirq)
-                        goto out;
-        }
-        irq = -1;
-out:
-        mutex_unlock(&irq_mapping_update_lock);
-
-        return irq;
-}
-
 int xen_pirq_from_irq(unsigned irq)
 {
-        return pirq_from_irq(irq);
+        struct irq_info *info = info_for_irq(irq);
+
+        return pirq_from_irq(info);
 }
 EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
 
 static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip,
                                    struct xenbus_device *dev)
 {
-        int irq;
-        int ret;
+        int ret = -ENOMEM;
+        struct irq_info *info;
 
         if (evtchn >= xen_evtchn_max_channels())
                 return -ENOMEM;
 
         mutex_lock(&irq_mapping_update_lock);
 
-        irq = get_evtchn_to_irq(evtchn);
-        if (irq == -1) {
-                irq = xen_allocate_irq_dynamic();
-                if (irq < 0)
+        info = evtchn_to_info(evtchn);
+        if (!info) {
+                info = xen_allocate_irq_dynamic();
+                if (!info)
                         goto out;
 
-                irq_set_chip_and_handler_name(irq, chip,
+                irq_set_chip_and_handler_name(info->irq, chip,
                                               handle_edge_irq, "event");
 
-                ret = xen_irq_info_evtchn_setup(irq, evtchn, dev);
+                ret = xen_irq_info_evtchn_setup(info, evtchn, dev);
                 if (ret < 0) {
-                        __unbind_from_irq(irq);
-                        irq = ret;
+                        __unbind_from_irq(info, info->irq);
                         goto out;
                 }
 
                 /*
@@ -1226,17 +1216,17 @@ static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip,
                  * affinity setting is not invoked on them so nothing would
                  * bind the channel.
                  */
-                bind_evtchn_to_cpu(evtchn, 0, false);
-        } else {
-                struct irq_info *info = info_for_irq(irq);
-
-                if (!WARN_ON(!info || info->type != IRQT_EVTCHN))
-                        info->refcnt++;
+                bind_evtchn_to_cpu(info, 0, false);
+        } else if (!WARN_ON(info->type != IRQT_EVTCHN)) {
+                info->refcnt++;
         }
 
+        ret = info->irq;
+
 out:
         mutex_unlock(&irq_mapping_update_lock);
 
-        return irq;
+        return ret;
 }
 
 int bind_evtchn_to_irq(evtchn_port_t evtchn)
@@ -1255,18 +1245,19 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 {
         struct evtchn_bind_ipi bind_ipi;
         evtchn_port_t evtchn;
-        int ret, irq;
+        struct irq_info *info;
+        int ret;
 
         mutex_lock(&irq_mapping_update_lock);
 
-        irq = per_cpu(ipi_to_irq, cpu)[ipi];
+        ret = per_cpu(ipi_to_irq, cpu)[ipi];
 
-        if (irq == -1) {
-                irq = xen_allocate_irq_dynamic();
-                if (irq < 0)
+        if (ret == -1) {
+                info = xen_allocate_irq_dynamic();
+                if (!info)
                         goto out;
 
-                irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
+                irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip,
                                               handle_percpu_irq, "ipi");
 
                 bind_ipi.vcpu = xen_vcpu_nr(cpu);
@@ -1275,25 +1266,25 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
                         BUG();
                 evtchn = bind_ipi.port;
 
-                ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
+                ret = xen_irq_info_ipi_setup(info, cpu, evtchn, ipi);
                 if (ret < 0) {
-                        __unbind_from_irq(irq);
-                        irq = ret;
+                        __unbind_from_irq(info, info->irq);
                         goto out;
                 }
                 /*
                  * Force the affinity mask to the target CPU so proc shows
                  * the correct target.
                  */
-                bind_evtchn_to_cpu(evtchn, cpu, true);
+                bind_evtchn_to_cpu(info, cpu, true);
+                ret = info->irq;
         } else {
-                struct irq_info *info = info_for_irq(irq);
+                info = info_for_irq(ret);
                 WARN_ON(info == NULL || info->type != IRQT_IPI);
         }
 
 out:
         mutex_unlock(&irq_mapping_update_lock);
 
-        return irq;
+        return ret;
 }
 static int bind_interdomain_evtchn_to_irq_chip(struct xenbus_device *dev,
@@ -1361,22 +1352,23 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
 {
         struct evtchn_bind_virq bind_virq;
         evtchn_port_t evtchn = 0;
-        int irq, ret;
+        struct irq_info *info;
+        int ret;
 
         mutex_lock(&irq_mapping_update_lock);
 
-        irq = per_cpu(virq_to_irq, cpu)[virq];
+        ret = per_cpu(virq_to_irq, cpu)[virq];
 
-        if (irq == -1) {
-                irq = xen_allocate_irq_dynamic();
-                if (irq < 0)
+        if (ret == -1) {
+                info = xen_allocate_irq_dynamic();
+                if (!info)
                         goto out;
 
                 if (percpu)
-                        irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
+                        irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip,
                                                       handle_percpu_irq, "virq");
                 else
-                        irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
+                        irq_set_chip_and_handler_name(info->irq, &xen_dynamic_chip,
                                                       handle_edge_irq, "virq");
 
                 bind_virq.virq = virq;
@@ -1391,10 +1383,9 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
                         BUG_ON(ret < 0);
                 }
 
-                ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
+                ret = xen_irq_info_virq_setup(info, cpu, evtchn, virq);
                 if (ret < 0) {
-                        __unbind_from_irq(irq);
-                        irq = ret;
+                        __unbind_from_irq(info, info->irq);
                         goto out;
                 }
 
@@ -1402,22 +1393,26 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
                  * Force the affinity mask for percpu interrupts so proc
                  * shows the correct target.
                  */
-                bind_evtchn_to_cpu(evtchn, cpu, percpu);
+                bind_evtchn_to_cpu(info, cpu, percpu);
+                ret = info->irq;
         } else {
-                struct irq_info *info = info_for_irq(irq);
+                info = info_for_irq(ret);
                 WARN_ON(info == NULL || info->type != IRQT_VIRQ);
         }
 
 out:
         mutex_unlock(&irq_mapping_update_lock);
 
-        return irq;
+        return ret;
 }
 
 static void unbind_from_irq(unsigned int irq)
 {
+        struct irq_info *info;
+
         mutex_lock(&irq_mapping_update_lock);
-        __unbind_from_irq(irq);
+        info = info_for_irq(irq);
+        __unbind_from_irq(info, irq);
         mutex_unlock(&irq_mapping_update_lock);
 }
 
@@ -1568,13 +1563,7 @@ EXPORT_SYMBOL_GPL(xen_set_irq_priority);
 int evtchn_make_refcounted(evtchn_port_t evtchn, bool is_static)
 {
-        int irq = get_evtchn_to_irq(evtchn);
-        struct irq_info *info;
-
-        if (irq == -1)
-                return -ENOENT;
-
-        info = info_for_irq(irq);
+        struct irq_info *info = evtchn_to_info(evtchn);
 
         if (!info)
                 return -ENOENT;
@@ -1590,7 +1579,6 @@ EXPORT_SYMBOL_GPL(evtchn_make_refcounted);
 
 int evtchn_get(evtchn_port_t evtchn)
 {
-        int irq;
         struct irq_info *info;
         int err = -ENOENT;
 
@@ -1599,11 +1587,7 @@ int evtchn_get(evtchn_port_t evtchn)
 
         mutex_lock(&irq_mapping_update_lock);
 
-        irq = get_evtchn_to_irq(evtchn);
-        if (irq == -1)
-                goto done;
-
-        info = info_for_irq(irq);
+        info = evtchn_to_info(evtchn);
 
         if (!info)
                 goto done;
@@ -1623,16 +1607,17 @@ EXPORT_SYMBOL_GPL(evtchn_get);
 
 void evtchn_put(evtchn_port_t evtchn)
 {
-        int irq = get_evtchn_to_irq(evtchn);
-        if (WARN_ON(irq == -1))
+        struct irq_info *info = evtchn_to_info(evtchn);
+
+        if (WARN_ON(!info))
                 return;
-        unbind_from_irq(irq);
+        unbind_from_irq(info->irq);
 }
 EXPORT_SYMBOL_GPL(evtchn_put);
 
 void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
 {
-        int irq;
+        evtchn_port_t evtchn;
 
 #ifdef CONFIG_X86
         if (unlikely(vector == XEN_NMI_VECTOR)) {
@@ -1643,9 +1628,9 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
                 return;
         }
 #endif
-        irq = per_cpu(ipi_to_irq, cpu)[vector];
-        BUG_ON(irq < 0);
-        notify_remote_via_irq(irq);
+        evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
+        BUG_ON(evtchn == 0);
+        notify_remote_via_evtchn(evtchn);
 }
 struct evtchn_loop_ctrl {
@@ -1656,12 +1641,10 @@ struct evtchn_loop_ctrl {
 
 void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
 {
-        int irq;
-        struct irq_info *info;
+        struct irq_info *info = evtchn_to_info(port);
         struct xenbus_device *dev;
 
-        irq = get_evtchn_to_irq(port);
-        if (irq == -1)
+        if (!info)
                 return;
 
         /*
@@ -1686,7 +1669,6 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
                 }
         }
 
-        info = info_for_irq(irq);
-
         if (xchg_acquire(&info->is_active, 1))
                 return;
@@ -1700,7 +1682,7 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
                 info->eoi_time = get_jiffies_64() + event_eoi_delay;
         }
 
-        generic_handle_irq(irq);
+        generic_handle_irq(info->irq);
 }
 
 int xen_evtchn_do_upcall(void)
@@ -1758,16 +1740,17 @@ void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
         mutex_lock(&irq_mapping_update_lock);
 
         /* After resume the irq<->evtchn mappings are all cleared out */
-        BUG_ON(get_evtchn_to_irq(evtchn) != -1);
+        BUG_ON(evtchn_to_info(evtchn));
         /* Expect irq to have been bound before,
            so there should be a proper type */
         BUG_ON(info->type == IRQT_UNBOUND);
 
-        (void)xen_irq_info_evtchn_setup(irq, evtchn, NULL);
+        info->irq = irq;
+        (void)xen_irq_info_evtchn_setup(info, evtchn, NULL);
 
         mutex_unlock(&irq_mapping_update_lock);
 
-        bind_evtchn_to_cpu(evtchn, info->cpu, false);
+        bind_evtchn_to_cpu(info, info->cpu, false);
 
         /* Unmask the event channel. */
         enable_irq(irq);
@@ -1801,7 +1784,7 @@ static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
          * it, but don't do the xenlinux-level rebind in that case.
          */
         if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
-                bind_evtchn_to_cpu(evtchn, tcpu, false);
+                bind_evtchn_to_cpu(info, tcpu, false);
 
         do_unmask(info, EVT_MASK_REASON_TEMPORARY);
@@ -1858,28 +1841,30 @@ static void enable_dynirq(struct irq_data *data)
                 do_unmask(info, EVT_MASK_REASON_EXPLICIT);
 }
 
-static void disable_dynirq(struct irq_data *data)
+static void do_ack_dynirq(struct irq_info *info)
 {
-        struct irq_info *info = info_for_irq(data->irq);
-        evtchn_port_t evtchn = info ? info->evtchn : 0;
+        evtchn_port_t evtchn = info->evtchn;
 
         if (VALID_EVTCHN(evtchn))
-                do_mask(info, EVT_MASK_REASON_EXPLICIT);
+                event_handler_exit(info);
 }
 
 static void ack_dynirq(struct irq_data *data)
 {
         struct irq_info *info = info_for_irq(data->irq);
-        evtchn_port_t evtchn = info ? info->evtchn : 0;
 
-        if (VALID_EVTCHN(evtchn))
-                event_handler_exit(info);
+        if (info)
+                do_ack_dynirq(info);
 }
 
 static void mask_ack_dynirq(struct irq_data *data)
 {
-        disable_dynirq(data);
-        ack_dynirq(data);
+        struct irq_info *info = info_for_irq(data->irq);
+
+        if (info) {
+                do_disable_dynirq(info);
+                do_ack_dynirq(info);
+        }
 }
 
 static void lateeoi_ack_dynirq(struct irq_data *data)
@@ -1952,13 +1937,13 @@ static void restore_pirqs(void)
                 if (rc) {
                         pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
                                 gsi, irq, pirq, rc);
-                        xen_free_irq(irq);
+                        xen_free_irq(info);
                         continue;
                 }
 
                 printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
 
-                __startup_pirq(irq);
+                __startup_pirq(info);
         }
 }
 
@@ -1966,13 +1951,15 @@ static void restore_cpu_virqs(unsigned int cpu)
 {
         struct evtchn_bind_virq bind_virq;
         evtchn_port_t evtchn;
+        struct irq_info *info;
         int virq, irq;
 
         for (virq = 0; virq < NR_VIRQS; virq++) {
                 if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
                         continue;
+                info = info_for_irq(irq);
 
-                BUG_ON(virq_from_irq(irq) != virq);
+                BUG_ON(virq_from_irq(info) != virq);
 
                 /* Get a new binding from Xen. */
                 bind_virq.virq = virq;
@@ -1983,9 +1970,9 @@ static void restore_cpu_virqs(unsigned int cpu)
                 evtchn = bind_virq.port;
 
                 /* Record the new mapping. */
-                (void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
+                xen_irq_info_virq_setup(info, cpu, evtchn, virq);
                 /* The affinity mask is still valid */
-                bind_evtchn_to_cpu(evtchn, cpu, false);
+                bind_evtchn_to_cpu(info, cpu, false);
         }
 }
 
@@ -1993,13 +1980,15 @@ static void restore_cpu_ipis(unsigned int cpu)
 {
         struct evtchn_bind_ipi bind_ipi;
         evtchn_port_t evtchn;
+        struct irq_info *info;
         int ipi, irq;
 
         for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
                 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
                         continue;
+                info = info_for_irq(irq);
 
-                BUG_ON(ipi_from_irq(irq) != ipi);
+                BUG_ON(ipi_from_irq(info) != ipi);
 
                 /* Get a new binding from Xen. */
                 bind_ipi.vcpu = xen_vcpu_nr(cpu);
@@ -2009,9 +1998,9 @@ static void restore_cpu_ipis(unsigned int cpu)
                 evtchn = bind_ipi.port;
 
                 /* Record the new mapping. */
-                (void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
+                xen_irq_info_ipi_setup(info, cpu, evtchn, ipi);
                 /* The affinity mask is still valid */
-                bind_evtchn_to_cpu(evtchn, cpu, false);
+                bind_evtchn_to_cpu(info, cpu, false);
         }
 }
 
@@ -2025,13 +2014,6 @@ void xen_clear_irq_pending(int irq)
                 event_handler_exit(info);
 }
 EXPORT_SYMBOL(xen_clear_irq_pending);
-
-void xen_set_irq_pending(int irq)
-{
-        evtchn_port_t evtchn = evtchn_from_irq(irq);
-
-        if (VALID_EVTCHN(evtchn))
-                set_evtchn(evtchn);
-}
 
 bool xen_test_irq_pending(int irq)
 {
...
@@ -33,7 +33,6 @@ struct evtchn_ops {
 
 extern const struct evtchn_ops *evtchn_ops;
 
-int get_evtchn_to_irq(evtchn_port_t evtchn);
 void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl);
 
 unsigned int cpu_from_evtchn(evtchn_port_t evtchn);
...
@@ -47,6 +47,9 @@
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
 
+#ifdef CONFIG_ACPI
+#include <acpi/processor.h>
+#endif
+
 /*
  * @cpu_id: Xen physical cpu logic number
@@ -400,4 +403,23 @@ bool __init xen_processor_present(uint32_t acpi_id)
 
         return online;
 }
+
+void xen_sanitize_proc_cap_bits(uint32_t *cap)
+{
+        struct xen_platform_op op = {
+                .cmd = XENPF_set_processor_pminfo,
+                .u.set_pminfo.id = -1,
+                .u.set_pminfo.type = XEN_PM_PDC,
+        };
+        u32 buf[3] = { ACPI_PDC_REVISION_ID, 1, *cap };
+        int ret;
+
+        set_xen_guest_handle(op.u.set_pminfo.pdc, buf);
+        ret = HYPERVISOR_platform_op(&op);
+        if (ret)
+                pr_err("sanitize of _PDC buffer bits from Xen failed: %d\n",
+                       ret);
+        else
+                *cap = buf[2];
+}
 #endif
@@ -21,7 +21,7 @@
 
 #include <xen/xen-front-pgdir-shbuf.h>
 
-/**
+/*
  * This structure represents the structure of a shared page
  * that contains grant references to the pages of the shared
  * buffer. This structure is common to many Xen para-virtualized
@@ -33,7 +33,7 @@ struct xen_page_directory {
         grant_ref_t gref[]; /* Variable length */
 };
 
-/**
+/*
  * Shared buffer ops which are differently implemented
  * depending on the allocation mode, e.g. if the buffer
  * is allocated by the corresponding backend or frontend.
@@ -61,7 +61,7 @@ struct xen_front_pgdir_shbuf_ops {
         int (*unmap)(struct xen_front_pgdir_shbuf *buf);
 };
 
-/**
+/*
  * Get granted reference to the very first page of the
  * page directory. Usually this is passed to the backend,
  * so it can find/fill the grant references to the buffer's
@@ -81,7 +81,7 @@ xen_front_pgdir_shbuf_get_dir_start(struct xen_front_pgdir_shbuf *buf)
 }
 EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_get_dir_start);
 
-/**
+/*
  * Map granted references of the shared buffer.
  *
  * Depending on the shared buffer mode of allocation
@@ -102,7 +102,7 @@ int xen_front_pgdir_shbuf_map(struct xen_front_pgdir_shbuf *buf)
 }
 EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_map);
 
-/**
+/*
  * Unmap granted references of the shared buffer.
  *
  * Depending on the shared buffer mode of allocation
@@ -123,7 +123,7 @@ int xen_front_pgdir_shbuf_unmap(struct xen_front_pgdir_shbuf *buf)
 }
 EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_unmap);
 
-/**
+/*
  * Free all the resources of the shared buffer.
  *
  * \param buf shared buffer which resources to be freed.
@@ -150,7 +150,7 @@ EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_free);
                                  offsetof(struct xen_page_directory, \
                                           gref)) / sizeof(grant_ref_t))
 
-/**
+/*
  * Get the number of pages the page directory consumes itself.
  *
  * \param buf shared buffer.
@@ -160,7 +160,7 @@ static int get_num_pages_dir(struct xen_front_pgdir_shbuf *buf)
         return DIV_ROUND_UP(buf->num_pages, XEN_NUM_GREFS_PER_PAGE);
 }
 
-/**
+/*
  * Calculate the number of grant references needed to share the buffer
  * and its pages when backend allocates the buffer.
  *
@@ -172,7 +172,7 @@ static void backend_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
         buf->num_grefs = get_num_pages_dir(buf);
 }
 
-/**
+/*
  * Calculate the number of grant references needed to share the buffer
  * and its pages when frontend allocates the buffer.
  *
@@ -190,7 +190,7 @@ static void guest_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
 #define xen_page_to_vaddr(page) \
         ((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
 
-/**
+/*
  * Unmap the buffer previously mapped with grant references
  * provided by the backend.
  *
@@ -238,7 +238,7 @@ static int backend_unmap(struct xen_front_pgdir_shbuf *buf)
         return ret;
 }
 
-/**
+/*
  * Map the buffer with grant references provided by the backend.
 *
  * \param buf shared buffer.
@@ -320,7 +320,7 @@ static int backend_map(struct xen_front_pgdir_shbuf *buf)
         return ret;
 }
 
-/**
+/*
  * Fill page directory with grant references to the pages of the
  * page directory itself.
 *
@@ -350,7 +350,7 @@ static void backend_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
         page_dir->gref_dir_next_page = XEN_GREF_LIST_END;
 }
 
-/**
+/*
  * Fill page directory with grant references to the pages of the
 * page directory and the buffer we share with the backend.
 *
@@ -389,7 +389,7 @@ static void guest_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
         }
 }
 
-/**
+/*
  * Grant references to the frontend's buffer pages.
 *
  * These will be shared with the backend, so it can
@@ -418,7 +418,7 @@ static int guest_grant_refs_for_buffer(struct xen_front_pgdir_shbuf *buf,
         return 0;
 }
 
-/**
+/*
  * Grant all the references needed to share the buffer.
 *
  * Grant references to the page directory pages and, if
@@ -466,7 +466,7 @@ static int grant_references(struct xen_front_pgdir_shbuf *buf)
         return 0;
 }
 
-/**
+/*
  * Allocate all required structures to mange shared buffer.
 *
  * \param buf shared buffer.
@@ -506,7 +506,7 @@ static const struct xen_front_pgdir_shbuf_ops local_ops = {
         .grant_refs_for_buffer = guest_grant_refs_for_buffer,
 };
 
-/**
+/*
  * Allocate a new instance of a shared buffer.
 *
  * \param cfg configuration to be used while allocating a new shared buffer.
...
@@ -88,7 +88,6 @@ void xen_irq_resume(void);
 
 /* Clear an irq's pending state, in preparation for polling on it */
 void xen_clear_irq_pending(int irq);
-void xen_set_irq_pending(int irq);
 bool xen_test_irq_pending(int irq);
 
 /* Poll waiting for an irq to become pending. In the usual case, the
@@ -101,8 +100,8 @@ void xen_poll_irq_timeout(int irq, u64 timeout);
 
 /* Determine the IRQ which is bound to an event channel */
 unsigned int irq_from_evtchn(evtchn_port_t evtchn);
-int irq_from_virq(unsigned int cpu, unsigned int virq);
-evtchn_port_t evtchn_from_irq(unsigned irq);
+int irq_evtchn_from_virq(unsigned int cpu, unsigned int virq,
+                         evtchn_port_t *evtchn);
 
 int xen_set_callback_via(uint64_t via);
 int xen_evtchn_do_upcall(void);
@@ -122,9 +121,6 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
 /* De-allocates the above mentioned physical interrupt. */
 int xen_destroy_irq(int irq);
 
-/* Return irq from pirq */
-int xen_irq_from_pirq(unsigned pirq);
-
 /* Return the pirq allocated to the irq. */
 int xen_pirq_from_irq(unsigned irq);
...
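
Note on the cleanup series (referenced from the pull description above): the
backbone of the events-driver cleanup is replacing the exported
evtchn -> IRQ-number lookup (get_evtchn_to_irq()) with an internal
evtchn -> struct irq_info lookup, so callers stop resolving the IRQ number
and then re-resolving the same irq_info. A condensed sketch of the resulting
shape, using simplified non-kernel types:

    #include <stddef.h>

    struct irq_info {
            int irq;
            unsigned int cpu;
    };

    #define MAX_CHANNELS 4096
    static struct irq_info *channel_table[MAX_CHANNELS]; /* toy stand-in */

    static struct irq_info *evtchn_to_info(unsigned int evtchn)
    {
            return evtchn < MAX_CHANNELS ? channel_table[evtchn] : NULL;
    }

    /* Before: evtchn -> irq, then irq -> info. After: a single lookup. */
    unsigned int cpu_from_evtchn(unsigned int evtchn)
    {
            struct irq_info *info = evtchn_to_info(evtchn);

            return info ? info->cpu : 0;
    }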