Commit 4907581d authored by Gavin Shan, committed by Benjamin Herrenschmidt

powerpc/eeh: Export confirm_error_lock

An EEH event is created and queued to the event queue for each
incoming EEH error. When there are multiple EEH errors, we need to
serialize the processing to keep the PE state (flags) consistent.
The spinlock "confirm_error_lock" was introduced for this purpose.
On the PowerNV platform we are going to inject EEH events from the
error-reporting interrupt handler, so export the spinlock (through
the new eeh_serialize_lock()/eeh_serialize_unlock() helpers) so that
path can keep the PE state consistent as well.
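
As an illustration only (not part of this patch), here is a minimal
sketch of how a PowerNV error-reporting interrupt path might use the
exported lock through the new eeh_serialize_lock()/eeh_serialize_unlock()
helpers. The function name pnv_eeh_report_error() and the way the PE is
obtained are assumptions for the example; eeh_pe_state_mark(),
eeh_send_failure_event() and EEH_PE_ISOLATED are the existing EEH core
interfaces also used in the diff below:

  #include <linux/spinlock.h>
  #include <asm/eeh.h>
  #include <asm/eeh_event.h>

  /* Hypothetical handler: the name and the PE lookup are illustrative only */
  static void pnv_eeh_report_error(struct eeh_pe *pe)
  {
  	unsigned long flags;

  	/* Serialize against concurrent reports of the same error */
  	eeh_serialize_lock(&flags);

  	if (pe->state & EEH_PE_ISOLATED) {
  		/* Recovery has already been queued for this PE */
  		eeh_serialize_unlock(flags);
  		return;
  	}

  	/* Mark the PE isolated before dropping the lock */
  	eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
  	eeh_serialize_unlock(flags);

  	/* Hand the event over to the EEH recovery thread */
  	eeh_send_failure_event(pe);
  }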
Signed-off-by: Gavin Shan <shangw@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 99866595
@@ -150,6 +150,7 @@ struct eeh_ops {
 extern struct eeh_ops *eeh_ops;
 extern int eeh_subsystem_enabled;
 extern struct mutex eeh_mutex;
+extern raw_spinlock_t confirm_error_lock;
 extern int eeh_probe_mode;
 
 #define EEH_PROBE_MODE_DEV	(1<<0)	/* From PCI device	*/
@@ -180,6 +181,16 @@ static inline void eeh_unlock(void)
 	mutex_unlock(&eeh_mutex);
 }
 
+static inline void eeh_serialize_lock(unsigned long *flags)
+{
+	raw_spin_lock_irqsave(&confirm_error_lock, *flags);
+}
+
+static inline void eeh_serialize_unlock(unsigned long flags)
+{
+	raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
+}
+
 /*
  * Max number of EEH freezes allowed before we consider the device
  * to be permanently disabled.
...
@@ -107,7 +107,7 @@ int eeh_probe_mode;
 DEFINE_MUTEX(eeh_mutex);
 
 /* Lock to avoid races due to multiple reports of an error */
-static DEFINE_RAW_SPINLOCK(confirm_error_lock);
+DEFINE_RAW_SPINLOCK(confirm_error_lock);
 
 /* Buffer for reporting pci register dumps. Its here in BSS, and
  * not dynamically alloced, so that it ends up in RMO where RTAS
@@ -325,7 +325,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
 	 * in one slot might report errors simultaneously, and we
 	 * only want one error recovery routine running.
 	 */
-	raw_spin_lock_irqsave(&confirm_error_lock, flags);
+	eeh_serialize_lock(&flags);
 	rc = 1;
 	if (pe->state & EEH_PE_ISOLATED) {
 		pe->check_count++;
@@ -374,7 +374,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
 	 * bridges.
 	 */
 	eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
-	raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
+	eeh_serialize_unlock(flags);
 
 	eeh_send_failure_event(pe);
 
@@ -386,7 +386,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
 	return 1;
 
 dn_unlock:
-	raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
+	eeh_serialize_unlock(flags);
 	return rc;
 }
 
@@ -702,8 +702,6 @@ int __init eeh_init(void)
 		return ret;
 	}
 
-	raw_spin_lock_init(&confirm_error_lock);
-
 	/* Initialize EEH event */
 	ret = eeh_event_init();
 	if (ret)
...