Commit 172da89e authored by Vineeth Vijayan's avatar Vineeth Vijayan Committed by Heiko Carstens

s390/cio: avoid excessive path-verification requests

Currently when removing a device from cio_ignore list, we trigger a
path-verification for all the subchannels available in the system. This
could lead to path-verification requests on subchannels with an online
device, which could cause unwanted delay. Instead of evaluating all
subchannels, trigger path-verification only on those subchannels that
do not have an online device.
Reported-by: Julian Wiedmann <jwi@linux.ibm.com>
Fixes: 2297791c ("s390/cio: dont unregister subchannel from child-drivers")
Signed-off-by: Vineeth Vijayan <vneethv@linux.ibm.com>
Reviewed-by: Peter Oberparleiter <oberpar@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
parent 5816b3e6
...@@ -262,10 +262,12 @@ static int blacklist_parse_proc_parameters(char *buf) ...@@ -262,10 +262,12 @@ static int blacklist_parse_proc_parameters(char *buf)
if (strcmp("free", parm) == 0) { if (strcmp("free", parm) == 0) {
rc = blacklist_parse_parameters(buf, free, 0); rc = blacklist_parse_parameters(buf, free, 0);
/* There could be subchannels without proper devices connected. /*
* evaluate all the entries * Evaluate the subchannels without an online device. This way,
* no path-verification will be triggered on those subchannels
* and it avoids unnecessary delays.
*/ */
css_schedule_eval_all(); css_schedule_eval_cond(CSS_EVAL_NOT_ONLINE, 0);
} else if (strcmp("add", parm) == 0) } else if (strcmp("add", parm) == 0)
rc = blacklist_parse_parameters(buf, add, 0); rc = blacklist_parse_parameters(buf, add, 0);
else if (strcmp("purge", parm) == 0) else if (strcmp("purge", parm) == 0)
......
...@@ -788,27 +788,49 @@ static int __unset_registered(struct device *dev, void *data) ...@@ -788,27 +788,49 @@ static int __unset_registered(struct device *dev, void *data)
return 0; return 0;
} }
/*
 * bus_for_each_dev() callback: remove subchannels whose ccw device is
 * online from the idset, so only subchannels without an online device
 * remain selected for evaluation.
 */
static int __unset_online(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);
	struct ccw_device *cdev = sch_get_cdev(sch);

	if (cdev && cdev->online)
		idset_sch_del(set, sch->schid);

	return 0;
}

/*
 * Schedule slow-path evaluation, after @delay jiffies, for all
 * subchannels matching @cond (CSS_EVAL_UNREG: unregistered ones,
 * CSS_EVAL_NOT_ONLINE: those without an online device). Falls back to
 * evaluating all subchannels if the idset cannot be allocated.
 */
void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay)
{
	unsigned long flags;
	struct idset *set;

	/* Start from the full set, then filter it down by condition. */
	set = idset_sch_new();
	if (!set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(set);
	switch (cond) {
	case CSS_EVAL_UNREG:
		bus_for_each_dev(&css_bus_type, NULL, set, __unset_registered);
		break;
	case CSS_EVAL_NOT_ONLINE:
		bus_for_each_dev(&css_bus_type, NULL, set, __unset_online);
		break;
	default:
		break;
	}
	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, delay);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(set);
}
void css_wait_for_slow_path(void) void css_wait_for_slow_path(void)
...@@ -820,7 +842,7 @@ void css_wait_for_slow_path(void) ...@@ -820,7 +842,7 @@ void css_wait_for_slow_path(void)
/* Schedule slow-path evaluation of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	/* Schedule with a delay to allow merging of subsequent calls. */
	css_schedule_eval_cond(CSS_EVAL_UNREG, 1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);
......
...@@ -34,6 +34,14 @@ ...@@ -34,6 +34,14 @@
#define SNID_STATE3_MULTI_PATH 1 #define SNID_STATE3_MULTI_PATH 1
#define SNID_STATE3_SINGLE_PATH 0 #define SNID_STATE3_SINGLE_PATH 0
/*
 * Conditions used to specify which subchannels need evaluation
 * (see css_schedule_eval_cond()).
 */
enum css_eval_cond {
	CSS_EVAL_UNREG,		/* unregistered subchannels */
	CSS_EVAL_NOT_ONLINE	/* sch without an online-device */
};
struct path_state { struct path_state {
__u8 state1 : 2; /* path state value 1 */ __u8 state1 : 2; /* path state value 1 */
__u8 state2 : 2; /* path state value 2 */ __u8 state2 : 2; /* path state value 2 */
...@@ -136,7 +144,7 @@ static inline struct channel_subsystem *css_by_id(u8 cssid) ...@@ -136,7 +144,7 @@ static inline struct channel_subsystem *css_by_id(u8 cssid)
/* Helper functions to build lists for the slow path. */ /* Helper functions to build lists for the slow path. */
void css_schedule_eval(struct subchannel_id schid); void css_schedule_eval(struct subchannel_id schid);
void css_schedule_eval_all(void); void css_schedule_eval_all(void);
void css_schedule_eval_all_unreg(unsigned long delay); void css_schedule_eval_cond(enum css_eval_cond, unsigned long delay);
int css_complete_work(void); int css_complete_work(void);
int sch_is_pseudo_sch(struct subchannel *); int sch_is_pseudo_sch(struct subchannel *);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment