Commit 53d5fc89 authored by Linus Torvalds

Merge tag 's390-5.15-4' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 fix from Vasily Gorbik:
 "One fix for 5.15-rc4: Avoid CIO excessive path-verification requests,
  which might cause unwanted delays"

* tag 's390-5.15-4' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/cio: avoid excessive path-verification requests
parents f5b667de 172da89e
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -262,10 +262,12 @@ static int blacklist_parse_proc_parameters(char *buf)
 	if (strcmp("free", parm) == 0) {
 		rc = blacklist_parse_parameters(buf, free, 0);
-		/* There could be subchannels without proper devices connected.
-		 * evaluate all the entries
+		/*
+		 * Evaluate the subchannels without an online device. This way,
+		 * no path-verification will be triggered on those subchannels
+		 * and it avoids unnecessary delays.
 		 */
-		css_schedule_eval_all();
+		css_schedule_eval_cond(CSS_EVAL_NOT_ONLINE, 0);
 	} else if (strcmp("add", parm) == 0)
 		rc = blacklist_parse_parameters(buf, add, 0);
 	else if (strcmp("purge", parm) == 0)
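Read side by side, the change to the "free" handler boils down to the following. Both calls are taken verbatim from the hunk above; the surrounding commentary is a summary of the commit's rationale, not kernel source:

/* before: schedule every subchannel for re-evaluation, which can trigger
 * path verification (and the unwanted delays this fix targets) even on
 * subchannels whose device is online
 */
css_schedule_eval_all();

/* after: evaluate only subchannels without an online device, immediately
 * (delay 0), so no path verification is triggered on the online ones
 */
css_schedule_eval_cond(CSS_EVAL_NOT_ONLINE, 0);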
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -788,27 +788,49 @@ static int __unset_registered(struct device *dev, void *data)
 	return 0;
 }
 
-void css_schedule_eval_all_unreg(unsigned long delay)
+static int __unset_online(struct device *dev, void *data)
+{
+	struct idset *set = data;
+	struct subchannel *sch = to_subchannel(dev);
+	struct ccw_device *cdev = sch_get_cdev(sch);
+
+	if (cdev && cdev->online)
+		idset_sch_del(set, sch->schid);
+
+	return 0;
+}
+
+void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay)
 {
 	unsigned long flags;
-	struct idset *unreg_set;
+	struct idset *set;
 
 	/* Find unregistered subchannels. */
-	unreg_set = idset_sch_new();
-	if (!unreg_set) {
+	set = idset_sch_new();
+	if (!set) {
 		/* Fallback. */
 		css_schedule_eval_all();
 		return;
 	}
-	idset_fill(unreg_set);
-	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
+	idset_fill(set);
+	switch (cond) {
+	case CSS_EVAL_UNREG:
+		bus_for_each_dev(&css_bus_type, NULL, set, __unset_registered);
+		break;
+	case CSS_EVAL_NOT_ONLINE:
+		bus_for_each_dev(&css_bus_type, NULL, set, __unset_online);
+		break;
+	default:
+		break;
+	}
+
 	/* Apply to slow_subchannel_set. */
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
-	idset_add_set(slow_subchannel_set, unreg_set);
+	idset_add_set(slow_subchannel_set, set);
 	atomic_set(&css_eval_scheduled, 1);
 	queue_delayed_work(cio_work_q, &slow_path_work, delay);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
-	idset_free(unreg_set);
+	idset_free(set);
 }
 
 void css_wait_for_slow_path(void)
@@ -820,7 +842,7 @@ void css_wait_for_slow_path(void)
 void css_schedule_reprobe(void)
 {
 	/* Schedule with a delay to allow merging of subsequent calls. */
-	css_schedule_eval_all_unreg(1 * HZ);
+	css_schedule_eval_cond(CSS_EVAL_UNREG, 1 * HZ);
 }
 EXPORT_SYMBOL_GPL(css_schedule_reprobe);
 
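The pattern css_schedule_eval_cond() uses is: fill a set with every ID, let a condition-specific per-device callback strip the entries that do not need evaluation, then hand the remainder to deferred processing. Below is a minimal user-space sketch of that structure. All names here (struct device, unset_online(), the fixed-size set array, etc.) are stand-ins invented for this illustration; only the shape mirrors the kernel code above, and the "schedule delayed work" step is reduced to a printout.

#include <stdbool.h>
#include <stdio.h>

enum eval_cond { EVAL_UNREG, EVAL_NOT_ONLINE };     /* mirrors css_eval_cond */

struct device { int id; bool registered; bool online; };

#define NDEV 4
static struct device devices[NDEV] = {
        { 0, true,  true  },    /* registered and online            */
        { 1, true,  false },    /* registered, no online ccw device */
        { 2, false, false },    /* not registered                   */
        { 3, true,  true  },
};

static bool set[NDEV];                  /* stand-in for the idset bitmap  */

static void set_fill(void)              /* like idset_fill(): mark all    */
{
        for (int i = 0; i < NDEV; i++)
                set[i] = true;
}

/* Callbacks in the style of __unset_registered()/__unset_online():
 * they remove the devices that do NOT need evaluation.
 */
static void unset_registered(struct device *dev)
{
        if (dev->registered)
                set[dev->id] = false;
}

static void unset_online(struct device *dev)
{
        if (dev->online)
                set[dev->id] = false;
}

static void schedule_eval_cond(enum eval_cond cond)
{
        set_fill();
        for (int i = 0; i < NDEV; i++) {
                switch (cond) {
                case EVAL_UNREG:
                        unset_registered(&devices[i]);
                        break;
                case EVAL_NOT_ONLINE:
                        unset_online(&devices[i]);
                        break;
                }
        }
        /* The kernel would now merge 'set' into slow_subchannel_set and
         * queue delayed work; here we only print what would be evaluated.
         */
        printf("cond=%d evaluates:", cond);
        for (int i = 0; i < NDEV; i++)
                if (set[i])
                        printf(" %d", i);
        printf("\n");
}

int main(void)
{
        schedule_eval_cond(EVAL_NOT_ONLINE);    /* -> 1 2: no online device */
        schedule_eval_cond(EVAL_UNREG);         /* -> 2: unregistered only  */
        return 0;
}

The point of the toy is the contrast between the two conditions: CSS_EVAL_NOT_ONLINE leaves online devices completely alone, which is what spares them the excessive path-verification requests mentioned in the pull message.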
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -34,6 +34,14 @@
 #define SNID_STATE3_MULTI_PATH	1
 #define SNID_STATE3_SINGLE_PATH	0
 
+/*
+ * Conditions used to specify which subchannels need evaluation
+ */
+enum css_eval_cond {
+	CSS_EVAL_UNREG,		/* unregistered subchannels */
+	CSS_EVAL_NOT_ONLINE	/* sch without an online-device */
+};
+
 struct path_state {
 	__u8 state1 : 2;	/* path state value 1 */
 	__u8 state2 : 2;	/* path state value 2 */
@@ -136,7 +144,7 @@ static inline struct channel_subsystem *css_by_id(u8 cssid)
 /* Helper functions to build lists for the slow path. */
 void css_schedule_eval(struct subchannel_id schid);
 void css_schedule_eval_all(void);
-void css_schedule_eval_all_unreg(unsigned long delay);
+void css_schedule_eval_cond(enum css_eval_cond, unsigned long delay);
 int css_complete_work(void);
 
 int sch_is_pseudo_sch(struct subchannel *);