Commit 53e7ea7f authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] s390: common i/o layer

From: Martin Schwidefsky <schwidefsky@de.ibm.com>

 - Make blacklist busid-aware. Add "all" keyword and ! operator to cio_ignore
   kernel parameter.
 - Add state change notify function for ccw devices (not mandatory) and
   introduce the "device disconnected" state.
 - Remove auto offline from remove function for ccw devices to be able to
   distinguish between user initiated offline and implicit offline due to
   device removal.
 - Store pointer to subchannel structure in the (hardware) subchannel intparm
   and remove the ioinfo array (hurray...). Remove intparm parameter of
   cio_start.
 - Use busid instead of subchannel number for debug output.
 - Use an opm mask to track which paths are logically online for a subchannel.
 - Pathgroup every device it was requested for, even single path devices.
 - Give i/o on a logically switched off path a grace period to complete, then
   kill the i/o to get the path offline.
 - Correctly initialize all spin_locks with spin_lock_init.
 - Handle status pending/busy while disabling subchannel.
 - Set busid already in cio_validate_subchannel.
 - Add s390_root_dev_{register,unregister} functions.
 - Do stcrw() inside a kernel thread. Add crw overflow handling.
 - Use subchannel lock directly instead of ccw device lock pointer in
   ccw_device_recognition to avoid accessing an already free structure.
 - Take/release ccw device lock in ccw_device_console_enable.
 - Don't wipe out the busid field in ccw_device_console_enable.
 - Call ccw_device_unregister() directly on a notoper event - delaying it via
   queue_work is harmful (subchannel may be removed before ccw_device).
 - Handle not operational condition in ccw_device_cancel_halt_clear.
 - Correct status pending handling: don't collect pending status directly
   but wait for the interrupt to show up.
 - Enable subchannel when trying a steal lock operation.
 - Introduce doverify bit for delayed path verification.
 - Fix locking in __ccw_device_retry_loop/read_conf_data/read_dev/chars.
 - Make SPID retry mechanism more obvious.
 - qdio: check return code of ccw_device_{halt,clear} in qdio_cleanup. Don't
   try to wait for an interrupt we won't get.
 - qdio: fix shared indicators.
 - qdio: add code to handle i/o killed by cio with active queues.
 - qdio: don't do a shutdown on timeout in interrupt context.
 - Update cio documentation.
parent e966ec57
......@@ -32,6 +32,9 @@ cutype: The control unit type / model.
devtype: The device type / model, if applicable.
online: An interface to set the device online and offline.
In the special case of the device being disconnected (see the
notify function under 1.2), piping 0 to online will forcibly delete
the device.
The device drivers can add entries to export per-device data and interfaces.
......@@ -82,6 +85,7 @@ struct ccw_driver {
int (*remove) (struct ccw_device *);
int (*set_online) (struct ccw_device *);
int (*set_offline) (struct ccw_device *);
int (*notify) (struct ccw_device *, int);
struct device_driver driver;
char *name;
};
......@@ -143,6 +147,28 @@ Parameters: cdev - the device to be deactivated. The common layer has
verified that the device is online.
notify: This function is called by the common I/O layer for some state changes
of the device.
Signalled to the driver are:
* In online state, device detached (CIO_GONE) or last path gone
(CIO_NO_PATH). The driver must return !0 to keep the device; for
return code 0, the device will be deleted as usual (also when no
notify function is registered). If the driver wants to keep the
device, it is moved into disconnected state.
* In disconnected state, device operational again (CIO_OPER). The
common I/O layer performs some sanity checks on device number and
Device / CU to be reasonably sure that it is still the same device.
If not, the old device is removed and a new one registered. By the
return code of the notify function the device driver signals if it
wants the device back: !0 for keeping, 0 to make the device being
removed and re-registered.
int (*notify) (struct ccw_device *, int);
Parameters: cdev - the device whose state changed.
event - the event that happened. This can be one of CIO_GONE,
CIO_NO_PATH or CIO_OPER.
The handler field of the struct ccw_device is meant to be set to the interrupt
handler for the device. In order to accommodate drivers which use several
distinct handlers (e.g. multi subchannel devices), this is a member of ccw_device
......
/*
* drivers/s390/cio/blacklist.c
* S/390 common I/O routines -- blacklisting of specific devices
* $Revision: 1.24 $
* $Revision: 1.27 $
*
* Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
......@@ -62,51 +62,117 @@ blacklist_range (range_action action, unsigned int from, unsigned int to)
}
/*
* function: blacklist_strtoul
* Strip leading '0x' and interpret the values as Hex
* Function: blacklist_busid
* Get devno/busid from given string.
* Shamelessly grabbed from dasd_devmap.c.
*/
static inline int
blacklist_strtoul (const char *str, char **stra)
blacklist_busid(char **str, int *id0, int *id1, int *devno)
{
if (*str == '0') {
if (*(++str) == 'x') /* strip leading zero */
str++; /* strip leading x */
int val, old_style;
/* check for leading '0x' */
old_style = 0;
if ((*str)[0] == '0' && (*str)[1] == 'x') {
*str += 2;
old_style = 1;
}
return simple_strtoul (str, stra, 16); /* interpret anything as hex */
if (!isxdigit((*str)[0])) /* We require at least one hex digit */
return -EINVAL;
val = simple_strtoul(*str, str, 16);
if (old_style || (*str)[0] != '.') {
*id0 = *id1 = 0;
if (val < 0 || val > 0xffff)
return -EINVAL;
*devno = val;
return 0;
}
/* New style x.y.z busid */
if (val < 0 || val > 0xff)
return -EINVAL;
*id0 = val;
(*str)++;
if (!isxdigit((*str)[0])) /* We require at least one hex digit */
return -EINVAL;
val = simple_strtoul(*str, str, 16);
if (val < 0 || val > 0xff || (*str)++[0] != '.')
return -EINVAL;
*id1 = val;
if (!isxdigit((*str)[0])) /* We require at least one hex digit */
return -EINVAL;
val = simple_strtoul(*str, str, 16);
if (val < 0 || val > 0xffff)
return -EINVAL;
*devno = val;
return 0;
}
static inline int
blacklist_parse_parameters (char *str, range_action action)
{
unsigned int from, to;
unsigned int from, to, from_id0, to_id0, from_id1, to_id1;
char *sav;
sav = str;
while (*str != 0 && *str != '\n') {
if (!isxdigit(*str)) {
printk(KERN_WARNING "blacklist_setup: error parsing "
"\"%s\"\n", str);
return 0;
range_action ra = action;
if (*str == '!') {
ra = !action;
++str;
}
from = blacklist_strtoul (str, &str);
to = (*str == '-') ? blacklist_strtoul (str+1, &str) : from;
/*
* Since we have to parse the proc commands and the
* kernel arguments we have to check four cases
*/
if (strncmp(str,"all,",4) == 0 || strcmp(str,"all") == 0 ||
strncmp(str,"all\n",4) == 0 || strncmp(str,"all ",4) == 0) {
from = 0;
to = __MAX_SUBCHANNELS;
str += 3;
} else {
int rc;
rc = blacklist_busid(&str, &from_id0,
&from_id1, &from);
if (rc)
goto out_err;
to = from;
to_id0 = from_id0;
to_id1 = from_id1;
if (*str == '-') {
str++;
rc = blacklist_busid(&str, &to_id0, &to_id1,
&to);
if (rc)
goto out_err;
}
if ((from_id0 != to_id0) || (from_id1 != to_id1))
goto out_err;
}
/* FIXME: ignoring id0 and id1 here. */
pr_debug("blacklist_setup: adding range "
"from 0x%04x to 0x%04x\n", from, to);
blacklist_range (action, from, to);
"from 0.0.%04x to 0.0.%04x\n", from, to);
blacklist_range (ra, from, to);
if (*str == ',')
str++;
}
return 1;
out_err:
printk(KERN_WARNING "blacklist_setup: error parsing \"%s\"\n", sav);
return 0;
}
/* Parsing the commandline for blacklist parameters, e.g. to blacklist
* device IDs 0x1234, 0x1235 and 0x1236, you could use any of:
* bus ids 0.0.1234, 0.0.1235 and 0.0.1236, you could use any of:
* - cio_ignore=1234-1236
* - cio_ignore=0x1234-0x1235,1236
* - cio_ignore=0x1234,1235-1236
* - cio_ignore=1236 cio_ignore=1234-0x1236
* - cio_ignore=1234 cio_ignore=1236 cio_ignore=0x1235
* - cio_ignore=0.0.1234-0.0.1236
* - cio_ignore=0.0.1234,0x1235,1236
* - ...
*/
static int __init
......@@ -134,6 +200,7 @@ is_blacklisted (int devno)
#ifdef CONFIG_PROC_FS
extern void css_reiterate_subchannels(void);
/*
* Function: s390_redo_validation
* Look for no longer blacklisted devices
......@@ -141,12 +208,9 @@ is_blacklisted (int devno)
static inline void
s390_redo_validation (void)
{
int irq;
CIO_TRACE_EVENT (0, "redoval");
for (irq = 0; irq <= highest_subchannel; irq++)
css_probe_device(irq);
css_reiterate_subchannels();
}
/*
......@@ -157,10 +221,7 @@ static inline void
blacklist_parse_proc_parameters (char *buf)
{
if (strncmp (buf, "free ", 5) == 0) {
if (strstr (buf + 5, "all"))
blacklist_range (free, 0, __MAX_SUBCHANNELS);
else
blacklist_parse_parameters (buf + 5, free);
blacklist_parse_parameters (buf + 5, free);
} else if (strncmp (buf, "add ", 4) == 0) {
/*
* We don't need to check for known devices since
......@@ -179,10 +240,11 @@ blacklist_parse_proc_parameters (char *buf)
s390_redo_validation ();
}
/* FIXME: These should be real bus ids and not home-grown ones! */
static int cio_ignore_read (char *page, char **start, off_t off,
int count, int *eof, void *data)
{
const unsigned int entry_size = 14; /* "0xABCD-0xEFGH\n" */
const unsigned int entry_size = 18; /* "0.0.ABCD-0.0.EFGH\n" */
long devno;
int len;
......@@ -192,12 +254,12 @@ static int cio_ignore_read (char *page, char **start, off_t off,
devno <= __MAX_SUBCHANNELS && len + entry_size < count; devno++) {
if (!test_bit(devno, bl_dev))
continue;
len += sprintf(page + len, "0x%04lx", devno);
len += sprintf(page + len, "0.0.%04lx", devno);
if (test_bit(devno + 1, bl_dev)) { /* print range */
while (++devno < __MAX_SUBCHANNELS)
if (!test_bit(devno, bl_dev))
break;
len += sprintf(page + len, "-0x%04lx", --devno);
len += sprintf(page + len, "-0.0.%04lx", --devno);
}
len += sprintf(page + len, "\n");
}
......
/*
* drivers/s390/cio/ccwgroup.c
* bus driver for ccwgroup
* $Revision: 1.17 $
* $Revision: 1.19 $
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
......@@ -337,7 +337,6 @@ ccwgroup_remove (struct device *dev)
{
struct ccwgroup_device *gdev;
struct ccwgroup_driver *gdrv;
int ret;
gdev = to_ccwgroupdev(dev);
gdrv = to_ccwgroupdrv(dev->driver);
......@@ -345,11 +344,10 @@ ccwgroup_remove (struct device *dev)
pr_debug("%s: device %s\n", __func__, gdev->dev.name);
device_remove_file(dev, &dev_attr_online);
ccwgroup_set_offline(gdev);
ret = (gdrv && gdrv->remove) ? gdrv->remove(gdev) : 0;
return ret;
if (gdrv && gdrv->remove)
gdrv->remove(gdev);
return 0;
}
int
......@@ -418,20 +416,20 @@ __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
return gdev;
}
int
void
ccwgroup_remove_ccwdev(struct ccw_device *cdev)
{
struct ccwgroup_device *gdev;
/* Ignore offlining errors, device is gone anyway. */
ccw_device_set_offline(cdev);
/* If one of its devices is gone, the whole group is done for. */
gdev = __ccwgroup_get_gdev_by_cdev(cdev);
if (gdev) {
ccwgroup_set_offline(gdev);
__ccwgroup_remove_symlinks(gdev);
device_unregister(&gdev->dev);
put_device(&gdev->dev);
}
return 0;
}
MODULE_LICENSE("GPL");
......
/*
* drivers/s390/cio/chsc.c
* S/390 common I/O routines -- channel subsystem call
* $Revision: 1.78 $
* $Revision: 1.92 $
*
* Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
......@@ -17,40 +17,40 @@
#include <linux/device.h>
#include <asm/cio.h>
#include <asm/ccwdev.h> // FIXME: layering violation, remove this
#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "device.h" // FIXME: layering violation, remove this
#include "ioasm.h"
#include "chsc.h"
#define CHPID_LONGS (256 / (8 * sizeof(long))) /* 256 chpids */
static unsigned long chpids[CHPID_LONGS];
static unsigned long chpids_logical[CHPID_LONGS] = {[0 ... CHPID_LONGS-1] -1};
static unsigned long chpids_known[CHPID_LONGS];
static struct channel_path *chps[NR_CHPIDS];
static int cio_chsc_desc_avail;
static int new_channel_path(int chpid, int status);
static int
set_chp_status(int chp, int status)
static inline void
set_chp_online(int chp, int onoff)
{
if (chps[chp] == NULL)
return -EINVAL;
chps[chp]->state.online = onoff;
}
chps[chp]->state = status;
return 0;
static inline void
set_chp_logically_online(int chp, int onoff)
{
chps[chp]->state.logically_online = onoff;
}
static inline int
chsc_chpid_logical (struct subchannel *sch, int chp)
static int
get_chp_status(int chp)
{
return test_bit (sch->schib.pmcw.chpid[chp], chpids_logical);
int ret;
if (!chps[chp])
return 0;
ret = chps[chp]->state.online ? CHP_ONLINE : CHP_STANDBY;
return (chps[chp]->state.logically_online ?
ret : ret | CHP_LOGICALLY_OFFLINE);
}
void
......@@ -58,24 +58,21 @@ chsc_validate_chpids(struct subchannel *sch)
{
int mask, chp;
if (sch->lpm == 0)
return;
for (chp = 0; chp <= 7; chp++) {
mask = 0x80 >> chp;
if (sch->lpm & mask)
if (!chsc_chpid_logical(sch, chp))
/* disable using this path */
sch->lpm &= ~mask;
if (get_chp_status(sch->schib.pmcw.chpid[chp])
& CHP_LOGICALLY_OFFLINE)
/* disable using this path */
sch->opm &= ~mask;
}
}
/* FIXME: this is _always_ called for every subchannel. shouldn't we
* process more than one at a time? */
static int
chsc_get_sch_desc_irq(int irq, void *page)
chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
{
int ccode, chpid, j;
int ccode, j;
struct {
struct chsc_header request;
......@@ -106,8 +103,8 @@ chsc_get_sch_desc_irq(int irq, void *page)
.code = 0x0004,
};
ssd_area->f_sch = irq;
ssd_area->l_sch = irq;
ssd_area->f_sch = sch->irq;
ssd_area->l_sch = sch->irq;
ccode = chsc(ssd_area);
if (ccode > 0) {
......@@ -120,19 +117,17 @@ chsc_get_sch_desc_irq(int irq, void *page)
break;
case 0x0002:
CIO_CRW_EVENT(2, "Invalid command!\n");
return -EINVAL;
case 0x0003:
CIO_CRW_EVENT(2, "Error in chsc request block!\n");
return -EINVAL;
break;
case 0x0004:
CIO_CRW_EVENT(2, "Model does not provide ssd\n");
return -EOPNOTSUPP;
break;
default:
CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
ssd_area->response.code);
return -EIO;
break;
}
/*
......@@ -151,7 +146,7 @@ chsc_get_sch_desc_irq(int irq, void *page)
*/
if (ssd_area->st > 3) { /* uhm, that looks strange... */
CIO_CRW_EVENT(0, "Strange subchannel type %d"
" for sch %x\n", ssd_area->st, irq);
" for sch %s\n", ssd_area->st, sch->dev.bus_id);
/*
* There may have been a new subchannel type defined in the
* time since this code was written; since we don't know which
......@@ -160,14 +155,11 @@ chsc_get_sch_desc_irq(int irq, void *page)
return 0;
} else {
const char *type[4] = {"I/O", "chsc", "message", "ADM"};
CIO_CRW_EVENT(6, "ssd: sch %x is %s subchannel\n",
irq, type[ssd_area->st]);
if (ioinfo[irq] == NULL)
/* FIXME: we should do device rec. here... */
return 0;
ioinfo[irq]->ssd_info.valid = 1;
ioinfo[irq]->ssd_info.type = ssd_area->st;
CIO_CRW_EVENT(6, "ssd: sch %s is %s subchannel\n",
sch->dev.bus_id, type[ssd_area->st]);
sch->ssd_info.valid = 1;
sch->ssd_info.type = ssd_area->st;
}
if (ssd_area->st == 0 || ssd_area->st == 2) {
......@@ -175,89 +167,73 @@ chsc_get_sch_desc_irq(int irq, void *page)
if (!((0x80 >> j) & ssd_area->path_mask &
ssd_area->fla_valid_mask))
continue;
chpid = ssd_area->chpid[j];
if (chpid
&& (!test_and_set_bit (chpid, chpids_known))
&& (test_bit (chpid, chpids_logical)))
set_bit (chpid, chpids);
ioinfo[irq]->ssd_info.chpid[j] = chpid;
ioinfo[irq]->ssd_info.fla[j] = ssd_area->fla[j];
sch->ssd_info.chpid[j] = ssd_area->chpid[j];
sch->ssd_info.fla[j] = ssd_area->fla[j];
}
}
return 0;
}
static int
chsc_get_sch_descriptions(void)
int
css_get_ssd_info(struct subchannel *sch)
{
int irq;
int err;
int ret;
void *page;
CIO_TRACE_EVENT( 4, "gsdesc");
/*
* get information about chpids and link addresses
* by executing the chsc command 'store subchannel description'
*/
page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!page)
return -ENOMEM;
err = 0;
for (irq = 0; irq <= highest_subchannel; irq++) {
/*
* retrieve information for each sch
*/
err = chsc_get_sch_desc_irq(irq, page);
if (err) {
static int cio_chsc_err_msg;
if (!cio_chsc_err_msg) {
printk(KERN_ERR
"chsc_get_sch_descriptions:"
" Error %d while doing chsc; "
"processing some machine checks may "
"not work\n", err);
cio_chsc_err_msg = 1;
}
goto out;
spin_lock_irq(&sch->lock);
ret = chsc_get_sch_desc_irq(sch, page);
if (ret) {
static int cio_chsc_err_msg;
if (!cio_chsc_err_msg) {
printk(KERN_ERR
"chsc_get_sch_descriptions:"
" Error %d while doing chsc; "
"processing some machine checks may "
"not work\n", ret);
cio_chsc_err_msg = 1;
}
clear_page(page);
}
cio_chsc_desc_avail = 1;
out:
spin_unlock_irq(&sch->lock);
free_page((unsigned long)page);
return err;
if (!ret) {
int j, chpid;
/* Allocate channel path structures, if needed. */
for (j = 0; j < 8; j++) {
chpid = sch->ssd_info.chpid[j];
if (chpid && !get_chp_status(chpid))
new_channel_path(chpid, CHP_ONLINE);
}
}
return ret;
}
__initcall(chsc_get_sch_descriptions);
static inline void
s390_subchannel_remove_chpid(struct subchannel *sch, __u8 chpid)
static int
s390_subchannel_remove_chpid(struct device *dev, void *data)
{
int j;
int mask;
struct subchannel *sch;
__u8 *chpid;
sch = to_subchannel(dev);
chpid = data;
for (j = 0; j < 8; j++)
if (sch->schib.pmcw.chpid[j] == chpid)
if (sch->schib.pmcw.chpid[j] == *chpid)
break;
if (j >= 8)
return;
return 0;
mask = 0x80 >> j;
spin_lock(&sch->lock);
chsc_validate_chpids(sch);
stsch(sch->irq, &sch->schib);
if (sch->vpm == mask) {
dev_fsm_event(sch->dev.driver_data, DEV_EVENT_NOTOPER);
goto out_unlock;
}
if (sch->vpm == mask)
goto out_unreg;
if ((sch->schib.scsw.actl & (SCSW_ACTL_CLEAR_PEND |
SCSW_ACTL_HALT_PEND |
SCSW_ACTL_START_PEND |
......@@ -265,83 +241,64 @@ s390_subchannel_remove_chpid(struct subchannel *sch, __u8 chpid)
(sch->schib.pmcw.lpum == mask)) {
int cc = cio_cancel(sch);
if (cc == -ENODEV) {
dev_fsm_event(sch->dev.driver_data, DEV_EVENT_NOTOPER);
goto out_unlock;
}
if (cc == -ENODEV)
goto out_unreg;
if (cc == -EINVAL) {
struct ccw_device *cdev;
cc = cio_clear(sch);
if (cc == -ENODEV) {
dev_fsm_event(sch->dev.driver_data,
DEV_EVENT_NOTOPER);
goto out_unlock;
}
if (cc == -ENODEV)
goto out_unreg;
/* Call handler. */
cdev = sch->dev.driver_data;
cdev->private->state = DEV_STATE_CLEAR_VERIFY;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
ERR_PTR(-EIO));
if (sch->driver && sch->driver->termination)
sch->driver->termination(&sch->dev);
goto out_unlock;
}
} else if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
(sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
(sch->schib.pmcw.lpum == mask)) {
struct ccw_device *cdev;
int cc;
cc = cio_clear(sch);
if (cc == -ENODEV) {
dev_fsm_event(sch->dev.driver_data, DEV_EVENT_NOTOPER);
goto out_unlock;
}
if (cc == -ENODEV)
goto out_unreg;
/* Call handler. */
cdev = sch->dev.driver_data;
cdev->private->state = DEV_STATE_CLEAR_VERIFY;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
ERR_PTR(-EIO));
if (sch->driver && sch->driver->termination)
sch->driver->termination(&sch->dev);
goto out_unlock;
}
/* trigger path verification. */
dev_fsm_event(sch->dev.driver_data, DEV_EVENT_VERIFY);
if (sch->driver && sch->driver->verify)
sch->driver->verify(&sch->dev);
out_unlock:
spin_unlock(&sch->lock);
return 0;
out_unreg:
spin_unlock(&sch->lock);
if (sch->driver && sch->driver->notify &&
sch->driver->notify(&sch->dev, CIO_NO_PATH))
return 0;
device_unregister(&sch->dev);
sch->schib.pmcw.intparm = 0;
cio_modify(sch);
return 0;
}
/* FIXME: don't iterate all subchannels but use driver_for_each_dev */
static inline void
s390_set_chpid_offline( __u8 chpid)
{
char dbf_txt[15];
struct subchannel *sch;
int irq;
sprintf(dbf_txt, "chpr%x", chpid);
CIO_TRACE_EVENT(2, dbf_txt);
clear_bit(chpid, chpids);
if (!test_and_clear_bit(chpid, chpids_known))
if (!get_chp_status(chpid))
return; /* we didn't know the chpid anyway */
set_chp_status(chpid, CHP_OFFLINE);
#if 0
driver_for_each_dev(io_subchannel_driver, chpid, s390_subchannel_remove_chpid);
#else
for (irq = 0; irq <= highest_subchannel; irq++) {
sch = ioinfo[irq];
if (sch == NULL)
continue; /* we don't know the device anyway */
set_chp_online(chpid, 0);
s390_subchannel_remove_chpid(sch, chpid);
}
#endif
bus_for_each_dev(&css_bus_type, NULL, &chpid,
s390_subchannel_remove_chpid);
}
static int
......@@ -353,7 +310,7 @@ s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
int ccode;
/* Update our ssd_info */
if (chsc_get_sch_desc_irq(sch->irq, page))
if (chsc_get_sch_desc_irq(sch, page))
return 0;
found = 0;
......@@ -407,7 +364,7 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
* will we have to do.
*/
if (!test_bit(chpid, chpids_logical))
if (get_chp_status(chpid) & CHP_LOGICALLY_OFFLINE)
return; /* no need to do the rest */
page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
......@@ -417,7 +374,7 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
int chp_mask;
sch = ioinfo[irq];
sch = get_subchannel_by_schid(irq);
if (!sch) {
/*
* We don't know the device yet, but since a path
......@@ -450,17 +407,16 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
continue;
}
sch->lpm = (sch->schib.pmcw.pim &
sch->schib.pmcw.pam &
sch->schib.pmcw.pom)
| chp_mask;
chsc_validate_chpids(sch);
sch->lpm = ((sch->schib.pmcw.pim &
sch->schib.pmcw.pam &
sch->schib.pmcw.pom)
| chp_mask) & sch->opm;
dev_fsm_event(sch->dev.driver_data, DEV_EVENT_VERIFY);
if (sch->driver && sch->driver->verify)
sch->driver->verify(&sch->dev);
spin_unlock_irq(&sch->lock);
put_device(&sch->dev);
if (fla_mask != 0)
break;
}
......@@ -497,8 +453,8 @@ __get_chpid_from_lir(void *data)
return (u16) (lir->indesc[0]&0x000000ff);
}
static void
do_process_crw(void *ignore)
void
chsc_process_crw(void)
{
int chpid;
struct {
......@@ -608,7 +564,7 @@ do_process_crw(void *ignore)
if (chps[sei_area->rsid] == NULL)
new_channel_path(sei_area->rsid, CHP_ONLINE);
else
set_chp_status(sei_area->rsid, CHP_ONLINE);
set_chp_online(sei_area->rsid, 1);
if ((sei_area->vf & 0x80) == 0) {
pr_debug("chpid: %x\n", sei_area->rsid);
......@@ -639,14 +595,6 @@ do_process_crw(void *ignore)
free_page((unsigned long)sei_area);
}
void
chsc_process_crw(void)
{
static DECLARE_WORK(work, do_process_crw, 0);
queue_work(ccw_device_work, &work);
}
static void
chp_add(int chpid)
{
......@@ -654,7 +602,7 @@ chp_add(int chpid)
int irq, ret;
char dbf_txt[15];
if (!test_bit(chpid, chpids_logical))
if (get_chp_status(chpid) & CHP_LOGICALLY_OFFLINE)
return; /* no need to do the rest */
sprintf(dbf_txt, "cadd%x", chpid);
......@@ -663,7 +611,7 @@ chp_add(int chpid)
for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
int i;
sch = ioinfo[irq];
sch = get_subchannel_by_schid(irq);
if (!sch) {
ret = css_probe_device(irq);
if (ret == -ENXIO)
......@@ -686,16 +634,16 @@ chp_add(int chpid)
spin_unlock(&sch->lock);
return;
}
sch->lpm = (sch->schib.pmcw.pim &
sch->schib.pmcw.pam &
sch->schib.pmcw.pom)
| 0x80 >> i;
chsc_validate_chpids(sch);
sch->lpm = ((sch->schib.pmcw.pim &
sch->schib.pmcw.pam &
sch->schib.pmcw.pom)
| 0x80 >> i) & sch->opm;
dev_fsm_event(sch->dev.driver_data, DEV_EVENT_VERIFY);
if (sch->driver && sch->driver->verify)
sch->driver->verify(&sch->dev);
spin_unlock(&sch->lock);
put_device(&sch->dev);
}
}
......@@ -716,12 +664,80 @@ chp_process_crw(int chpid, int on)
if (chps[chpid] == NULL)
new_channel_path(chpid, CHP_ONLINE);
else
set_chp_status(chpid, CHP_ONLINE);
set_chp_online(chpid, 1);
/* Avoid the extra overhead in process_rec_acc. */
chp_add(chpid);
}
}
/*
 * Check whether the subchannel still has I/O in flight on the path that
 * was just varied off (index = position of the chpid, 0..7).  If the
 * last-path-used mask matches that path and any activity control bits
 * are set, mark the device as waiting so the common layer can kill the
 * I/O after a grace period (see the caller's comment).
 */
static inline void
__check_for_io_and_kill(struct subchannel *sch, int index)
{
	int cc;

	/* Refresh the subchannel information block; bail out if the
	 * subchannel is no longer accessible. */
	cc = stsch(sch->irq, &sch->schib);
	if (cc)
		return;
	/* actl != 0 means I/O is active; lpum identifies the path the
	 * last I/O went out on. */
	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index))
		device_set_waiting(sch);
}
/*
 * Vary the given chpid logically on or off for one subchannel: adjust
 * the operational path mask (opm) and logical path mask (lpm) for the
 * matching path, give running I/O on a varied-off path a chance to
 * finish (then kill it), and trigger path verification.
 * Does nothing if the subchannel has no valid ssd info.
 */
static inline void
__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
{
	int chp;

	if (!sch->ssd_info.valid)
		return;

	/* Find which of the (up to 8) paths uses this chpid. */
	for (chp = 0; chp < 8; chp++) {
		if (sch->ssd_info.chpid[chp] == chpid) {
			if (on) {
				sch->opm |= (0x80 >> chp);
				sch->lpm |= (0x80 >> chp);
			} else {
				sch->opm &= ~(0x80 >> chp);
				sch->lpm &= ~(0x80 >> chp);
				/*
				 * Give running I/O a grace period in which it
				 * can successfully terminate, even using the
				 * just varied off path. Then kill it.
				 */
				__check_for_io_and_kill(sch, chp);
			}
			/* Let the subchannel driver re-verify the paths. */
			if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
			break;
		}
	}
}
/*
 * bus_for_each_dev() callback: vary the chpid passed via data off for
 * the subchannel represented by dev.  Always returns 0 so the bus
 * iteration continues over all subchannels.
 */
static int
s390_subchannel_vary_chpid_off(struct device *dev, void *data)
{
	struct subchannel *sch;
	__u8 *chpid;

	sch = to_subchannel(dev);
	chpid = data;	/* chpid to vary off, passed through bus_for_each_dev */

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}
/*
 * bus_for_each_dev() callback: vary the chpid passed via data on for
 * the subchannel represented by dev.  Always returns 0 so the bus
 * iteration continues over all subchannels.
 */
static int
s390_subchannel_vary_chpid_on(struct device *dev, void *data)
{
	struct subchannel *sch;
	__u8 *chpid;

	sch = to_subchannel(dev);
	chpid = data;	/* chpid to vary on, passed through bus_for_each_dev */

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}
/*
* Function: s390_vary_chpid
* Varies the specified chpid online or offline
......@@ -730,66 +746,33 @@ static int
s390_vary_chpid( __u8 chpid, int on)
{
char dbf_text[15];
struct subchannel *sch;
int irq;
int status;
sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
CIO_TRACE_EVENT( 2, dbf_text);
chsc_get_sch_descriptions();
if (!cio_chsc_desc_avail) {
printk(KERN_ERR "Could not get chpid status, "
"vary on/off not available\n");
return -EPERM;
}
if (!test_bit(chpid, chpids_known)) {
status = get_chp_status(chpid);
if (!status) {
printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
return -EINVAL;
}
if (test_bit(chpid, chpids) == on) {
if ((on && !(status & CHP_LOGICALLY_OFFLINE)) ||
(!on && (status & CHP_LOGICALLY_OFFLINE))) {
printk(KERN_ERR "chpid %x is "
"already %sline\n", chpid, on ? "on" : "off");
return -EINVAL;
}
if (on) {
set_bit(chpid, chpids_logical);
set_bit(chpid, chpids);
set_chp_status(chpid, CHP_ONLINE);
} else {
clear_bit(chpid, chpids_logical);
clear_bit(chpid, chpids);
set_chp_status(chpid, CHP_LOGICALLY_OFFLINE);
}
set_chp_logically_online(chpid, on);
/*
* Redo PathVerification on the devices the chpid connects to
*/
for (irq = 0; irq <= highest_subchannel; irq++) {
int chp;
/*
* We don't need to adjust the lpm, as this will be done in
* DevicePathVerification...
*/
sch = ioinfo[irq];
if (sch == NULL || sch->st || !sch->ssd_info.valid)
continue;
for (chp = 0; chp < 8; chp++) {
if (sch->ssd_info.chpid[chp] == chpid) {
if (on)
sch->lpm |= (0x80 >> chp);
else
sch->lpm &= ~(0x80 >> chp);
dev_fsm_event(sch->dev.driver_data,
DEV_EVENT_VERIFY);
break;
}
}
}
bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
s390_subchannel_vary_chpid_on :
s390_subchannel_vary_chpid_off);
return 0;
}
......@@ -800,22 +783,16 @@ static ssize_t
chp_status_show(struct device *dev, char *buf)
{
struct channel_path *chp = container_of(dev, struct channel_path, dev);
int state;
if (!chp)
return 0;
switch(chp->state) {
case CHP_OFFLINE:
state = get_chp_status(chp->id);
if (state & CHP_STANDBY)
return sprintf(buf, "n/a\n");
case CHP_LOGICALLY_OFFLINE:
return sprintf(buf, "logically offline\n");
case CHP_STANDBY:
return sprintf(buf, "n/a\n");
case CHP_ONLINE:
return sprintf(buf, "online\n");
default:
return 0;
}
return (state & CHP_LOGICALLY_OFFLINE) ?
sprintf(buf, "logically offline\n") :
sprintf(buf, "online\n");
}
static ssize_t
......@@ -847,6 +824,10 @@ static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
/*
 * Release callback for the channel path device: frees the containing
 * struct channel_path once the last reference to its embedded struct
 * device is dropped.
 */
static void
chp_release(struct device *dev)
{
	struct channel_path *cp;

	cp = container_of(dev, struct channel_path, dev);
	kfree(cp);
}
/*
......@@ -868,7 +849,19 @@ new_channel_path(int chpid, int status)
/* fill in status, etc. */
chp->id = chpid;
chp->state = status;
switch (status) {
case CHP_STANDBY:
chp->state.online = 0;
chp->state.logically_online = 1;
break;
case CHP_LOGICALLY_OFFLINE:
chp->state.logically_online = 0;
chp->state.online = 1;
break;
case CHP_ONLINE:
chp->state.online = 1;
chp->state.logically_online = 1;
}
chp->dev = (struct device) {
.parent = &css_bus_device,
.release = chp_release,
......@@ -889,32 +882,3 @@ new_channel_path(int chpid, int status)
return ret;
}
static int __init
register_channel_paths(void)
{
int i;
int ret;
/* walk through the chpids arrays */
for (i = 0; i < NR_CHPIDS; i++) {
/* we are only interested in known chpids */
if (!test_bit(i, chpids_known))
continue;
if (!test_bit(i, chpids))
/* standby */
ret = new_channel_path(i, CHP_STANDBY);
else if (test_bit(i, chpids_logical))
/* online */
ret = new_channel_path(i, CHP_ONLINE);
else
/* logically offline */
ret = new_channel_path(i, CHP_LOGICALLY_OFFLINE);
if (ret)
return ret;
}
return 0;
}
module_init(register_channel_paths);
......@@ -3,10 +3,9 @@
#define NR_CHPIDS 256
#define CHP_OFFLINE 0
#define CHP_LOGICALLY_OFFLINE 1
#define CHP_STANDBY 2
#define CHP_ONLINE 3
#define CHP_STANDBY 1
#define CHP_LOGICALLY_OFFLINE 2
#define CHP_ONLINE 4
#define CHSC_SEI_ACC_CHPID 1
#define CHSC_SEI_ACC_LINKADDR 2
......@@ -19,7 +18,10 @@ struct chsc_header {
struct channel_path {
int id;
int state;
struct {
unsigned int online:1;
unsigned int logically_online:1;
}__attribute__((packed)) state;
struct device dev;
};
......
/*
* drivers/s390/cio/cio.c
* S/390 common I/O routines -- low level i/o calls
* $Revision: 1.105 $
* $Revision: 1.114 $
*
* Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
......@@ -147,7 +147,7 @@ cio_tpi(void)
if (tsch (tpi_info->irq, irb) != 0)
/* Not status pending or not operational. */
return 1;
sch = ioinfo[tpi_info->irq];
sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
if (!sch)
return 1;
irq_enter ();
......@@ -184,22 +184,22 @@ cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
int
cio_start (struct subchannel *sch, /* subchannel structure */
struct ccw1 * cpa, /* logical channel prog addr */
unsigned int intparm, /* interruption parameter */
__u8 lpm) /* logical path mask */
{
char dbf_txt[15];
int ccode;
sprintf (dbf_txt, "stIO%x", sch->irq);
CIO_TRACE_EVENT (4, dbf_txt);
CIO_TRACE_EVENT (4, "stIO");
CIO_TRACE_EVENT (4, sch->dev.bus_id);
sch->orb.intparm = intparm;
/* sch is always under 2G. */
sch->orb.intparm = (__u32)(unsigned long)sch;
sch->orb.fmt = 1;
sch->orb.pfch = sch->options.prefetch == 0;
sch->orb.spnd = sch->options.suspend;
sch->orb.ssic = sch->options.suspend && sch->options.inter;
sch->orb.lpm = (lpm != 0) ? lpm : sch->lpm;
sch->orb.lpm = (lpm != 0) ? (lpm & sch->opm) : sch->lpm;
#ifdef CONFIG_ARCH_S390X
/*
* for 64 bit we always support 64 bit IDAWs with 4k page size only
......@@ -240,8 +240,8 @@ cio_resume (struct subchannel *sch)
char dbf_txt[15];
int ccode;
sprintf (dbf_txt, "resIO%x", sch->irq);
CIO_TRACE_EVENT (4, dbf_txt);
CIO_TRACE_EVENT (4, "resIO");
CIO_TRACE_EVENT (4, sch->dev.bus_id);
ccode = rsch (sch->irq);
......@@ -277,8 +277,8 @@ cio_halt(struct subchannel *sch)
if (!sch)
return -ENODEV;
sprintf (dbf_txt, "haltIO%x", sch->irq);
CIO_TRACE_EVENT (2, dbf_txt);
CIO_TRACE_EVENT (2, "haltIO");
CIO_TRACE_EVENT (2, sch->dev.bus_id);
/*
* Issue "Halt subchannel" and process condition code
......@@ -312,8 +312,8 @@ cio_clear(struct subchannel *sch)
if (!sch)
return -ENODEV;
sprintf (dbf_txt, "clearIO%x", sch->irq);
CIO_TRACE_EVENT (2, dbf_txt);
CIO_TRACE_EVENT (2, "clearIO");
CIO_TRACE_EVENT (2, sch->dev.bus_id);
/*
* Issue "Clear subchannel" and process condition code
......@@ -348,8 +348,8 @@ cio_cancel (struct subchannel *sch)
if (!sch)
return -ENODEV;
sprintf (dbf_txt, "cancelIO%x", sch->irq);
CIO_TRACE_EVENT (2, dbf_txt);
CIO_TRACE_EVENT (2, "cancelIO");
CIO_TRACE_EVENT (2, sch->dev.bus_id);
ccode = xsch (sch->irq);
......@@ -374,7 +374,7 @@ cio_cancel (struct subchannel *sch)
* Function: cio_modify
* Issues a "Modify Subchannel" on the specified subchannel
*/
static int
int
cio_modify (struct subchannel *sch)
{
int ccode, retry, ret;
......@@ -411,8 +411,8 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc)
int retry;
int ret;
sprintf (dbf_txt, "ensch%x", sch->irq);
CIO_TRACE_EVENT (2, dbf_txt);
CIO_TRACE_EVENT (2, "ensch");
CIO_TRACE_EVENT (2, sch->dev.bus_id);
ccode = stsch (sch->irq, &sch->schib);
if (ccode)
......@@ -420,6 +420,7 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc)
sch->schib.pmcw.ena = 1;
sch->schib.pmcw.isc = isc;
sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
for (retry = 5, ret = 0; retry > 0; retry--) {
ret = cio_modify(sch);
if (ret == -ENODEV)
......@@ -457,8 +458,8 @@ cio_disable_subchannel (struct subchannel *sch)
int retry;
int ret;
sprintf (dbf_txt, "dissch%x", sch->irq);
CIO_TRACE_EVENT (2, dbf_txt);
CIO_TRACE_EVENT (2, "dissch");
CIO_TRACE_EVENT (2, sch->dev.bus_id);
ccode = stsch (sch->irq, &sch->schib);
if (ccode == 3) /* Not operational. */
......@@ -477,6 +478,13 @@ cio_disable_subchannel (struct subchannel *sch)
ret = cio_modify(sch);
if (ret == -ENODEV)
break;
if (ret == -EBUSY)
/*
* The subchannel is busy or status pending.
* We'll disable when the next interrupt was delivered
* via the state machine.
*/
break;
if (ret == 0) {
stsch (sch->irq, &sch->schib);
if (!sch->schib.pmcw.ena)
......@@ -512,6 +520,11 @@ cio_validate_subchannel (struct subchannel *sch, unsigned int irq)
/* Nuke all fields. */
memset(sch, 0, sizeof(struct subchannel));
spin_lock_init(&sch->lock);
/* Set a name for the subchannel */
snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", irq);
/*
* The first subchannel that is not-operational (ccode==3)
* indicates that there aren't any more devices available.
......@@ -551,12 +564,12 @@ cio_validate_subchannel (struct subchannel *sch, unsigned int irq)
"at devno %04X\n", sch->schib.pmcw.dev);
return -ENODEV;
}
sch->opm = 0xff;
chsc_validate_chpids(sch);
sch->lpm = sch->schib.pmcw.pim &
sch->schib.pmcw.pam &
sch->schib.pmcw.pom;
chsc_validate_chpids(sch);
sch->schib.pmcw.pom &
sch->opm;
CIO_DEBUG(KERN_INFO, 0,
"Detected device %04X on subchannel %s"
......@@ -575,7 +588,6 @@ cio_validate_subchannel (struct subchannel *sch, unsigned int irq)
sch->schib.pmcw.isc = 3; /* could be smth. else */
sch->schib.pmcw.csense = 1; /* concurrent sense */
sch->schib.pmcw.ena = 0;
sch->schib.pmcw.intparm = sch->schib.pmcw.dev;
if ((sch->lpm & (sch->lpm - 1)) != 0)
sch->schib.pmcw.mp = 1; /* multipath mode */
return 0;
......@@ -612,7 +624,7 @@ do_IRQ (struct pt_regs *regs)
do_adapter_IO();
continue;
}
sch = ioinfo[tpi_info->irq];
sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
if (sch)
spin_lock(&sch->lock);
/* Store interrupt response block to lowcore. */
......@@ -733,16 +745,40 @@ cio_probe_console(void)
*/
ctl_set_bit(6, 24);
console_subchannel.schib.pmcw.isc = 7;
ioinfo[irq] = &console_subchannel;
console_subchannel.schib.pmcw.intparm =
(__u32)(unsigned long)&console_subchannel;
ret = cio_modify(&console_subchannel);
if (ret) {
console_subchannel_in_use = 0;
return ERR_PTR(ret);
}
return &console_subchannel;
}
void
cio_release_console(void)
{
ioinfo[console_subchannel.irq] = NULL;
console_subchannel.schib.pmcw.intparm = 0;
cio_modify(&console_subchannel);
ctl_clear_bit(6, 24);
console_subchannel_in_use = 0;
}
/* Bah... hack to catch console special sausages. */
/* Return non-zero iff irq is the statically allocated console subchannel. */
int
cio_is_console(int irq)
{
	return console_subchannel_in_use && irq == console_subchannel.irq;
}
/*
 * Return a pointer to the static console subchannel, or NULL if the
 * console subchannel is not in use.
 */
struct subchannel *
cio_get_console_subchannel(void)
{
	if (!console_subchannel_in_use)
		return NULL;	/* was the integer literal 0; NULL is the idiomatic null pointer */
	return &console_subchannel;
}
#endif
......@@ -98,6 +98,7 @@ struct subchannel {
__u8 vpm; /* verified path mask */
__u8 lpm; /* logical path mask */
__u8 opm; /* operational path mask */
struct schib schib; /* subchannel information block */
struct orb orb; /* operation request block */
struct ccw1 sense_ccw; /* static ccw for sense command */
......@@ -117,14 +118,16 @@ extern int cio_cancel (struct subchannel *);
extern int cio_clear (struct subchannel *);
extern int cio_resume (struct subchannel *);
extern int cio_halt (struct subchannel *);
extern int cio_start (struct subchannel *, struct ccw1 *, unsigned int, __u8);
extern int cio_start (struct subchannel *, struct ccw1 *, __u8);
extern int cio_cancel (struct subchannel *);
extern int cio_set_options (struct subchannel *, int);
extern int cio_get_options (struct subchannel *);
extern int cio_modify (struct subchannel *);
/* Use with care. */
extern struct subchannel *cio_probe_console(void);
extern void cio_release_console(void);
extern int cio_is_console(int irq);
extern struct subchannel *cio_get_console_subchannel(void);
extern int cio_show_msg;
......
/*
* drivers/s390/cio/css.c
* driver for channel subsystem
* $Revision: 1.49 $
* $Revision: 1.65 $
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
......@@ -14,15 +14,11 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <asm/ccwdev.h> // FIXME: layering violation, remove this
#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "device.h" // FIXME: dito
#include "ioasm.h"
struct subchannel *ioinfo[__MAX_SUBCHANNELS];
unsigned int highest_subchannel;
int css_init_done = 0;
......@@ -30,23 +26,19 @@ struct device css_bus_device = {
.bus_id = "css0",
};
static int
static struct subchannel *
css_alloc_subchannel(int irq)
{
struct subchannel *sch;
int ret;
if (ioinfo[irq])
/* There already is a struct subchannel for this irq. */
return -EBUSY;
sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
if (sch == NULL)
return -ENOMEM;
return ERR_PTR(-ENOMEM);
ret = cio_validate_subchannel (sch, irq);
if (ret < 0) {
kfree(sch);
return ret;
return ERR_PTR(ret);
}
if (irq > highest_subchannel)
highest_subchannel = irq;
......@@ -54,27 +46,47 @@ css_alloc_subchannel(int irq)
if (sch->st != SUBCHANNEL_TYPE_IO) {
/* For now we ignore all non-io subchannels. */
kfree(sch);
return -EINVAL;
return ERR_PTR(-EINVAL);
}
ioinfo[irq] = sch;
return 0;
/*
* Set intparm to subchannel address.
* This is fine even on 64bit since the subchannel is always located
* under 2G.
*/
sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
ret = cio_modify(sch);
if (ret) {
kfree(sch);
return ERR_PTR(ret);
}
return sch;
}
static void
css_free_subchannel(int irq)
css_free_subchannel(struct subchannel *sch)
{
struct subchannel *sch;
sch = ioinfo[irq];
if (sch) {
ioinfo[irq] = NULL;
/* Reset intparm to zeroes. */
sch->schib.pmcw.intparm = 0;
cio_modify(sch);
kfree(sch);
}
}
/*
 * Device-core release callback for subchannels.
 * The console subchannel is statically allocated and must never be freed;
 * all other subchannels were kmalloc'ed in css_alloc_subchannel.
 */
static void
css_subchannel_release(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);

	if (cio_is_console(sch->irq))
		return;
	kfree(sch);
}
extern int css_get_ssd_info(struct subchannel *sch);
static int
css_register_subchannel(struct subchannel *sch)
{
......@@ -83,15 +95,15 @@ css_register_subchannel(struct subchannel *sch)
/* Initialize the subchannel structure */
sch->dev.parent = &css_bus_device;
sch->dev.bus = &css_bus_type;
/* Set a name for the subchannel */
snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", sch->irq);
sch->dev.release = &css_subchannel_release;
/* make it known to the system */
ret = device_register(&sch->dev);
if (ret)
printk (KERN_WARNING "%s: could not register %s\n",
__func__, sch->dev.bus_id);
else
css_get_ssd_info(sch);
return ret;
}
......@@ -99,28 +111,159 @@ int
css_probe_device(int irq)
{
int ret;
struct subchannel *sch;
ret = css_alloc_subchannel(irq);
sch = css_alloc_subchannel(irq);
if (IS_ERR(sch))
return PTR_ERR(sch);
ret = css_register_subchannel(sch);
if (ret)
return ret;
ret = css_register_subchannel(ioinfo[irq]);
if (ret)
css_free_subchannel(irq);
css_free_subchannel(sch);
return ret;
}
/*
 * Fast lookup of a subchannel structure: store the subchannel and read the
 * struct subchannel pointer back out of the pmcw intparm field.
 * Returns the subchannel with a reference held, or NULL if the subchannel
 * is not operational, has no intparm set, or the reference could not be
 * obtained.
 */
static struct subchannel *
__get_subchannel_by_stsch(int irq)
{
	struct schib schib;
	struct subchannel *sch;

	if (stsch(irq, &schib) != 0)
		return NULL;
	sch = (struct subchannel *)(unsigned long)schib.pmcw.intparm;
	if (sch && get_device(&sch->dev))
		return sch;
	return NULL;
}
/*
 * Find the struct subchannel for subchannel number @irq.
 *
 * First tries the fast path via stsch/intparm (__get_subchannel_by_stsch);
 * if that fails, walks the list of devices bound to io_subchannel_driver.
 *
 * Returns the subchannel with a device reference held (the loop's break
 * path deliberately keeps the get_device() reference), or NULL if not
 * found. Caller is responsible for dropping the reference with
 * put_device() — assumed from the get/put pairing here; confirm against
 * callers.
 */
struct subchannel *
get_subchannel_by_schid(int irq)
{
	struct subchannel *sch;
	struct list_head *entry;
	struct device *dev;

	if (!get_bus(&css_bus_type))
		return NULL;
	/* Try to get subchannel from pmcw first. */
	sch = __get_subchannel_by_stsch(irq);
	if (sch)
		goto out;
	/* Fall back to scanning the driver's device list. */
	if (!get_driver(&io_subchannel_driver.drv))
		goto out;	/* sch is still NULL here */
	down_read(&css_bus_type.subsys.rwsem);

	list_for_each(entry, &io_subchannel_driver.drv.devices) {
		dev = get_device(container_of(entry,
					      struct device, driver_list));
		if (!dev)
			continue;
		sch = to_subchannel(dev);
		if (sch->irq == irq)
			break;	/* found: keep the reference taken above */
		put_device(dev);
		sch = NULL;	/* ensure NULL is returned if the list ends */
	}
	up_read(&css_bus_type.subsys.rwsem);
	put_driver(&io_subchannel_driver.drv);
out:
	put_bus(&css_bus_type);
	return sch;
}
/*
 * Classify the current hardware state of subchannel number @schid
 * relative to the (possibly NULL) known subchannel structure @sch:
 *   CIO_GONE       - stsch failed or device number invalid (dnv clear)
 *   CIO_REVALIDATE - device number changed behind our back
 *   CIO_OPER       - subchannel is operational and matches
 */
static inline int
css_get_subchannel_status(struct subchannel *sch, int schid)
{
	struct schib schib;

	if (stsch(schid, &schib) || !schib.pmcw.dnv)
		return CIO_GONE;
	if (sch && schib.pmcw.dev != sch->schib.pmcw.dev)
		return CIO_REVALIDATE;
	return CIO_OPER;
}
/*
 * Re-evaluate one subchannel after a machine check / CRW: decide whether
 * the device is gone, changed identity, or (re)appeared, and register or
 * unregister the subchannel accordingly.
 *
 * Returns 0 on success or the error from css_probe_device().
 */
static inline int
css_evaluate_subchannel(int irq)
{
	int event, ret, disc;
	struct subchannel *sch;

	/* May return NULL; if non-NULL we hold a device reference. */
	sch = get_subchannel_by_schid(irq);
	disc = sch ? device_is_disconnected(sch) : 0;
	event = css_get_subchannel_status(sch, irq);
	switch (event) {
	case CIO_GONE:
		if (!sch) {
			/* Never used this subchannel. Ignore. */
			ret = 0;
			break;
		}
		/* Give the driver a chance to keep the device around
		 * in disconnected state instead of tearing it down. */
		if (sch->driver && sch->driver->notify &&
		    sch->driver->notify(&sch->dev, CIO_GONE)) {
			device_set_disconnected(sch);
			ret = 0;
			break;
		}
		/*
		 * Unregister subchannel.
		 * The device will be killed automatically.
		 */
		device_unregister(&sch->dev);
		/* Reset intparm to zeroes. */
		sch->schib.pmcw.intparm = 0;
		cio_modify(sch);
		put_device(&sch->dev);
		ret = 0;
		break;
	case CIO_REVALIDATE:
		/*
		 * Revalidation machine check. Sick.
		 * We don't notify the driver since we have to throw the device
		 * away in any case.
		 */
		device_unregister(&sch->dev);
		/* Reset intparm to zeroes. */
		sch->schib.pmcw.intparm = 0;
		cio_modify(sch);
		put_device(&sch->dev);
		/* Probe again: a different device may now live here. */
		ret = css_probe_device(irq);
		break;
	case CIO_OPER:
		if (disc)
			/* Get device operational again. */
			device_trigger_reprobe(sch);
		/* NOTE(review): on this path the reference taken by
		 * get_subchannel_by_schid() does not appear to be dropped
		 * when sch != NULL — confirm intended ownership. */
		ret = sch ? 0 : css_probe_device(irq);
		break;
	default:
		BUG();
		ret = 0;
	}
	return ret;
}
/*
* Rescan for new devices. FIXME: This is slow.
* This function is called when we have lost CRWs due to overflows and we have
* to do subchannel housekeeping.
*/
static void
do_process_crw(void *ignore)
void
css_reiterate_subchannels(void)
{
int irq, ret;
for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
if (ioinfo[irq])
continue;
ret = css_probe_device(irq);
for (irq = 0; irq <= __MAX_SUBCHANNELS; irq++) {
ret = css_evaluate_subchannel(irq);
/* No more memory. It doesn't make sense to continue. No
* panic because this can happen in midflight and just
* because we can't use a new device is no reason to crash
......@@ -135,39 +278,25 @@ do_process_crw(void *ignore)
/*
* Called from the machine check handler for subchannel report words.
* Note: this is called disabled from the machine check handler itself.
*/
void
css_process_crw(int irq)
{
static DECLARE_WORK(work, do_process_crw, 0);
struct subchannel *sch;
int ccode, devno;
CIO_CRW_EVENT(2, "source is subchannel %04X\n", irq);
sch = ioinfo[irq];
if (sch == NULL) {
queue_work(ccw_device_work, &work);
return;
}
if (!sch->dev.driver_data)
return;
devno = sch->schib.pmcw.dev;
/* FIXME: css_process_crw must not know about ccw_device */
dev_fsm_event(sch->dev.driver_data, DEV_EVENT_NOTOPER);
ccode = stsch(irq, &sch->schib);
if (!ccode)
if (devno != sch->schib.pmcw.dev)
queue_work(ccw_device_work, &work);
/*
* Since we are always presented with IPI in the CRW, we have to
* use stsch() to find out if the subchannel in question has come
* or gone.
*/
css_evaluate_subchannel(irq);
}
/*
* some of the initialization has already been done from init_IRQ(),
* here we do the rest now that the driver core is running.
* Currently, this functions scans all the subchannel structures for
* devices. The long term plan is to remove ioinfo[] and then the
* struct subchannel's will be created during probing.
* The struct subchannel's are created during probing (except for the
* static console subchannel).
*/
static int __init
init_channel_subsystem (void)
......@@ -184,8 +313,16 @@ init_channel_subsystem (void)
ctl_set_bit(6, 28);
for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
if (!ioinfo[irq]) {
ret = css_alloc_subchannel(irq);
struct subchannel *sch;
if (cio_is_console(irq))
sch = cio_get_console_subchannel();
else {
sch = css_alloc_subchannel(irq);
if (IS_ERR(sch))
ret = PTR_ERR(sch);
else
ret = 0;
if (ret == -ENOMEM)
panic("Out of memory in "
"init_channel_subsystem\n");
......@@ -202,7 +339,7 @@ init_channel_subsystem (void)
* not working) so we do it now. This is true e.g. for the
* console subchannel.
*/
css_register_subchannel(ioinfo[irq]);
css_register_subchannel(sch);
}
return 0;
......@@ -236,6 +373,46 @@ struct bus_type css_bus_type = {
subsys_initcall(init_channel_subsystem);
/*
* Register root devices for some drivers. The release function must not be
* in the device drivers, so we do it here.
*/
/*
 * Release callback for root devices created by s390_root_dev_register():
 * the struct device was kmalloc'ed there, so simply free it.
 */
static void
s390_root_dev_release(struct device *dev)
{
	kfree(dev);
}
/*
 * Allocate and register a root device named @name. The release function
 * must live here (not in the device drivers), so the struct is freed by
 * s390_root_dev_release().
 *
 * Returns the registered device, or an ERR_PTR on empty name, allocation
 * failure, or device_register() failure.
 */
struct device *
s390_root_dev_register(const char *name)
{
	struct device *dev;
	int ret;

	if (!strlen(name))
		return ERR_PTR(-EINVAL);
	dev = kmalloc(sizeof(struct device), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);
	memset(dev, 0, sizeof(struct device));
	/*
	 * Use snprintf instead of strncpy: strncpy with
	 * min(strlen(name), BUS_ID_SIZE) left bus_id without a NUL
	 * terminator whenever the name filled the whole buffer;
	 * snprintf always terminates (truncating if necessary).
	 */
	snprintf(dev->bus_id, BUS_ID_SIZE, "%s", name);
	dev->release = s390_root_dev_release;
	ret = device_register(dev);
	if (ret) {
		kfree(dev);
		return ERR_PTR(ret);
	}
	return dev;
}
/* Unregister a root device previously obtained from s390_root_dev_register().
 * Tolerates a NULL argument. */
void
s390_root_dev_unregister(struct device *dev)
{
	if (!dev)
		return;
	device_unregister(dev);
}
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);
EXPORT_SYMBOL(s390_root_dev_register);
EXPORT_SYMBOL(s390_root_dev_unregister);
......@@ -79,6 +79,7 @@ struct ccw_device_private {
unsigned int esid:1; /* Ext. SenseID supported by HW */
unsigned int dosense:1; /* delayed SENSE required */
unsigned int doverify:1; /* delayed path verification */
unsigned int donotify:1; /* call notify function */
} __attribute__((packed)) flags;
unsigned long intparm; /* user interruption parameter */
struct qdio_irq *qdio_data;
......@@ -100,22 +101,32 @@ struct css_driver {
unsigned int subchannel_type;
struct device_driver drv;
void (*irq)(struct device *);
int (*notify)(struct device *, int);
void (*verify)(struct device *);
void (*termination)(struct device *);
};
/*
* all css_drivers have the css_bus_type
*/
extern struct bus_type css_bus_type;
extern struct css_driver io_subchannel_driver;
int css_probe_device(int irq);
extern struct subchannel * get_subchannel_by_schid(int irq);
extern unsigned int highest_subchannel;
extern int css_init_done;
#define __MAX_SUBCHANNELS 65536
extern struct subchannel *ioinfo[__MAX_SUBCHANNELS];
extern struct bus_type css_bus_type;
extern struct device css_bus_device;
/* Some helper functions for disconnected state. */
int device_is_disconnected(struct subchannel *);
void device_set_disconnected(struct subchannel *);
void device_trigger_reprobe(struct subchannel *);
/* Helper function for vary on/off. */
void device_set_waiting(struct subchannel *);
#endif
/*
* drivers/s390/cio/device.c
* bus driver for ccw devices
* $Revision: 1.70 $
* $Revision: 1.85 $
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
......@@ -115,16 +115,24 @@ struct bus_type ccw_bus_type = {
};
static int io_subchannel_probe (struct device *);
static int io_subchannel_remove (struct device *);
void io_subchannel_irq (struct device *);
static int io_subchannel_notify(struct device *, int);
static void io_subchannel_verify(struct device *);
static void io_subchannel_ioterm(struct device *);
static struct css_driver io_subchannel_driver = {
struct css_driver io_subchannel_driver = {
.subchannel_type = SUBCHANNEL_TYPE_IO,
.drv = {
.name = "io_subchannel",
.bus = &css_bus_type,
.probe = &io_subchannel_probe,
.remove = &io_subchannel_remove,
},
.irq = io_subchannel_irq,
.notify = io_subchannel_notify,
.verify = io_subchannel_verify,
.termination = io_subchannel_ioterm,
};
struct workqueue_struct *ccw_device_work;
......@@ -232,41 +240,58 @@ online_show (struct device *dev, char *buf)
return sprintf(buf, cdev->online ? "1\n" : "0\n");
}
void
int
ccw_device_set_offline(struct ccw_device *cdev)
{
int ret;
if (!cdev)
return;
return -ENODEV;
if (!cdev->online || !cdev->drv)
return;
if (cdev->drv->set_offline)
if (cdev->drv->set_offline(cdev) != 0)
return;
return -EINVAL;
if (cdev->private->state == DEV_STATE_DISCONNECTED) {
struct subchannel *sch;
/*
* Forced offline in disconnected state means
* 'throw away device'.
*/
sch = to_subchannel(cdev->dev.parent);
device_unregister(&sch->dev);
/* Reset intparm to zeroes. */
sch->schib.pmcw.intparm = 0;
cio_modify(sch);
put_device(&sch->dev);
return 0;
}
if (cdev->drv->set_offline) {
ret = cdev->drv->set_offline(cdev);
if (ret != 0)
return ret;
}
cdev->online = 0;
spin_lock_irq(cdev->ccwlock);
ret = ccw_device_offline(cdev);
spin_unlock_irq(cdev->ccwlock);
if (ret == 0)
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
else
//FIXME: we can't fail!
else {
pr_debug("ccw_device_offline returned %d, device %s\n",
ret, cdev->dev.bus_id);
cdev->online = 1;
}
return ret;
}
void
int
ccw_device_set_online(struct ccw_device *cdev)
{
int ret;
if (!cdev)
return;
return -ENODEV;
if (cdev->online || !cdev->drv)
return;
return -EINVAL;
spin_lock_irq(cdev->ccwlock);
ret = ccw_device_online(cdev);
......@@ -276,13 +301,13 @@ ccw_device_set_online(struct ccw_device *cdev)
else {
pr_debug("ccw_device_online returned %d, device %s\n",
ret, cdev->dev.bus_id);
return;
return ret;
}
if (cdev->private->state != DEV_STATE_ONLINE)
return;
return -ENODEV;
if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) {
cdev->online = 1;
return;
return 0;
}
spin_lock_irq(cdev->ccwlock);
ret = ccw_device_offline(cdev);
......@@ -292,6 +317,7 @@ ccw_device_set_online(struct ccw_device *cdev)
else
pr_debug("ccw_device_offline returned %d, device %s\n",
ret, cdev->dev.bus_id);
return (ret = 0) ? -ENODEV : ret;
}
static ssize_t
......@@ -340,7 +366,7 @@ stlck_store(struct device *dev, const char *buf, size_t count)
ccw_device_unbox_recog, (void *) cdev);
queue_work(ccw_device_work, &cdev->private->kick_work);
return 0;
return count;
}
static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
......@@ -430,7 +456,7 @@ ccw_device_add_stlck(void *data)
/* this is a simple abstraction for device_register that sets the
* correct bus type and adds the bus specific files */
static int
int
ccw_device_register(struct ccw_device *cdev)
{
struct device *dev = &cdev->dev;
......@@ -448,16 +474,20 @@ ccw_device_register(struct ccw_device *cdev)
}
void
ccw_device_unregister(void *data)
ccw_device_do_unreg_rereg(void *data)
{
struct device *dev;
dev = (struct device *)data;
device_remove_files(dev);
device_unregister(dev);
device_del(dev);
if (device_add(dev)) {
put_device(dev);
return;
}
if (device_add_files(dev))
device_unregister(dev);
}
static void
ccw_device_release(struct device *dev)
......@@ -482,6 +512,10 @@ io_subchannel_register(void *data)
cdev = (struct ccw_device *) data;
sch = to_subchannel(cdev->dev.parent);
if (!list_empty(&sch->dev.children)) {
bus_rescan_devices(&ccw_bus_type);
goto out;
}
/* make it known to the system */
ret = ccw_device_register(cdev);
if (ret) {
......@@ -495,8 +529,8 @@ io_subchannel_register(void *data)
ret = subchannel_add_files(cdev->dev.parent);
if (ret)
printk(KERN_WARNING "%s: could not add attributes to %04x\n",
__func__, sch->irq);
printk(KERN_WARNING "%s: could not add attributes to %s\n",
__func__, sch->dev.bus_id);
if (cdev->private->state == DEV_STATE_BOXED)
device_create_file(&cdev->dev, &dev_attr_steal_lock);
out:
......@@ -562,9 +596,9 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
atomic_inc(&ccw_device_init_count);
/* Start async. device sensing. */
spin_lock_irq(cdev->ccwlock);
spin_lock_irq(&sch->lock);
rc = ccw_device_recognition(cdev);
spin_unlock_irq(cdev->ccwlock);
spin_unlock_irq(&sch->lock);
if (rc) {
if (atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq);
......@@ -637,6 +671,60 @@ io_subchannel_probe (struct device *pdev)
return 0;
}
/*
 * Remove callback for io subchannels: push the attached ccw device into
 * the not-operational state and drop the reference kept in driver_data.
 */
static int
io_subchannel_remove (struct device *dev)
{
	struct ccw_device *cdev = dev->driver_data;

	if (!cdev)
		return 0;
	/* Set ccw device to not operational and drop reference. */
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	dev->driver_data = NULL;
	put_device(&cdev->dev);
	return 0;
}
/*
 * Forward a subchannel event to the ccw driver's notify hook.
 * Only done for an attached, online device whose driver implements
 * notify; in every other case report 0 (driver does not care).
 */
static int
io_subchannel_notify(struct device *dev, int event)
{
	struct ccw_device *cdev = dev->driver_data;

	if (!cdev || !cdev->drv || !cdev->online || !cdev->drv->notify)
		return 0;
	return cdev->drv->notify(cdev, event);
}
/* Trigger delayed path verification on the attached ccw device, if any. */
static void
io_subchannel_verify(struct device *dev)
{
	struct ccw_device *cdev = dev->driver_data;

	if (!cdev)
		return;
	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
}
/*
 * I/O termination callback: move the device into the clear/verify state
 * and report -EIO to the device driver's interrupt handler for the
 * interrupted request.
 */
static void
io_subchannel_ioterm(struct device *dev)
{
	struct ccw_device *cdev = dev->driver_data;

	if (!cdev)
		return;
	cdev->private->state = DEV_STATE_CLEAR_VERIFY;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
}
#ifdef CONFIG_CCW_CONSOLE
static struct ccw_device console_cdev;
static struct ccw_device_private console_private;
......@@ -652,25 +740,28 @@ ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch)
.parent = &sch->dev,
};
/* Initialize the subchannel structure */
sch->dev = (struct device) {
.parent = &css_bus_device,
.bus = &css_bus_type,
};
sch->dev.parent = &css_bus_device;
sch->dev.bus = &css_bus_type;
rc = io_subchannel_recog(cdev, sch);
if (rc)
return rc;
/* Now wait for the async. recognition to come to an end. */
spin_lock_irq(cdev->ccwlock);
while (!dev_fsm_final_state(cdev))
wait_cons_dev();
rc = -EIO;
if (cdev->private->state != DEV_STATE_OFFLINE)
return -EIO;
goto out_unlock;
ccw_device_online(cdev);
while (!dev_fsm_final_state(cdev))
wait_cons_dev();
if (cdev->private->state != DEV_STATE_ONLINE)
return -EIO;
goto out_unlock;
rc = 0;
out_unlock:
spin_unlock_irq(cdev->ccwlock);
return 0;
}
......@@ -767,18 +858,28 @@ ccw_device_remove (struct device *dev)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct ccw_driver *cdrv = cdev->drv;
int ret;
/*
* Set device offline, so device drivers don't end up with
* doubled code.
* This is safe because of the checks in ccw_device_set_offline.
*/
pr_debug("removing device %s, sch %d, devno %x\n",
cdev->dev.name,
cdev->dev.bus_id,
cdev->private->irq,
cdev->private->devno);
ccw_device_set_offline(cdev);
return cdrv->remove ? cdrv->remove(cdev) : 0;
if (cdrv->remove)
cdrv->remove(cdev);
if (cdev->online) {
cdev->online = 0;
spin_lock_irq(cdev->ccwlock);
ret = ccw_device_offline(cdev);
spin_unlock_irq(cdev->ccwlock);
if (ret == 0)
wait_event(cdev->private->wait_q,
dev_fsm_final_state(cdev));
else
//FIXME: we can't fail!
pr_debug("ccw_device_offline returned %d, device %s\n",
ret, cdev->dev.bus_id);
}
return 0;
}
int
......
......@@ -17,6 +17,10 @@ enum dev_state {
/* states to wait for i/o completion before doing something */
DEV_STATE_CLEAR_VERIFY,
DEV_STATE_TIMEOUT_KILL,
DEV_STATE_WAIT4IO,
/* special states for devices gone not operational */
DEV_STATE_DISCONNECTED,
DEV_STATE_DISCONNECTED_SENSE_ID,
/* last element! */
NR_DEV_STATES
};
......@@ -63,7 +67,9 @@ extern struct workqueue_struct *ccw_device_work;
void io_subchannel_recog_done(struct ccw_device *cdev);
void ccw_device_unregister(void *);
int ccw_device_register(struct ccw_device *);
void ccw_device_do_unreg_rereg(void *);
int ccw_device_recognition(struct ccw_device *);
int ccw_device_online(struct ccw_device *);
......
......@@ -19,8 +19,44 @@
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "qdio.h"
/*
 * Return non-zero if the ccw device attached to @sch is in one of the
 * disconnected states; 0 if no device is attached or it is connected.
 */
int
device_is_disconnected(struct subchannel *sch)
{
	struct ccw_device *cdev = sch->dev.driver_data;

	if (!cdev)
		return 0;
	return cdev->private->state == DEV_STATE_DISCONNECTED ||
	       cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID;
}
/* Mark the ccw device attached to @sch (if any) as disconnected. */
void
device_set_disconnected(struct subchannel *sch)
{
	struct ccw_device *cdev = sch->dev.driver_data;

	if (cdev)
		cdev->private->state = DEV_STATE_DISCONNECTED;
}
/*
 * Put the ccw device attached to @sch (if any) into the wait-for-i/o
 * state, arming a 10 second grace period for outstanding i/o to finish.
 */
void
device_set_waiting(struct subchannel *sch)
{
	struct ccw_device *cdev = sch->dev.driver_data;

	if (!cdev)
		return;
	ccw_device_set_timeout(cdev, 10*HZ);
	cdev->private->state = DEV_STATE_WAIT4IO;
}
/*
* Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
*/
......@@ -60,39 +96,76 @@ ccw_device_set_timeout(struct ccw_device *cdev, int expires)
* Cancel running i/o. This is called repeatedly since halt/clear are
* asynchronous operations. We do one try with cio_cancel, two tries
* with cio_halt, 255 tries with cio_clear. If everythings fails panic.
* Returns 0 if device now idle, -ENODEV for device not operational and
* -EBUSY if an interrupt is expected (either from halt/clear or from a
* status pending).
*/
static int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
struct subchannel *sch;
int ret;
sch = to_subchannel(cdev->dev.parent);
if (sch->schib.scsw.actl == 0)
ret = stsch(sch->irq, &sch->schib);
if (ret || !sch->schib.pmcw.dnv)
return -ENODEV;
if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
/* Not operational or no activity -> done. */
return 0;
/* Stage 1: cancel io. */
if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
if (cio_cancel (sch) == 0)
return 0;
ret = cio_cancel(sch);
if (ret != -EINVAL)
return ret;
/* cancel io unsuccessful. From now on it is asynchronous. */
cdev->private->iretry = 3; /* 3 halt retries. */
}
if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
/* Stage 2: halt io. */
while (cdev->private->iretry-- > 0)
if (cio_halt (sch) == 0)
return -EBUSY;
if (cdev->private->iretry) {
cdev->private->iretry--;
ret = cio_halt(sch);
return (ret == 0) ? -EBUSY : ret;
}
/* halt io unsuccessful. */
cdev->private->iretry = 255; /* 255 clear retries. */
}
/* Stage 3: clear io. */
while (cdev->private->iretry-- > 0)
if (cio_clear (sch) == 0)
return -EBUSY;
if (cdev->private->iretry) {
cdev->private->iretry--;
ret = cio_clear (sch);
return (ret == 0) ? -EBUSY : ret;
}
panic("Can't stop i/o on subchannel.\n");
}
/*
 * A previously disconnected device has become operational again.
 * If its sense-id data no longer matches the stored device id, it is
 * a different device and must be de-/re-registered (via kick_work);
 * otherwise arm the donotify flag and bring it online again.
 *
 * Note: the unused local 'sch' (assigned from cdev->dev.parent but never
 * read) has been removed.
 */
static void
ccw_device_handle_oper(struct ccw_device *cdev)
{
	/*
	 * Check if cu type and device type still match. If
	 * not, it is certainly another device and we have to
	 * de- and re-register.
	 */
	if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
	    cdev->id.cu_model != cdev->private->senseid.cu_model ||
	    cdev->id.dev_type != cdev->private->senseid.dev_type ||
	    cdev->id.dev_model != cdev->private->senseid.dev_model) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_do_unreg_rereg, (void *)&cdev->dev);
		queue_work(ccw_device_work, &cdev->private->kick_work);
		return;
	}
	cdev->private->flags.donotify = 1;
	/* Get device online again. */
	ccw_device_online(cdev);
}
/*
* Stop device recognition.
*/
......@@ -100,41 +173,54 @@ static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
struct subchannel *sch;
int notify;
sch = to_subchannel(cdev->dev.parent);
ccw_device_set_timeout(cdev, 0);
cio_disable_subchannel(sch);
cdev->private->state = state;
if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
if (state == DEV_STATE_NOT_OPER) {
cdev->private->state = DEV_STATE_DISCONNECTED;
return;
}
/* Boxed devices don't need extra treatment. */
}
notify = 0;
switch (state) {
case DEV_STATE_NOT_OPER:
CIO_DEBUG(KERN_WARNING, 2,
"SenseID : unknown device %04X on subchannel %04X\n",
sch->schib.pmcw.dev, sch->irq);
"SenseID : unknown device %s on subchannel %s\n",
cdev->dev.bus_id, sch->dev.bus_id);
break;
case DEV_STATE_OFFLINE:
/* fill out sense information */
cdev->id = (struct ccw_device_id) {
.cu_type = cdev->private->senseid.cu_type,
.cu_model = cdev->private->senseid.cu_model,
.dev_type = cdev->private->senseid.dev_type,
.dev_model = cdev->private->senseid.dev_model,
};
if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
notify = 1;
else /* fill out sense information */
cdev->id = (struct ccw_device_id) {
.cu_type = cdev->private->senseid.cu_type,
.cu_model = cdev->private->senseid.cu_model,
.dev_type = cdev->private->senseid.dev_type,
.dev_model = cdev->private->senseid.dev_model,
};
/* Issue device info message. */
CIO_DEBUG(KERN_INFO, 2, "SenseID : device %04X reports: "
CIO_DEBUG(KERN_INFO, 2, "SenseID : device %s reports: "
"CU Type/Mod = %04X/%02X, Dev Type/Mod = "
"%04X/%02X\n", sch->schib.pmcw.dev,
"%04X/%02X\n", cdev->dev.bus_id,
cdev->id.cu_type, cdev->id.cu_model,
cdev->id.dev_type, cdev->id.dev_model);
break;
case DEV_STATE_BOXED:
CIO_DEBUG(KERN_WARNING, 2,
"SenseID : boxed device %04X on subchannel %04X\n",
sch->schib.pmcw.dev, sch->irq);
"SenseID : boxed device %s on subchannel %s\n",
cdev->dev.bus_id, sch->dev.bus_id);
break;
}
io_subchannel_recog_done(cdev);
cdev->private->state = state;
if (notify && state == DEV_STATE_OFFLINE)
ccw_device_handle_oper(cdev);
else
io_subchannel_recog_done(cdev);
if (state != DEV_STATE_NOT_OPER)
wake_up(&cdev->private->wait_q);
}
......@@ -158,6 +244,24 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err)
}
}
/*
 * Worker: tell the subchannel driver that a disconnected device is
 * operational again. If the driver declines (or has no notify hook),
 * de-/re-register the device; otherwise wake up waiters.
 */
static void
ccw_device_oper_notify(void *data)
{
	struct ccw_device *cdev = data;
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret = 0;

	if (sch->driver && sch->driver->notify)
		ret = sch->driver->notify(&sch->dev, CIO_OPER);
	if (!ret)
		/* Driver doesn't want device back. */
		ccw_device_do_unreg_rereg((void *)&cdev->dev);
	else
		wake_up(&cdev->private->wait_q);
}
/*
* Finished with online/offline processing.
*/
......@@ -179,13 +283,19 @@ ccw_device_done(struct ccw_device *cdev, int state)
if (state == DEV_STATE_BOXED) {
CIO_DEBUG(KERN_WARNING, 2,
"Boxed device %04X on subchannel %04X\n",
sch->schib.pmcw.dev, sch->irq);
"Boxed device %s on subchannel %s\n",
cdev->dev.bus_id, sch->dev.bus_id);
INIT_WORK(&cdev->private->kick_work,
ccw_device_add_stlck, (void *) cdev);
queue_work(ccw_device_work, &cdev->private->kick_work);
}
if (cdev->private->flags.donotify) {
cdev->private->flags.donotify = 0;
PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
(void *)cdev);
queue_work(ccw_device_work, &cdev->private->kick_work);
}
wake_up(&cdev->private->wait_q);
if (css_init_done && state != DEV_STATE_ONLINE)
......@@ -229,13 +339,15 @@ int
ccw_device_recognition(struct ccw_device *cdev)
{
struct subchannel *sch;
int ret;
if (cdev->private->state != DEV_STATE_NOT_OPER)
return -EINVAL;
sch = to_subchannel(cdev->dev.parent);
if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0)
ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
if (ret != 0)
/* Couldn't enable the subchannel for i/o. Sick device. */
return -ENODEV;
return ret;
/* After 60s the device recognition is considered to have failed. */
ccw_device_set_timeout(cdev, 60*HZ);
......@@ -259,10 +371,19 @@ ccw_device_recognition(struct ccw_device *cdev)
static void
ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
if (ccw_device_cancel_halt_clear(cdev) == 0)
int ret;
ret = ccw_device_cancel_halt_clear(cdev);
switch (ret) {
case 0:
ccw_device_recog_done(cdev, DEV_STATE_BOXED);
else
break;
case -ENODEV:
ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
break;
default:
ccw_device_set_timeout(cdev, 3*HZ);
}
}
......@@ -290,16 +411,19 @@ int
ccw_device_online(struct ccw_device *cdev)
{
struct subchannel *sch;
int ret;
if (cdev->private->state != DEV_STATE_OFFLINE)
return -EINVAL;
sch = to_subchannel(cdev->dev.parent);
if (css_init_done && !get_device(&cdev->dev))
return -ENODEV;
if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0) {
ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
if (ret != 0) {
/* Couldn't enable the subchannel for i/o. Sick device. */
dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
return -ENODEV;
if (ret == -ENODEV)
dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
return ret;
}
/* Do we want to do path grouping? */
if (!cdev->private->options.pgroup) {
......@@ -363,10 +487,19 @@ ccw_device_offline(struct ccw_device *cdev)
static void
ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
if (ccw_device_cancel_halt_clear(cdev) == 0)
int ret;
ret = ccw_device_cancel_halt_clear(cdev);
switch (ret) {
case 0:
ccw_device_done(cdev, DEV_STATE_BOXED);
else
break;
case -ENODEV:
ccw_device_done(cdev, DEV_STATE_NOT_OPER);
break;
default:
ccw_device_set_timeout(cdev, 3*HZ);
}
}
/*
......@@ -375,7 +508,10 @@ ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
/*
 * Device became not operational during recognition. If the recognition
 * was triggered for a disconnected device, fall back to the
 * disconnected state (keep the ccw_device around for a later reprobe);
 * otherwise finish recognition with "not operational".
 */
static void
ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		cdev->private->state = DEV_STATE_DISCONNECTED;
	else
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
}
/*
......@@ -385,9 +521,7 @@ static void
ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	cdev->private->state = DEV_STATE_NOT_OPER;
	/*
	 * Unregister directly instead of deferring via queue_work: the
	 * subchannel could be removed before the queued work would run,
	 * leaving the ccw_device dangling.
	 */
	device_unregister(&cdev->dev);
	wake_up(&cdev->private->wait_q);
}
......@@ -406,9 +540,20 @@ ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
// FIXME: not-oper indication to device driver ?
ccw_device_call_handler(cdev);
}
INIT_WORK(&cdev->private->kick_work,
ccw_device_unregister, (void *) &cdev->dev);
queue_work(ccw_device_work, &cdev->private->kick_work);
device_unregister(&cdev->dev);
wake_up(&cdev->private->wait_q);
}
/*
 * Handle a not-operational event for a disconnected device: the device
 * is gone for good, so mark it not operational, disable its subchannel
 * and unregister it. Waiters on the private wait queue are woken up.
 */
static void
ccw_device_disconnected_notoper(struct ccw_device *cdev,
				enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	cdev->private->state = DEV_STATE_NOT_OPER;
	cio_disable_subchannel(sch);
	device_unregister(&cdev->dev);
	wake_up(&cdev->private->wait_q);
}
......@@ -478,13 +623,18 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
/*
 * Timeout on a running I/O while the device is online: try to terminate
 * it. -EBUSY means termination is still pending -> go to the kill state
 * and retry in 3 seconds; -ENODEV means the device disappeared -> raise
 * a notoper event; otherwise report -ETIMEDOUT to the device driver.
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret == -ENODEV)
		dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	else if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}
......@@ -552,10 +702,17 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
if (ccw_device_cancel_halt_clear(cdev) != 0) {
int ret;
ret = ccw_device_cancel_halt_clear(cdev);
if (ret == -EBUSY) {
ccw_device_set_timeout(cdev, 3*HZ);
return;
}
if (ret == -ENODEV) {
dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
return;
}
//FIXME: Can we get here?
cdev->private->state = DEV_STATE_ONLINE;
if (cdev->handler)
......@@ -563,6 +720,68 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
ERR_PTR(-ETIMEDOUT));
}
/*
 * Interrupt while waiting for an I/O to terminate (WAIT4IO state, i.e.
 * a path is about to be logically switched off). Accumulate the status,
 * start basic sense if required, and reset the timeout once the device
 * is idle. If the handler consumed the status and a verification is
 * pending, start path verification now.
 */
static void
ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;
	struct subchannel *sch;

	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if (irb->scsw.stctl ==
	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		/* Pass unsolicited status straight to the device driver. */
		if (cdev->handler)
			cdev->handler (cdev, 0, irb);
		return;
	}
	/*
	 * Accumulate status and find out if a basic sense is needed.
	 * This is fine since we have already adapted the lpm.
	 */
	ccw_device_accumulate_irb(cdev, irb);
	if (cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			/* Sense started; wait for its interrupt. */
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}
	/* Iff device is idle, reset timeout. */
	sch = to_subchannel(cdev->dev.parent);
	if (!stsch(sch->irq, &sch->schib))
		if (sch->schib.scsw.actl == 0)
			ccw_device_set_timeout(cdev, 0);
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		ccw_device_online_verify(cdev, 0);
}
/*
 * Grace period for an I/O on a path being switched off has expired:
 * terminate the I/O. -EBUSY -> termination still pending, enter the
 * kill state and retry in 3 seconds; -ENODEV -> device disappeared,
 * raise a notoper event; otherwise report -ETIMEDOUT to the driver.
 */
static void
ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int rc;

	ccw_device_set_timeout(cdev, 0);
	rc = ccw_device_cancel_halt_clear(cdev);
	switch (rc) {
	case -EBUSY:
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		ccw_device_set_timeout(cdev, 3*HZ);
		break;
	case -ENODEV:
		dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		break;
	default:
		if (cdev->handler)
			cdev->handler(cdev, cdev->private->intparm,
				      ERR_PTR(-ETIMEDOUT));
	}
}
/*
 * A path verification was requested while an I/O is still running.
 * Just flag it here; the verification is started once the I/O has
 * terminated (and only if path grouping is in use).
 */
static void
ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* When the I/O has terminated, we have to start verification. */
	if (cdev->private->options.pgroup)
		cdev->private->flags.doverify = 1;
}
static void
ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
{
......@@ -586,6 +805,59 @@ ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
wake_up(&cdev->private->wait_q);
}
/*
 * (Re-)start device recognition for a disconnected device: enable the
 * subchannel, arm a 60 second deadline and start sense id.
 */
static void
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return;

	/* After 60s the device recognition is considered to have failed. */
	ccw_device_set_timeout(cdev, 60*HZ);

	cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
	ccw_device_sense_id_start(cdev);
}
/*
 * Re-probe a subchannel whose device went into the disconnected state.
 * Restores the pmcw settings that were lost with the machine check and
 * restarts device recognition under the subchannel lock.
 */
void
device_trigger_reprobe(struct subchannel *sch)
{
	struct ccw_device *cdev;
	unsigned long flags;

	if (!sch->dev.driver_data)
		return;
	cdev = sch->dev.driver_data;
	/* Only act on devices that are actually disconnected. */
	if (cdev->private->state != DEV_STATE_DISCONNECTED)
		return;
	spin_lock_irqsave(&sch->lock, flags);
	/* Re-set some bits in the pmcw that were lost. */
	sch->schib.pmcw.isc = 3;
	sch->schib.pmcw.csense = 1;
	sch->schib.pmcw.ena = 0;
	/* More than one path installed -> enable multipath mode. */
	if ((sch->lpm & (sch->lpm - 1)) != 0)
		sch->schib.pmcw.mp = 1;
	/* The subchannel intparm carries a pointer to its own struct. */
	sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
	ccw_device_start_id(cdev, 0);
	spin_unlock_irqrestore(&sch->lock, flags);
}
/*
 * An interrupt in state offline means a previous disable of the
 * subchannel was not successful (e.g. status was still pending).
 * Simply try the disable again.
 */
static void
ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	cio_disable_subchannel(to_subchannel(cdev->dev.parent));
}
/*
* No operation action. This is used e.g. to ignore a timeout event in
* state offline.
......@@ -630,7 +902,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
},
[DEV_STATE_OFFLINE] {
[DEV_EVENT_NOTOPER] ccw_device_offline_notoper,
[DEV_EVENT_INTERRUPT] ccw_device_bug,
[DEV_EVENT_INTERRUPT] ccw_device_offline_irq,
[DEV_EVENT_TIMEOUT] ccw_device_nop,
[DEV_EVENT_VERIFY] ccw_device_nop,
},
......@@ -677,6 +949,25 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_EVENT_TIMEOUT] ccw_device_killing_timeout,
[DEV_EVENT_VERIFY] ccw_device_nop, //FIXME
},
[DEV_STATE_WAIT4IO] {
[DEV_EVENT_NOTOPER] ccw_device_online_notoper,
[DEV_EVENT_INTERRUPT] ccw_device_wait4io_irq,
[DEV_EVENT_TIMEOUT] ccw_device_wait4io_timeout,
[DEV_EVENT_VERIFY] ccw_device_wait4io_verify,
},
/* special states for devices gone not operational */
[DEV_STATE_DISCONNECTED] {
[DEV_EVENT_NOTOPER] ccw_device_disconnected_notoper,
[DEV_EVENT_INTERRUPT] ccw_device_start_id,
[DEV_EVENT_TIMEOUT] ccw_device_bug,
[DEV_EVENT_VERIFY] ccw_device_nop,
},
[DEV_STATE_DISCONNECTED_SENSE_ID] {
[DEV_EVENT_NOTOPER] ccw_device_recog_notoper,
[DEV_EVENT_INTERRUPT] ccw_device_sense_id_irq,
[DEV_EVENT_TIMEOUT] ccw_device_recog_timeout,
[DEV_EVENT_VERIFY] ccw_device_nop,
},
};
/*
......@@ -686,13 +977,12 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
/*
 * Interrupt entry point for I/O subchannels: look up the ccw_device
 * attached to the subchannel, trace the event by bus id and feed an
 * interrupt event into the device state machine.
 */
void
io_subchannel_irq (struct device *pdev)
{
	struct ccw_device *cdev;

	cdev = to_subchannel(pdev)->dev.driver_data;

	/* Trace by bus id instead of the (meaningless) subchannel number. */
	CIO_TRACE_EVENT (3, "IRQ");
	CIO_TRACE_EVENT (3, pdev->bus_id);

	dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
}
......
......@@ -22,6 +22,7 @@
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
/*
* diag210 is used under VM to get information about a virtual device
......@@ -195,15 +196,11 @@ __ccw_device_sense_id_start(struct ccw_device *cdev)
ret = -ENODEV;
while (cdev->private->imask != 0) {
if ((sch->lpm & cdev->private->imask) != 0 &&
cdev->private->iretry-- > 0) {
/* 0x00E2C9C4 == ebcdic "SID" */
cdev->private->iretry > 0) {
cdev->private->iretry--;
ret = cio_start (sch, cdev->private->iccws,
0x00E2C9C4, cdev->private->imask);
cdev->private->imask);
/* ret is 0, -EBUSY, -EACCES or -ENODEV */
if (ret == -EBUSY) {
udelay(100);
continue;
}
if (ret != -EACCES)
return ret;
}
......@@ -223,7 +220,7 @@ ccw_device_sense_id_start(struct ccw_device *cdev)
cdev->private->imask = 0x80;
cdev->private->iretry = 5;
ret = __ccw_device_sense_id_start(cdev);
if (ret)
if (ret && ret != -EBUSY)
ccw_device_sense_id_done(cdev, ret);
}
......@@ -255,16 +252,16 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
* if the device doesn't support the SenseID
* command further retries wouldn't help ...
*/
CIO_MSG_EVENT(2, "SenseID : device %04X on Subchannel %04X "
CIO_MSG_EVENT(2, "SenseID : device %s on Subchannel %s "
"reports cmd reject or intervention required\n",
sch->schib.pmcw.dev, sch->irq);
cdev->dev.bus_id, sch->dev.bus_id);
return -EOPNOTSUPP;
}
if (irb->esw.esw0.erw.cons) {
CIO_MSG_EVENT(2, "SenseID : UC on dev %04X, "
CIO_MSG_EVENT(2, "SenseID : UC on dev %s, "
"lpum %02X, cnt %02d, sns :"
" %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
sch->schib.pmcw.dev,
cdev->dev.bus_id,
irb->esw.esw0.sublog.lpum,
irb->esw.esw0.erw.scnt,
irb->ecw[0], irb->ecw[1],
......@@ -274,15 +271,15 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
return -EAGAIN;
}
if (irb->scsw.cc == 3) {
CIO_MSG_EVENT(2, "SenseID : path %02X for device %04X on "
"subchannel %04X is 'not operational'\n",
sch->orb.lpm, sch->schib.pmcw.dev, sch->irq);
CIO_MSG_EVENT(2, "SenseID : path %02X for device %s on "
"subchannel %s is 'not operational'\n",
sch->orb.lpm, cdev->dev.bus_id, sch->dev.bus_id);
return -EACCES;
}
/* Hmm, whatever happened, try again. */
CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04X on "
"subchannel %04X returns status %02X%02X\n",
sch->schib.pmcw.dev, sch->irq,
CIO_MSG_EVENT(2, "SenseID : start_IO() for device %s on "
"subchannel %s returns status %02X%02X\n",
cdev->dev.bus_id, sch->dev.bus_id,
irb->scsw.dstat, irb->scsw.cstat);
return -EAGAIN;
}
......@@ -299,11 +296,17 @@ ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event)
sch = to_subchannel(cdev->dev.parent);
irb = (struct irb *) __LC_IRB;
/* Ignore unsolicited interrupts. */
/*
* Unsolicited interrupts may pertain to an earlier status pending or
* busy condition on the subchannel. Retry sense id.
*/
if (irb->scsw.stctl ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
ret = __ccw_device_sense_id_start(cdev);
if (ret && ret != -EBUSY)
ccw_device_sense_id_done(cdev, ret);
return;
}
if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
return;
ret = ccw_device_check_sense_id(cdev);
......
......@@ -49,6 +49,8 @@ ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
if (!cdev)
return -ENODEV;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE &&
cdev->private->state != DEV_STATE_W4SENSE)
return -EINVAL;
......@@ -73,6 +75,8 @@ ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
sch = to_subchannel(cdev->dev.parent);
if (!sch)
return -ENODEV;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE ||
sch->schib.scsw.actl != 0 ||
cdev->private->flags.doverify)
......@@ -80,8 +84,7 @@ ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
ret = cio_set_options (sch, flags);
if (ret)
return ret;
/* 0xe4e2c5d9 == ebcdic "USER" */
ret = cio_start (sch, cpa, 0xe4e2c5d9, lpm);
ret = cio_start (sch, cpa, lpm);
if (ret == 0)
cdev->private->intparm = intparm;
return ret;
......@@ -111,6 +114,8 @@ ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
if (!cdev)
return -ENODEV;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE &&
cdev->private->state != DEV_STATE_W4SENSE)
return -EINVAL;
......@@ -133,6 +138,8 @@ ccw_device_resume(struct ccw_device *cdev)
sch = to_subchannel(cdev->dev.parent);
if (!sch)
return -ENODEV;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE ||
!(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED))
return -EINVAL;
......@@ -251,7 +258,7 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic)
sch = to_subchannel(cdev->dev.parent);
do {
ret = cio_start (sch, ccw, magic, 0);
ret = cio_start (sch, ccw, 0);
if ((ret == -EBUSY) || (ret == -EACCES)) {
/* Try again later. */
spin_unlock_irq(&sch->lock);
......@@ -302,7 +309,6 @@ int
read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
{
void (*handler)(struct ccw_device *, unsigned long, struct irb *);
char dbf_txt[15];
struct subchannel *sch;
int ret;
struct ccw1 *rdc_ccw;
......@@ -313,8 +319,8 @@ read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
return -EINVAL;
sch = to_subchannel(cdev->dev.parent);
sprintf (dbf_txt, "rddevch%x", sch->irq);
CIO_TRACE_EVENT (4, dbf_txt);
CIO_TRACE_EVENT (4, "rddevch");
CIO_TRACE_EVENT (4, sch->dev.bus_id);
rdc_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
if (!rdc_ccw)
......@@ -359,7 +365,6 @@ int
read_conf_data (struct ccw_device *cdev, void **buffer, int *length)
{
void (*handler)(struct ccw_device *, unsigned long, struct irb *);
char dbf_txt[15];
struct subchannel *sch;
struct ciw *ciw;
char *rcd_buf;
......@@ -372,8 +377,8 @@ read_conf_data (struct ccw_device *cdev, void **buffer, int *length)
return -EINVAL;
sch = to_subchannel(cdev->dev.parent);
sprintf (dbf_txt, "rdconf%x", sch->irq);
CIO_TRACE_EVENT (4, dbf_txt);
CIO_TRACE_EVENT (4, "rdconf");
CIO_TRACE_EVENT (4, sch->dev.bus_id);
/*
* scan for RCD command in extended SenseID data
......@@ -449,21 +454,25 @@ ccw_device_stlck(struct ccw_device *cdev)
CIO_TRACE_EVENT(2, "stl lock");
CIO_TRACE_EVENT(2, cdev->dev.bus_id);
spin_lock_irqsave(&sch->lock, flags);
ret = cio_enable_subchannel(sch, 3);
if (ret)
goto out_unlock;
/* Setup ccw. This cmd code seems not to be in use elsewhere. */
cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK;
cdev->private->iccws[0].cda = (__u32) __pa(buf);
cdev->private->iccws[0].count = 32;
cdev->private->iccws[0].flags = CCW_FLAG_SLI;
spin_lock_irqsave(&sch->lock, flags);
ret = cio_start(sch, cdev->private->iccws, 0xE2D3C3D2, 0);
ret = cio_start(sch, cdev->private->iccws, 0);
if (ret) {
cio_disable_subchannel(sch); //FIXME: return code?
goto out_unlock;
}
spin_unlock_irqrestore(&sch->lock, flags);
if (ret)
return ret;
wait_event(cdev->private->wait_q, sch->schib.scsw.actl == 0);
wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0);
spin_lock_irqsave(&sch->lock, flags);
cio_disable_subchannel(sch); //FIXME: return code?
if ((cdev->private->irb.scsw.dstat !=
(DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
(cdev->private->irb.scsw.cstat != 0))
......@@ -471,8 +480,8 @@ ccw_device_stlck(struct ccw_device *cdev)
/* Clear irb. */
memset(&cdev->private->irb, 0, sizeof(struct irb));
out_unlock:
spin_unlock_irqrestore(&sch->lock, flags);
return ret;
}
......
......@@ -48,25 +48,17 @@ __ccw_device_sense_pgid_start(struct ccw_device *cdev)
ret = -ENODEV;
while (cdev->private->imask != 0) {
/* Try every path multiple times. */
if (cdev->private->iretry-- > 0) {
/* 0xe2d5c9c4 == ebcdic "SNID" */
if (cdev->private->iretry > 0) {
cdev->private->iretry--;
ret = cio_start (sch, cdev->private->iccws,
0xE2D5C9C4, cdev->private->imask);
cdev->private->imask);
/* ret is 0, -EBUSY, -EACCES or -ENODEV */
if (ret == -EBUSY) {
CIO_MSG_EVENT(2,
"SNID - device %04X, start_io() "
"reports rc : %d, retrying ...\n",
sch->schib.pmcw.dev, ret);
udelay(100);
continue;
}
if (ret != -EACCES)
return ret;
CIO_MSG_EVENT(2, "SNID - Device %04X on Subchannel "
"%04X, lpm %02X, became 'not "
CIO_MSG_EVENT(2, "SNID - Device %s on Subchannel "
"%s, lpm %02X, became 'not "
"operational'\n",
sch->schib.pmcw.dev, sch->irq,
cdev->dev.bus_id, sch->dev.bus_id,
cdev->private->imask);
}
......@@ -86,7 +78,7 @@ ccw_device_sense_pgid_start(struct ccw_device *cdev)
cdev->private->iretry = 5;
memset (&cdev->private->pgid, 0, sizeof (struct pgid));
ret = __ccw_device_sense_pgid_start(cdev);
if (ret)
if (ret && ret != -EBUSY)
ccw_device_sense_pgid_done(cdev, ret);
}
......@@ -113,10 +105,10 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
return -EOPNOTSUPP;
}
if (irb->esw.esw0.erw.cons) {
CIO_MSG_EVENT(2, "SNID - device %04X, unit check, "
CIO_MSG_EVENT(2, "SNID - device %s, unit check, "
"lpum %02X, cnt %02d, sns : "
"%02X%02X%02X%02X %02X%02X%02X%02X ...\n",
sch->schib.pmcw.dev,
cdev->dev.bus_id,
irb->esw.esw0.sublog.lpum,
irb->esw.esw0.erw.scnt,
irb->ecw[0], irb->ecw[1],
......@@ -126,16 +118,15 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
return -EAGAIN;
}
if (irb->scsw.cc == 3) {
CIO_MSG_EVENT(2, "SNID - Device %04X on Subchannel "
"%04X, lpm %02X, became 'not "
"operational'\n",
sch->schib.pmcw.dev, sch->irq, sch->orb.lpm);
CIO_MSG_EVENT(2, "SNID - Device %s on Subchannel "
"%s, lpm %02X, became 'not operational'\n",
cdev->dev.bus_id, sch->dev.bus_id, sch->orb.lpm);
return -EACCES;
}
if (cdev->private->pgid.inf.ps.state2 == SNID_STATE2_RESVD_ELSE) {
CIO_MSG_EVENT(2, "SNID - Device %04X on Subchannel %04X "
CIO_MSG_EVENT(2, "SNID - Device %s on Subchannel %s "
"is reserved by someone else\n",
sch->schib.pmcw.dev, sch->irq);
cdev->dev.bus_id, sch->dev.bus_id);
return -EUSERS;
}
return 0;
......@@ -150,34 +141,30 @@ ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event)
struct subchannel *sch;
struct irb *irb;
int ret;
int opm;
int i;
irb = (struct irb *) __LC_IRB;
/* Ignore unsolicited interrupts. */
/*
* Unsolicited interrupts may pertain to an earlier status pending or
* busy condition on the subchannel. Retry sense pgid.
*/
if (irb->scsw.stctl ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
ret = __ccw_device_sense_pgid_start(cdev);
if (ret && ret != -EBUSY)
ccw_device_sense_pgid_done(cdev, ret);
return;
}
if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
return;
sch = to_subchannel(cdev->dev.parent);
switch (__ccw_device_check_sense_pgid(cdev)) {
/* 0, -ETIME, -EOPNOTSUPP, -EAGAIN, -EACCES or -EUSERS */
case 0: /* Sense Path Group ID successful. */
opm = sch->schib.pmcw.pim &
sch->schib.pmcw.pam &
sch->schib.pmcw.pom;
for (i=0;i<8;i++) {
if (opm == (0x80 << i)) {
/* Don't group single path devices. */
cdev->private->options.pgroup = 0;
break;
}
}
if (cdev->private->pgid.inf.ps.state1 == SNID_STATE1_RESET)
memcpy(&cdev->private->pgid, &global_pgid,
sizeof(struct pgid));
/* fall through. */
ccw_device_sense_pgid_done(cdev, 0);
break;
case -EOPNOTSUPP: /* Sense Path Group ID not supported */
ccw_device_sense_pgid_done(cdev, -EOPNOTSUPP);
break;
......@@ -235,26 +222,20 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
/* Try multiple times. */
ret = -ENODEV;
while (cdev->private->iretry-- > 0) {
/* 0xE2D7C9C4 == ebcdic "SPID" */
if (cdev->private->iretry > 0) {
cdev->private->iretry--;
ret = cio_start (sch, cdev->private->iccws,
0xE2D7C9C4, cdev->private->imask);
cdev->private->imask);
/* ret is 0, -EBUSY, -EACCES or -ENODEV */
if (ret == -EACCES)
break;
if (ret != -EBUSY)
if (ret != -EACCES)
return ret;
udelay(100);
continue;
}
/* PGID command failed on this path. Switch it off. */
sch->lpm &= ~cdev->private->imask;
sch->vpm &= ~cdev->private->imask;
CIO_MSG_EVENT(2, "SPID - Device %04X on Subchannel "
"%04X, lpm %02X, became 'not "
"operational'\n",
sch->schib.pmcw.dev, sch->irq,
cdev->private->imask);
CIO_MSG_EVENT(2, "SPID - Device %s on Subchannel "
"%s, lpm %02X, became 'not operational'\n",
cdev->dev.bus_id, sch->dev.bus_id, cdev->private->imask);
return ret;
}
......@@ -276,9 +257,9 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
if (irb->ecw[0] & SNS0_CMD_REJECT)
return -EOPNOTSUPP;
/* Hmm, whatever happened, try again. */
CIO_MSG_EVENT(2, "SPID - device %04X, unit check, cnt %02d, "
CIO_MSG_EVENT(2, "SPID - device %s, unit check, cnt %02d, "
"sns : %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
sch->schib.pmcw.dev, irb->esw.esw0.erw.scnt,
cdev->dev.bus_id, irb->esw.esw0.erw.scnt,
irb->ecw[0], irb->ecw[1],
irb->ecw[2], irb->ecw[3],
irb->ecw[4], irb->ecw[5],
......@@ -286,10 +267,9 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
return -EAGAIN;
}
if (irb->scsw.cc == 3) {
CIO_MSG_EVENT(2, "SPID - Device %04X on Subchannel "
"%04X, lpm %02X, became 'not "
"operational'\n",
sch->schib.pmcw.dev, sch->irq,
CIO_MSG_EVENT(2, "SPID - Device %s on Subchannel "
"%s, lpm %02X, became 'not operational'\n",
cdev->dev.bus_id, sch->dev.bus_id,
cdev->private->imask);
return -EACCES;
}
......@@ -313,7 +293,7 @@ __ccw_device_verify_start(struct ccw_device *cdev)
func = (sch->vpm & imask) ?
SPID_FUNC_RESIGN : SPID_FUNC_ESTABLISH;
ret = __ccw_device_do_pgid(cdev, func);
if (ret == 0)
if (ret == 0 || ret == -EBUSY)
return;
cdev->private->iretry = 5;
}
......@@ -330,10 +310,15 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
struct irb *irb;
irb = (struct irb *) __LC_IRB;
/* Ignore unsolicited interrupts. */
/*
* Unsolicited interrupts may pertain to an earlier status pending or
* busy condition on the subchannel. Restart path verification.
*/
if (irb->scsw.stctl ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
__ccw_device_verify_start(cdev);
return;
}
if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
return;
sch = to_subchannel(cdev->dev.parent);
......
......@@ -62,8 +62,8 @@ ccw_device_path_notoper(struct ccw_device *cdev)
sch = to_subchannel(cdev->dev.parent);
stsch (sch->irq, &sch->schib);
CIO_MSG_EVENT(0, "%s(%04X) - path(s) %02x are "
"not operational \n", __FUNCTION__, sch->irq,
CIO_MSG_EVENT(0, "%s(%s) - path(s) %02x are "
"not operational \n", __FUNCTION__, sch->dev.bus_id,
sch->schib.pmcw.pnom);
sch->lpm &= ~sch->schib.pmcw.pnom;
......@@ -321,8 +321,7 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
sch->sense_ccw.count = SENSE_MAX_COUNT;
sch->sense_ccw.flags = CCW_FLAG_SLI;
/* 0xe2C5D5E2 == "SENS" in ebcdic */
return cio_start (sch, &sch->sense_ccw, 0xE2C5D5E2, 0xff);
return cio_start (sch, &sch->sense_ccw, 0xff);
}
/*
......
......@@ -56,7 +56,7 @@
#include "ioasm.h"
#include "chsc.h"
#define VERSION_QDIO_C "$Revision: 1.62 $"
#define VERSION_QDIO_C "$Revision: 1.67 $"
/****************** MODULE PARAMETER VARIABLES ********************/
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>");
......@@ -80,6 +80,7 @@ static int hydra_thinints;
static int indicator_used[INDICATORS_PER_CACHELINE];
static __u32 * volatile indicators;
static __u32 volatile spare_indicator;
static atomic_t spare_indicator_usecount;
static debug_info_t *qdio_dbf_setup;
static debug_info_t *qdio_dbf_sbal;
......@@ -238,7 +239,7 @@ qdio_get_indicator(void)
indicator_used[i]=1;
return indicators+i;
}
atomic_inc(&spare_indicator_usecount);
return (__u32 * volatile) &spare_indicator;
}
......@@ -252,6 +253,8 @@ qdio_put_indicator(__u32 *addr)
i=addr-indicators;
indicator_used[i]=0;
}
if (addr == &spare_indicator)
atomic_dec(&spare_indicator_usecount);
}
static inline volatile void
......@@ -622,7 +625,8 @@ qdio_outbound_processing(struct qdio_q *q)
if (q->is_iqdio_q) {
/*
* for asynchronous queues, we better check, if the fill
* level is too high
* level is too high. for synchronous queues, the fill
* level will never be that high.
*/
if (atomic_read(&q->number_of_buffers_used)>
IQDIO_FILL_LEVEL_TO_POLL)
......@@ -920,7 +924,7 @@ qdio_kick_inbound_handler(struct qdio_q *q)
}
static inline void
tiqdio_inbound_processing(struct qdio_q *q)
__tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
{
struct qdio_irq *irq_ptr;
struct qdio_q *oq;
......@@ -954,10 +958,21 @@ tiqdio_inbound_processing(struct qdio_q *q)
goto out;
}
if (!(*(q->dev_st_chg_ind)))
goto out;
/*
* we reset spare_ind_was_set, when the queue does not use the
* spare indicator
*/
if (spare_ind_was_set)
spare_ind_was_set = (q->dev_st_chg_ind == &spare_indicator);
tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind);
if (!(*(q->dev_st_chg_ind)) && !spare_ind_was_set)
goto out;
/*
* q->dev_st_chg_ind is the indicator, be it shared or not.
* only clear it, if indicator is non-shared
*/
if (!spare_ind_was_set)
tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind);
if (q->hydra_gives_outbound_pcis) {
if (!q->siga_sync_done_on_thinints) {
......@@ -1003,6 +1018,12 @@ tiqdio_inbound_processing(struct qdio_q *q)
qdio_release_q(q);
}
/*
 * Tasklet entry point for thin-interrupt inbound processing; passes the
 * current spare-indicator use count so shared-indicator queues are
 * scanned even when their private indicator bit is not set.
 */
static void
tiqdio_inbound_processing(struct qdio_q *q)
{
	__tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount));
}
static void
qdio_inbound_processing(struct qdio_q *q)
{
......@@ -1106,6 +1127,7 @@ static inline void
tiqdio_inbound_checks(void)
{
struct qdio_q *q;
int spare_ind_was_set=0;
#ifdef QDIO_USE_PROCESSING_STATE
int q_laps=0;
#endif /* QDIO_USE_PROCESSING_STATE */
......@@ -1117,11 +1139,17 @@ tiqdio_inbound_checks(void)
again:
#endif /* QDIO_USE_PROCESSING_STATE */
/* when the spare indicator is used and set, save that and clear it */
if ((atomic_read(&spare_indicator_usecount)) && spare_indicator) {
spare_ind_was_set = 1;
tiqdio_clear_summary_bit((__u32*)&spare_indicator);
}
q=(struct qdio_q*)tiq_list;
do {
if (!q)
break;
tiqdio_inbound_processing(q);
__tiqdio_inbound_processing(q, spare_ind_was_set);
q=(struct qdio_q*)q->list_next;
} while (q!=(struct qdio_q*)tiq_list);
......@@ -1582,6 +1610,16 @@ qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm,
}
/*
 * Work queue callback: shut down the qdio queues of the given ccw
 * device (shutdown must not run in interrupt context) and drop the
 * device reference taken when the work was queued.
 */
static void
qdio_call_shutdown(void *data)
{
	struct ccw_device *cdev = data;

	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
	put_device(&cdev->dev);
}
static void
qdio_timeout_handler(struct ccw_device *cdev)
{
......@@ -1608,6 +1646,20 @@ qdio_timeout_handler(struct ccw_device *cdev)
irq_ptr->irq);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
break;
case QDIO_IRQ_STATE_ESTABLISHED:
case QDIO_IRQ_STATE_ACTIVE:
/* I/O has been terminated by common I/O layer. */
QDIO_PRINT_INFO("Queues on irq %04x killed by cio.\n",
irq_ptr->irq);
QDIO_DBF_TEXT2(1, trace, "cio:term");
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
if (get_device(&cdev->dev)) {
/* Can't call shutdown from interrupt context. */
PREPARE_WORK(&cdev->private->kick_work,
qdio_call_shutdown, (void *)cdev);
queue_work(ccw_device_work, &cdev->private->kick_work);
}
break;
default:
BUG();
}
......@@ -2098,7 +2150,7 @@ qdio_shutdown(struct ccw_device *cdev, int how)
!atomic_read(&irq_ptr->
input_qs[i]->
use_count),
QDIO_NO_USE_COUNT_TIMEOUT*HZ);
QDIO_NO_USE_COUNT_TIMEOUT);
if (atomic_read(&irq_ptr->input_qs[i]->use_count))
/*
* FIXME:
......@@ -2116,7 +2168,7 @@ qdio_shutdown(struct ccw_device *cdev, int how)
!atomic_read(&irq_ptr->
output_qs[i]->
use_count),
QDIO_NO_USE_COUNT_TIMEOUT*HZ);
QDIO_NO_USE_COUNT_TIMEOUT);
if (atomic_read(&irq_ptr->output_qs[i]->use_count))
/*
* FIXME:
......@@ -2134,23 +2186,34 @@ qdio_shutdown(struct ccw_device *cdev, int how)
/* cleanup subchannel */
spin_lock_irqsave(get_ccwdev_lock(cdev),flags);
if (how&QDIO_FLAG_CLEANUP_USING_CLEAR) {
ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
result = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
timeout=QDIO_CLEANUP_CLEAR_TIMEOUT;
} else if (how&QDIO_FLAG_CLEANUP_USING_HALT) {
ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
result = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
timeout=QDIO_CLEANUP_HALT_TIMEOUT;
} else { /* default behaviour */
ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
result = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
timeout=QDIO_CLEANUP_HALT_TIMEOUT;
}
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
ccw_device_set_timeout(cdev, timeout);
spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags);
wait_event(cdev->private->wait_q,
irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
irq_ptr->state == QDIO_IRQ_STATE_ERR);
if (result == -ENODEV) {
/* No need to wait for device no longer present. */
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
result = 0; /* No error. */
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
} else if (result == 0) {
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
ccw_device_set_timeout(cdev, timeout);
spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags);
wait_event(cdev->private->wait_q,
irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
irq_ptr->state == QDIO_IRQ_STATE_ERR);
} else {
QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for "
"device %s\n", result, cdev->dev.bus_id);
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
goto out;
}
if (irq_ptr->is_thinint_irq) {
qdio_put_indicator((__u32*)irq_ptr->dev_st_chg_ind);
tiqdio_set_subchannel_ind(irq_ptr,1);
......@@ -2796,7 +2859,7 @@ qdio_activate(struct ccw_device *cdev, int flags)
QDIO_IRQ_STATE_STOPPED) ||
(irq_ptr->state ==
QDIO_IRQ_STATE_ERR)),
(QDIO_ACTIVATE_TIMEOUT>>10)*HZ);
QDIO_ACTIVATE_TIMEOUT);
switch (irq_ptr->state) {
case QDIO_IRQ_STATE_STOPPED:
......
#ifndef _CIO_QDIO_H
#define _CIO_QDIO_H
#define VERSION_CIO_QDIO_H "$Revision: 1.20 $"
#define VERSION_CIO_QDIO_H "$Revision: 1.22 $"
//#define QDIO_DBF_LIKE_HELL
......@@ -56,14 +56,14 @@
#define QDIO_STATS_CLASSES 2
#define QDIO_STATS_COUNT_NEEDED 2*/
#define QDIO_NO_USE_COUNT_TIMEOUT 1000 /* wait for 1 sec on each q before
exiting without having use_count
of the queue to 0 */
#define QDIO_NO_USE_COUNT_TIMEOUT (1*HZ) /* wait for 1 sec on each q before
exiting without having use_count
of the queue to 0 */
#define QDIO_ESTABLISH_TIMEOUT 1000
#define QDIO_ACTIVATE_TIMEOUT 5
#define QDIO_CLEANUP_CLEAR_TIMEOUT 20000
#define QDIO_CLEANUP_HALT_TIMEOUT 10000
#define QDIO_ESTABLISH_TIMEOUT (1*HZ)
#define QDIO_ACTIVATE_TIMEOUT ((5*HZ)>>10)
#define QDIO_CLEANUP_CLEAR_TIMEOUT (20*HZ)
#define QDIO_CLEANUP_HALT_TIMEOUT (10*HZ)
enum qdio_irq_states {
QDIO_IRQ_STATE_INACTIVE,
......
......@@ -19,9 +19,12 @@
#define DBG printk
// #define DBG(args,...) do {} while (0);
static struct semaphore s_sem;
extern void css_process_crw(int);
extern void chsc_process_crw(void);
extern void chp_process_crw(int, int);
extern void css_reiterate_subchannels(void);
static void
s390_handle_damage(char *msg)
......@@ -38,12 +41,19 @@ s390_handle_damage(char *msg)
*
* Note : we currently process CRWs for io and chsc subchannels only
*/
static void
s390_collect_crw_info(void)
static int
s390_collect_crw_info(void *param)
{
struct crw crw;
int ccode;
struct semaphore *sem;
sem = (struct semaphore *)param;
/* Set a nice name. */
daemonize("kmcheck");
repeat:
down_interruptible(sem);
while (1) {
ccode = stcrw(&crw);
if (ccode != 0)
......@@ -52,6 +62,12 @@ s390_collect_crw_info(void)
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
crw.slct, crw.oflw, crw.chn, crw.rsc, crw.anc,
crw.erc, crw.rsid);
/* Check for overflows. */
if (crw.oflw) {
pr_debug("%s: crw overflow detected!\n", __FUNCTION__);
css_reiterate_subchannels();
continue;
}
switch (crw.rsc) {
case CRW_RSC_SCH:
pr_debug("source is subchannel %04X\n", crw.rsid);
......@@ -86,6 +102,8 @@ s390_collect_crw_info(void)
break;
}
}
goto repeat;
return 0;
}
/*
......@@ -122,7 +140,7 @@ s390_do_machine_check(void)
"check\n");
if (mci->cp) /* channel report word pending */
s390_collect_crw_info();
up(&s_sem);
#ifdef CONFIG_MACHCHK_WARNING
/*
......@@ -154,6 +172,7 @@ s390_do_machine_check(void)
static int
machine_check_init(void)
{
init_MUTEX_LOCKED( &s_sem );
ctl_clear_bit(14, 25); /* disable damage MCH */
ctl_set_bit(14, 26); /* enable degradation MCH */
ctl_set_bit(14, 27); /* enable system recovery MCH */
......@@ -176,6 +195,7 @@ arch_initcall(machine_check_init);
static int __init
machine_check_crw_init (void)
{
kernel_thread(s390_collect_crw_info, &s_sem, CLONE_FS|CLONE_FILES);
ctl_set_bit(14, 28); /* enable channel report MCH */
return 0;
}
......
......@@ -89,10 +89,11 @@ struct ccw_driver {
struct module *owner; /* for automatic MOD_INC_USE_COUNT */
struct ccw_device_id *ids; /* probe driver with these devs */
int (*probe) (struct ccw_device *); /* ask driver to probe dev */
int (*remove) (struct ccw_device *);
void (*remove) (struct ccw_device *);
/* device is no longer available */
int (*set_online) (struct ccw_device *);
int (*set_offline) (struct ccw_device *);
int (*notify) (struct ccw_device *, int);
struct device_driver driver; /* higher level structure, don't init
this from your driver */
char *name;
......@@ -149,8 +150,8 @@ extern int ccw_device_clear(struct ccw_device *, unsigned long);
extern int read_dev_chars(struct ccw_device *cdev, void **buffer, int length);
extern int read_conf_data(struct ccw_device *cdev, void **buffer, int *length);
extern void ccw_device_set_online(struct ccw_device *cdev);
extern void ccw_device_set_offline(struct ccw_device *cdev);
extern int ccw_device_set_online(struct ccw_device *cdev);
extern int ccw_device_set_offline(struct ccw_device *cdev);
extern struct ciw *ccw_device_get_ciw(struct ccw_device *, __u32 cmd);
......@@ -167,4 +168,7 @@ extern struct ccw_device *ccw_device_probe_console(void);
extern int _ccw_device_get_device_number(struct ccw_device *);
extern int _ccw_device_get_subchannel_number(struct ccw_device *);
extern struct device *s390_root_dev_register(const char *);
extern void s390_root_dev_unregister(struct device *);
#endif /* _S390_CCWDEV_H_ */
......@@ -21,8 +21,7 @@ struct ccwgroup_driver {
unsigned long driver_id;
int (*probe) (struct ccwgroup_device *);
int (*remove) (struct ccwgroup_device *);
int (*release) (struct ccwgroup_driver *);
void (*remove) (struct ccwgroup_device *);
int (*set_online) (struct ccwgroup_device *);
int (*set_offline) (struct ccwgroup_device *);
......@@ -37,7 +36,7 @@ extern int ccwgroup_create (struct device *root,
int argc, char *argv[]);
extern int ccwgroup_probe_ccwdev(struct ccw_device *cdev);
extern int ccwgroup_remove_ccwdev(struct ccw_device *cdev);
extern void ccwgroup_remove_ccwdev(struct ccw_device *cdev);
#define to_ccwgroupdev(x) container_of((x), struct ccwgroup_device, dev)
#define to_ccwgroupdrv(x) container_of((x), struct ccwgroup_driver, driver)
......
......@@ -247,6 +247,14 @@ struct ciw {
#define DOIO_DENY_PREFETCH 0x0002 /* don't allow for CCW prefetch */
#define DOIO_SUPPRESS_INTER 0x0004 /* suppress intermediate inter. */
/* ... for suspended CCWs */
/* Device or subchannel gone. */
#define CIO_GONE 0x0001
/* No path to device. */
#define CIO_NO_PATH 0x0002
/* Device has appeared. */
#define CIO_OPER 0x0004
/* Sick revalidation of device. */
#define CIO_REVALIDATE 0x0008
struct diag210 {
__u16 vrdcdvno : 16; /* device number (input) */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment