Commit c0d1a7eb authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Martin Schwidefsky:
 "common I/O layer
   - Fix bit-fields crossing storage-unit boundaries in css_general_char

  dasd driver
   - Avoid a sparse warning in regard to the queue lock
   - Allocate the struct dasd_ccw_req as per request data. Only for
     internal I/O is the structure allocated separately
   - Remove the unused function dasd_kmalloc_set_cda
   - Save a few bytes in struct dasd_ccw_req by reordering fields
   - Convert remaining users of dasd_kmalloc_request to
     dasd_smalloc_request and remove the now unused function

  vfio/ccw
   - Refactor and improve pfn_array_alloc_pin/pfn_array_pin
   - Add a new tracepoint for failed vfio/ccw requests
   - Add a CCW translation improvement to accept more requests as valid
   - Bug fixes"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/dasd: only use preallocated requests
  s390/dasd: reshuffle struct dasd_ccw_req
  s390/dasd: remove dasd_kmalloc_set_cda
  s390/dasd: move dasd_ccw_req to per request data
  s390/dasd: simplify locking in process_final_queue
  s390/cio: sanitize css_general_characteristics definition
  vfio: ccw: add tracepoints for interesting error paths
  vfio: ccw: set ccw->cda to NULL defensively
  vfio: ccw: refactor and improve pfn_array_alloc_pin()
  vfio: ccw: shorten kernel doc description for pfn_array_pin()
  vfio: ccw: push down unsupported IDA check
  vfio: ccw: fix error return in vfio_ccw_sch_event
  s390/archrandom: Rework arch random implementation.
  s390/net: add pnetid support
parents 6cc22dc0 ec530174
--- a/arch/s390/include/asm/css_chars.h
+++ b/arch/s390/include/asm/css_chars.h
@@ -6,36 +6,38 @@
 struct css_general_char {
 	u64 : 12;
-	u32 dynio : 1;	 /* bit 12 */
-	u32 : 4;
-	u32 eadm : 1;	 /* bit 17 */
-	u32 : 23;
-	u32 aif : 1;	 /* bit 41 */
-	u32 : 3;
-	u32 mcss : 1;	 /* bit 45 */
-	u32 fcs : 1;	 /* bit 46 */
-	u32 : 1;
-	u32 ext_mb : 1;	 /* bit 48 */
-	u32 : 7;
-	u32 aif_tdd : 1; /* bit 56 */
-	u32 : 1;
-	u32 qebsm : 1;	 /* bit 58 */
-	u32 : 2;
-	u32 aiv : 1;	 /* bit 61 */
-	u32 : 5;
-	u32 aif_osa : 1; /* bit 67 */
-	u32 : 12;
-	u32 eadm_rf : 1; /* bit 80 */
-	u32 : 1;
-	u32 cib : 1;	 /* bit 82 */
-	u32 : 5;
-	u32 fcx : 1;	 /* bit 88 */
-	u32 : 19;
-	u32 alt_ssi : 1; /* bit 108 */
-	u32 : 1;
-	u32 narf : 1;	 /* bit 110 */
-	u32 : 12;
-	u32 util_str : 1;/* bit 123 */
+	u64 dynio : 1;	 /* bit 12 */
+	u64 : 4;
+	u64 eadm : 1;	 /* bit 17 */
+	u64 : 23;
+	u64 aif : 1;	 /* bit 41 */
+	u64 : 3;
+	u64 mcss : 1;	 /* bit 45 */
+	u64 fcs : 1;	 /* bit 46 */
+	u64 : 1;
+	u64 ext_mb : 1;	 /* bit 48 */
+	u64 : 7;
+	u64 aif_tdd : 1; /* bit 56 */
+	u64 : 1;
+	u64 qebsm : 1;	 /* bit 58 */
+	u64 : 2;
+	u64 aiv : 1;	 /* bit 61 */
+	u64 : 2;
+
+	u64 : 3;
+	u64 aif_osa : 1; /* bit 67 */
+	u64 : 12;
+	u64 eadm_rf : 1; /* bit 80 */
+	u64 : 1;
+	u64 cib : 1;	 /* bit 82 */
+	u64 : 5;
+	u64 fcx : 1;	 /* bit 88 */
+	u64 : 19;
+	u64 alt_ssi : 1; /* bit 108 */
+	u64 : 1;
+	u64 narf : 1;	 /* bit 110 */
+	u64 : 12;
+	u64 util_str : 1;/* bit 123 */
 } __packed;
 
 extern struct css_general_char css_general_characteristics;
...
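Background on the fix: a bit-field lives inside a storage unit of its declared type, and a field that does not fit into what is left of the current unit is pushed into the next one, which inserts padding and shifts every later field away from the architecturally defined bit position (sparse also warns about such layouts). A minimal user-space illustration of the effect, assuming the usual 64-bit Linux ABI rules (illustrative code, not part of the kernel):

#include <stdio.h>
#include <stdint.h>

struct narrow {			/* u32-style declaration */
	uint32_t a : 20;
	uint32_t b : 20;	/* 20 bits do not fit in the remaining 12,
				 * so 'b' starts a new 32-bit unit: a 12-bit
				 * hole follows 'a' and 'b' begins at bit 32 */
	uint32_t c : 20;	/* again pushed on, to bit 64 */
};

struct wide {			/* u64-style declaration */
	uint64_t a : 20;
	uint64_t b : 20;	/* bits 20..39 of the same 64-bit unit */
	uint64_t c : 20;	/* bits 40..59, still no padding */
};

int main(void)
{
	printf("narrow: %zu bytes, wide: %zu bytes\n",
	       sizeof(struct narrow), sizeof(struct wide));	/* 12 vs 8 */
	return 0;
}

Declaring every member of css_general_char as u64 and splitting the padding at the 64-bit boundary (the ": 2" plus ": 3" pair around bit 64) keeps each field inside one storage unit, so the "bit n" comments again match the layout the hardware defines.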
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1222,80 +1222,37 @@ static void dasd_hosts_init(struct dentry *base_dentry,
 		device->hosts_dentry = pde;
 }
 
-/*
- * Allocate memory for a channel program with 'cplength' channel
- * command words and 'datasize' additional space. There are two
- * variantes: 1) dasd_kmalloc_request uses kmalloc to get the needed
- * memory and 2) dasd_smalloc_request uses the static ccw memory
- * that gets allocated for each device.
- */
-struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
-					  int datasize,
-					  struct dasd_device *device)
-{
-	struct dasd_ccw_req *cqr;
-
-	/* Sanity checks */
-	BUG_ON(datasize > PAGE_SIZE ||
-	       (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
-	cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
-	if (cqr == NULL)
-		return ERR_PTR(-ENOMEM);
-	cqr->cpaddr = NULL;
-	if (cplength > 0) {
-		cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
-				      GFP_ATOMIC | GFP_DMA);
-		if (cqr->cpaddr == NULL) {
-			kfree(cqr);
-			return ERR_PTR(-ENOMEM);
-		}
-	}
-	cqr->data = NULL;
-	if (datasize > 0) {
-		cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
-		if (cqr->data == NULL) {
-			kfree(cqr->cpaddr);
-			kfree(cqr);
-			return ERR_PTR(-ENOMEM);
-		}
-	}
-	cqr->magic = magic;
-	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
-	dasd_get_device(device);
-	return cqr;
-}
-EXPORT_SYMBOL(dasd_kmalloc_request);
-
-struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
-					  int datasize,
-					  struct dasd_device *device)
+struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
+					  struct dasd_device *device,
+					  struct dasd_ccw_req *cqr)
 {
 	unsigned long flags;
-	struct dasd_ccw_req *cqr;
-	char *data;
-	int size;
+	char *data, *chunk;
+	int size = 0;
 
-	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
 	if (cplength > 0)
 		size += cplength * sizeof(struct ccw1);
 	if (datasize > 0)
 		size += datasize;
+	if (!cqr)
+		size += (sizeof(*cqr) + 7L) & -8L;
+
 	spin_lock_irqsave(&device->mem_lock, flags);
-	cqr = (struct dasd_ccw_req *)
-		dasd_alloc_chunk(&device->ccw_chunks, size);
+	data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
 	spin_unlock_irqrestore(&device->mem_lock, flags);
-	if (cqr == NULL)
+	if (!chunk)
 		return ERR_PTR(-ENOMEM);
-	memset(cqr, 0, sizeof(struct dasd_ccw_req));
-	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
-	cqr->cpaddr = NULL;
+	if (!cqr) {
+		cqr = (void *) data;
+		data += (sizeof(*cqr) + 7L) & -8L;
+	}
+	memset(cqr, 0, sizeof(*cqr));
+	cqr->mem_chunk = chunk;
 	if (cplength > 0) {
-		cqr->cpaddr = (struct ccw1 *) data;
-		data += cplength*sizeof(struct ccw1);
-		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
+		cqr->cpaddr = data;
+		data += cplength * sizeof(struct ccw1);
+		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
 	}
-	cqr->data = NULL;
 	if (datasize > 0) {
 		cqr->data = data;
 		memset(cqr->data, 0, datasize);
@@ -1307,33 +1264,12 @@ struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
 }
 EXPORT_SYMBOL(dasd_smalloc_request);
 
-/*
- * Free memory of a channel program. This function needs to free all the
- * idal lists that might have been created by dasd_set_cda and the
- * struct dasd_ccw_req itself.
- */
-void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
-{
-	struct ccw1 *ccw;
-
-	/* Clear any idals used for the request. */
-	ccw = cqr->cpaddr;
-	do {
-		clear_normalized_cda(ccw);
-	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
-	kfree(cqr->cpaddr);
-	kfree(cqr->data);
-	kfree(cqr);
-	dasd_put_device(device);
-}
-EXPORT_SYMBOL(dasd_kfree_request);
-
 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&device->mem_lock, flags);
-	dasd_free_chunk(&device->ccw_chunks, cqr);
+	dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
 	spin_unlock_irqrestore(&device->mem_lock, flags);
 	dasd_put_device(device);
 }
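A note on the (sizeof(*cqr) + 7L) & -8L expression kept in the new allocator: it rounds the embedded struct dasd_ccw_req up to the next multiple of 8 so that the channel program and data areas carved out behind it stay 8-byte aligned. A standalone sketch of the rounding idiom (illustrative, not driver code):

#include <stdio.h>

/* Round n up to a multiple of 8; for two's-complement longs,
 * (n + 7L) & -8L is the same as (n + 7UL) & ~7UL. */
static unsigned long round_up_8(unsigned long n)
{
	return (n + 7UL) & ~7UL;
}

int main(void)
{
	printf("%lu %lu %lu\n", round_up_8(1), round_up_8(8), round_up_8(13));
	/* prints: 8 8 16 */
	return 0;
}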
@@ -1885,28 +1821,11 @@ static void __dasd_device_process_ccw_queue(struct dasd_device *device,
 	}
 }
 
-/*
- * the cqrs from the final queue are returned to the upper layer
- * by setting a dasd_block state and calling the callback function
- */
-static void __dasd_device_process_final_queue(struct dasd_device *device,
-					      struct list_head *final_queue)
+static void __dasd_process_cqr(struct dasd_device *device,
+			       struct dasd_ccw_req *cqr)
 {
-	struct list_head *l, *n;
-	struct dasd_ccw_req *cqr;
-	struct dasd_block *block;
-	void (*callback)(struct dasd_ccw_req *, void *data);
-	void *callback_data;
 	char errorstring[ERRORLENGTH];
 
-	list_for_each_safe(l, n, final_queue) {
-		cqr = list_entry(l, struct dasd_ccw_req, devlist);
-		list_del_init(&cqr->devlist);
-		block = cqr->block;
-		callback = cqr->callback;
-		callback_data = cqr->callback_data;
-		if (block)
-			spin_lock_bh(&block->queue_lock);
 	switch (cqr->status) {
 	case DASD_CQR_SUCCESS:
 		cqr->status = DASD_CQR_DONE;
@@ -1925,11 +1844,33 @@ static void __dasd_device_process_final_queue(struct dasd_device *device,
 			    "reason=%s\n", errorstring);
 		BUG();
 	}
-	if (cqr->callback != NULL)
-		(callback)(cqr, callback_data);
-	if (block)
+	if (cqr->callback)
+		cqr->callback(cqr, cqr->callback_data);
+}
+
+/*
+ * the cqrs from the final queue are returned to the upper layer
+ * by setting a dasd_block state and calling the callback function
+ */
+static void __dasd_device_process_final_queue(struct dasd_device *device,
+					      struct list_head *final_queue)
+{
+	struct list_head *l, *n;
+	struct dasd_ccw_req *cqr;
+	struct dasd_block *block;
+
+	list_for_each_safe(l, n, final_queue) {
+		cqr = list_entry(l, struct dasd_ccw_req, devlist);
+		list_del_init(&cqr->devlist);
+		block = cqr->block;
+		if (!block) {
+			__dasd_process_cqr(device, cqr);
+		} else {
+			spin_lock_bh(&block->queue_lock);
+			__dasd_process_cqr(device, cqr);
 			spin_unlock_bh(&block->queue_lock);
+		}
 	}
 }
...
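This refactoring is what resolves the sparse warning called out in the merge message: the old code took and released block->queue_lock conditionally around the loop body, a pattern static analysis cannot pair up. Factoring the body into __dasd_process_cqr() leaves one branch with no lock and one branch with a plainly matched lock/unlock. A self-contained sketch of the pattern, using a pthread mutex in place of the spinlock (assumed names, not driver code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void process(int v)
{
	printf("processing %d\n", v);
}

/* Keep each lock/unlock pair on a single control path so tools like
 * sparse can match them, instead of "if (x) lock; ...; if (x) unlock;". */
static void process_maybe_locked(int v, int need_lock)
{
	if (!need_lock) {
		process(v);
	} else {
		pthread_mutex_lock(&lock);
		process(v);
		pthread_mutex_unlock(&lock);
	}
}

int main(void)
{
	process_maybe_locked(1, 0);
	process_maybe_locked(2, 1);
	return 0;
}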
@@ -3041,7 +2982,6 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
 	cqr->callback_data = req;
 	cqr->status = DASD_CQR_FILLED;
 	cqr->dq = dq;
-	*((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)) = cqr;
 
 	blk_mq_start_request(req);
 	spin_lock(&block->queue_lock);
@@ -3072,7 +3012,7 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
 	unsigned long flags;
 	int rc = 0;
 
-	cqr = *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req));
+	cqr = blk_mq_rq_to_pdu(req);
 	if (!cqr)
 		return BLK_EH_DONE;
@@ -3174,7 +3114,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
 	int rc;
 
 	block->tag_set.ops = &dasd_mq_ops;
-	block->tag_set.cmd_size = sizeof(struct dasd_ccw_req *);
+	block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
 	block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES;
 	block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV;
 	block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
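This one-line change is the core of "move dasd_ccw_req to per request data": blk-mq preallocates cmd_size bytes of driver-private data (the "pdu") behind every struct request, so storing the whole dasd_ccw_req there replaces the old scheme of storing only a pointer that had to be assigned in do_dasd_request() and dereferenced in dasd_times_out(). A sketch of the mechanism with a hypothetical driver 'foo' (kernel-style code following the blk-mq API, not the dasd code):

#include <linux/blk-mq.h>

/* With tag_set.cmd_size = sizeof(struct foo_cmd), blk-mq allocates one
 * struct foo_cmd together with each request up front; no per-I/O
 * allocation and no NULL window for the timeout handler to trip over. */
struct foo_cmd {
	int result;
};

static void foo_complete(struct request *req)
{
	struct foo_cmd *cmd = blk_mq_rq_to_pdu(req);	/* pdu follows req */

	cmd->result = 0;
}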
@@ -4038,7 +3978,8 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
 	struct ccw1 *ccw;
 	unsigned long *idaw;
 
-	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);
+	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
+				   NULL);
 	if (IS_ERR(cqr)) {
 		/* internal error 13 - Allocating the RDC request failed*/
...
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -407,9 +407,9 @@ static int read_unit_address_configuration(struct dasd_device *device,
 	int rc;
 	unsigned long flags;
 
-	cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
 				   (sizeof(struct dasd_psf_prssd_data)),
-				   device);
+				   device, NULL);
 	if (IS_ERR(cqr))
 		return PTR_ERR(cqr);
 	cqr->startdev = device;
@@ -457,7 +457,7 @@ static int read_unit_address_configuration(struct dasd_device *device,
 		lcu->flags |= NEED_UAC_UPDATE;
 		spin_unlock_irqrestore(&lcu->lock, flags);
 	}
-	dasd_kfree_request(cqr, cqr->memdev);
+	dasd_sfree_request(cqr, cqr->memdev);
 	return rc;
 }
...
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -536,7 +536,8 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
 	/* Build the request */
 	datasize = sizeof(struct dasd_diag_req) +
 		count*sizeof(struct dasd_diag_bio);
-	cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev);
+	cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev,
+				   blk_mq_rq_to_pdu(req));
 	if (IS_ERR(cqr))
 		return cqr;
...
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -886,7 +886,7 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
 	}
 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
 				   0, /* use rcd_buf as data ara */
-				   device);
+				   device, NULL);
 	if (IS_ERR(cqr)) {
 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "Could not allocate RCD request");
@@ -1442,7 +1442,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
 				   (sizeof(struct dasd_psf_prssd_data) +
 				    sizeof(struct dasd_rssd_features)),
-				   device);
+				   device, NULL);
 	if (IS_ERR(cqr)) {
 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
 				"allocate initialization request");
@@ -1504,7 +1504,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
 				   sizeof(struct dasd_psf_ssc_data),
-				   device);
+				   device, NULL);
 	if (IS_ERR(cqr)) {
 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
@@ -1815,7 +1815,8 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
 	cplength = 8;
 	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
-	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
+				   NULL);
 	if (IS_ERR(cqr))
 		return cqr;
 	ccw = cqr->cpaddr;
@@ -2092,7 +2093,8 @@ dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
 	 */
 	itcw_size = itcw_calc_size(0, count, 0);
-	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
+				   NULL);
 	if (IS_ERR(cqr))
 		return cqr;
@@ -2186,7 +2188,7 @@ dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
 	cplength += count;
 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
-				   startdev);
+				   startdev, NULL);
 	if (IS_ERR(cqr))
 		return cqr;
@@ -2332,7 +2334,7 @@ dasd_eckd_build_format(struct dasd_device *base,
 	}
 	/* Allocate the format ccw request. */
 	fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
-				   datasize, startdev);
+				   datasize, startdev, NULL);
 	if (IS_ERR(fcp))
 		return fcp;
@@ -3103,7 +3105,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
 	}
 	/* Allocate the ccw request. */
 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
-				   startdev);
+				   startdev, blk_mq_rq_to_pdu(req));
 	if (IS_ERR(cqr))
 		return cqr;
 	ccw = cqr->cpaddr;
@@ -3262,7 +3264,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
 	/* Allocate the ccw request. */
 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
-				   startdev);
+				   startdev, blk_mq_rq_to_pdu(req));
 	if (IS_ERR(cqr))
 		return cqr;
 	ccw = cqr->cpaddr;
@@ -3595,7 +3597,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
 	/* Allocate the ccw request. */
 	itcw_size = itcw_calc_size(0, ctidaw, 0);
-	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
+				   blk_mq_rq_to_pdu(req));
 	if (IS_ERR(cqr))
 		return cqr;
@@ -3862,7 +3865,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
 	/* Allocate the ccw request. */
 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
-				   datasize, startdev);
+				   datasize, startdev, blk_mq_rq_to_pdu(req));
 	if (IS_ERR(cqr))
 		return cqr;
@@ -4102,7 +4105,7 @@ dasd_eckd_release(struct dasd_device *device)
 		return -EACCES;
 
 	useglobal = 0;
-	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
 	if (IS_ERR(cqr)) {
 		mutex_lock(&dasd_reserve_mutex);
 		useglobal = 1;
@@ -4157,7 +4160,7 @@ dasd_eckd_reserve(struct dasd_device *device)
 		return -EACCES;
 
 	useglobal = 0;
-	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
 	if (IS_ERR(cqr)) {
 		mutex_lock(&dasd_reserve_mutex);
 		useglobal = 1;
@@ -4211,7 +4214,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
 		return -EACCES;
 
 	useglobal = 0;
-	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
 	if (IS_ERR(cqr)) {
 		mutex_lock(&dasd_reserve_mutex);
 		useglobal = 1;
@@ -4271,7 +4274,8 @@ static int dasd_eckd_snid(struct dasd_device *device,
 	useglobal = 0;
 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
-				   sizeof(struct dasd_snid_data), device);
+				   sizeof(struct dasd_snid_data), device,
+				   NULL);
 	if (IS_ERR(cqr)) {
 		mutex_lock(&dasd_reserve_mutex);
 		useglobal = 1;
@@ -4331,7 +4335,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
 				   (sizeof(struct dasd_psf_prssd_data) +
 				    sizeof(struct dasd_rssd_perf_stats_t)),
-				   device);
+				   device, NULL);
 	if (IS_ERR(cqr)) {
 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "Could not allocate initialization request");
@@ -4477,7 +4481,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
 	psf1 = psf_data[1];
 
 	/* setup CCWs for PSF + RSSD */
-	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 , 0, device);
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
 	if (IS_ERR(cqr)) {
 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "Could not allocate initialization request");
@@ -5037,7 +5041,7 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device,
 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
 				   (sizeof(struct dasd_psf_prssd_data) +
 				    sizeof(struct dasd_rssd_messages)),
-				   device);
+				   device, NULL);
 	if (IS_ERR(cqr)) {
 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
 				"Could not allocate read message buffer request");
@@ -5126,7 +5130,7 @@ static int dasd_eckd_query_host_access(struct dasd_device *device,
 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
 				   sizeof(struct dasd_psf_prssd_data) + 1,
-				   device);
+				   device, NULL);
 	if (IS_ERR(cqr)) {
 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
 				"Could not allocate read message buffer request");
@@ -5285,7 +5289,7 @@ dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
 				   sizeof(struct dasd_psf_cuir_response),
-				   device);
+				   device, NULL);
 	if (IS_ERR(cqr)) {
 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
...
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -447,7 +447,7 @@ static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
 		 * is a new ccw in device->eer_cqr. Free the "old"
 		 * snss request now.
 		 */
-		dasd_kfree_request(cqr, device);
+		dasd_sfree_request(cqr, device);
 }
 
 /*
@@ -472,8 +472,8 @@ int dasd_eer_enable(struct dasd_device *device)
 	if (rc)
 		goto out;
 
-	cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
-				   SNSS_DATA_SIZE, device);
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
+				   SNSS_DATA_SIZE, device, NULL);
 	if (IS_ERR(cqr)) {
 		rc = -ENOMEM;
 		cqr = NULL;
@@ -505,7 +505,7 @@ int dasd_eer_enable(struct dasd_device *device)
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 
 	if (cqr)
-		dasd_kfree_request(cqr, device);
+		dasd_sfree_request(cqr, device);
 	return rc;
 }
@@ -528,7 +528,7 @@ void dasd_eer_disable(struct dasd_device *device)
 	in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	if (cqr && !in_use)
-		dasd_kfree_request(cqr, device);
+		dasd_sfree_request(cqr, device);
 }
 
 /*
...
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -356,7 +356,8 @@ static struct dasd_ccw_req *dasd_fba_build_cp_discard(
 	datasize = sizeof(struct DE_fba_data) +
 		nr_ccws * (sizeof(struct LO_fba_data) + sizeof(struct ccw1));
 
-	cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev);
+	cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev,
+				   blk_mq_rq_to_pdu(req));
 	if (IS_ERR(cqr))
 		return cqr;
@@ -490,7 +491,8 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular(
 		datasize += (count - 1)*sizeof(struct LO_fba_data);
 	}
 	/* Allocate the ccw request. */
-	cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev);
+	cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev,
+				   blk_mq_rq_to_pdu(req));
 	if (IS_ERR(cqr))
 		return cqr;
 	ccw = cqr->cpaddr;
...
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -158,40 +158,33 @@ do { \
 struct dasd_ccw_req {
 	unsigned int magic;		/* Eye catcher */
+	int intrc;			/* internal error, e.g. from start_IO */
 	struct list_head devlist;	/* for dasd_device request queue */
 	struct list_head blocklist;	/* for dasd_block request queue */
-
-	/* Where to execute what... */
 	struct dasd_block *block;	/* the originating block device */
 	struct dasd_device *memdev;	/* the device used to allocate this */
 	struct dasd_device *startdev;	/* device the request is started on */
 	struct dasd_device *basedev;	/* base device if no block->base */
 	void *cpaddr;			/* address of ccw or tcw */
+	short retries;			/* A retry counter */
 	unsigned char cpmode;		/* 0 = cmd mode, 1 = itcw */
 	char status;			/* status of this request */
-	short retries;			/* A retry counter */
+	char lpm;			/* logical path mask */
 	unsigned long flags;		/* flags of this request */
 	struct dasd_queue *dq;
-
-	/* ... and how */
 	unsigned long starttime;	/* jiffies time of request start */
 	unsigned long expires;		/* expiration period in jiffies */
-	char lpm;			/* logical path mask */
 	void *data;			/* pointer to data area */
-
-	/* these are important for recovering erroneous requests */
-	int intrc;			/* internal error, e.g. from start_IO */
 	struct irb irb;			/* device status in case of an error */
 	struct dasd_ccw_req *refers;	/* ERP-chain queueing. */
 	void *function; 		/* originating ERP action */
+	void *mem_chunk;
 
-	/* these are for statistics only */
 	unsigned long buildclk;		/* TOD-clock of request generation */
 	unsigned long startclk;		/* TOD-clock of request start */
 	unsigned long stopclk;		/* TOD-clock of request interrupt */
 	unsigned long endclk;		/* TOD-clock of request termination */
-
-	/* Callback that is called after reaching final status. */
 	void (*callback)(struct dasd_ccw_req *, void *data);
 	void *callback_data;
 };
@@ -714,19 +707,10 @@ extern const struct block_device_operations dasd_device_operations;
 extern struct kmem_cache *dasd_page_cache;
 
 struct dasd_ccw_req *
-dasd_kmalloc_request(int , int, int, struct dasd_device *);
-struct dasd_ccw_req *
-dasd_smalloc_request(int , int, int, struct dasd_device *);
-void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *);
+dasd_smalloc_request(int, int, int, struct dasd_device *, struct dasd_ccw_req *);
 void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
 void dasd_wakeup_cb(struct dasd_ccw_req *, void *);
 
-static inline int
-dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device)
-{
-	return set_normalized_cda(ccw, cda);
-}
-
 struct dasd_device *dasd_alloc_device(void);
 void dasd_free_device(struct dasd_device *);
...
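On "reshuffle struct dasd_ccw_req": moving the char and short members (status, lpm, retries, intrc) next to each other removes alignment padding that the old interleaved layout forced in. A generic user-space demonstration of the effect on typical LP64 ABIs (illustrative only, not the dasd struct):

#include <stdio.h>

struct loose {			/* narrow members interleaved with longs */
	char a;			/* 7 bytes of padding follow */
	long b;
	char c;			/* 7 more bytes of padding */
	long d;
};				/* usually 32 bytes */

struct grouped {		/* narrow members grouped together */
	long b;
	long d;
	char a;
	char c;			/* one 6-byte tail pad */
};				/* usually 24 bytes */

int main(void)
{
	printf("loose: %zu, grouped: %zu\n",
	       sizeof(struct loose), sizeof(struct grouped));
	return 0;
}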
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -5,6 +5,7 @@
 
 # The following is required for define_trace.h to find ./trace.h
 CFLAGS_trace.o := -I$(src)
+CFLAGS_vfio_ccw_fsm.o := -I$(src)
 
 obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
 	fcx.o itcw.o crw.o ccwreq.o trace.o ioasm.o
...
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -23,9 +23,13 @@
 #define CCWCHAIN_LEN_MAX	256
 
 struct pfn_array {
+	/* Starting guest physical I/O address. */
 	unsigned long		pa_iova;
+	/* Array that stores PFNs of the pages need to pin. */
 	unsigned long		*pa_iova_pfn;
+	/* Array that receives PFNs of the pages pinned. */
 	unsigned long		*pa_pfn;
+	/* Number of pages pinned from @pa_iova. */
 	int			pa_nr;
 };
@@ -46,70 +50,33 @@ struct ccwchain {
 };
 
 /*
- * pfn_array_pin() - pin user pages in memory
+ * pfn_array_alloc_pin() - alloc memory for PFNs, then pin user pages in memory
  * @pa: pfn_array on which to perform the operation
  * @mdev: the mediated device to perform pin/unpin operations
+ * @iova: target guest physical address
+ * @len: number of bytes that should be pinned from @iova
  *
- * Attempt to pin user pages in memory.
+ * Attempt to allocate memory for PFNs, and pin user pages in memory.
  *
  * Usage of pfn_array:
- * @pa->pa_iova     starting guest physical I/O address. Assigned by caller.
- * @pa->pa_iova_pfn array that stores PFNs of the pages need to pin. Allocated
- *                  by caller.
- * @pa->pa_pfn      array that receives PFNs of the pages pinned. Allocated by
- *                  caller.
- * @pa->pa_nr       number of pages from @pa->pa_iova to pin. Assigned by
- *                  caller.
- *                  number of pages pinned. Assigned by callee.
+ * We expect (pa_nr == 0) and (pa_iova_pfn == NULL), any field in
+ * this structure will be filled in by this function.
  *
  * Returns:
  *   Number of pages pinned on success.
- *   If @pa->pa_nr is 0 or negative, returns 0.
+ *   If @pa->pa_nr is not 0, or @pa->pa_iova_pfn is not NULL initially,
+ *   returns -EINVAL.
  *   If no pages were pinned, returns -errno.
  */
-static int pfn_array_pin(struct pfn_array *pa, struct device *mdev)
-{
-	int i, ret;
-
-	if (pa->pa_nr <= 0) {
-		pa->pa_nr = 0;
-		return 0;
-	}
-
-	pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
-	for (i = 1; i < pa->pa_nr; i++)
-		pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
-
-	ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
-			     IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
-
-	if (ret > 0 && ret != pa->pa_nr) {
-		vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
-		pa->pa_nr = 0;
-		return 0;
-	}
-
-	return ret;
-}
-
-/* Unpin the pages before releasing the memory. */
-static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
-{
-	vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
-	pa->pa_nr = 0;
-	kfree(pa->pa_iova_pfn);
-}
-
-/* Alloc memory for PFNs, then pin pages with them. */
 static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
 			       u64 iova, unsigned int len)
 {
-	int ret = 0;
+	int i, ret = 0;
 
 	if (!len)
 		return 0;
 
-	if (pa->pa_nr)
+	if (pa->pa_nr || pa->pa_iova_pfn)
 		return -EINVAL;
 
 	pa->pa_iova = iova;
@@ -126,18 +93,39 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
 		return -ENOMEM;
 	pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
 
-	ret = pfn_array_pin(pa, mdev);
+	pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
+	for (i = 1; i < pa->pa_nr; i++)
+		pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
 
-	if (ret > 0)
-		return ret;
-	else if (!ret)
+	ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
+			     IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
+
+	if (ret < 0) {
+		goto err_out;
+	} else if (ret > 0 && ret != pa->pa_nr) {
+		vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
 		ret = -EINVAL;
+		goto err_out;
+	}
 
+	return ret;
+
+err_out:
+	pa->pa_nr = 0;
 	kfree(pa->pa_iova_pfn);
+	pa->pa_iova_pfn = NULL;
 
 	return ret;
 }
 
+/* Unpin the pages before releasing the memory. */
+static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
+{
+	vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
+	pa->pa_nr = 0;
+	kfree(pa->pa_iova_pfn);
+}
+
 static int pfn_array_table_init(struct pfn_array_table *pat, int nr)
 {
 	pat->pat_pa = kcalloc(nr, sizeof(*pat->pat_pa), GFP_KERNEL);
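The refactored pfn_array_alloc_pin() fills pa_iova_pfn[] with consecutive frame numbers starting at pa_iova >> PAGE_SHIFT and hands the whole array to vfio_pin_pages() in one go. A user-space sketch of that arithmetic, assuming 4K pages (the pa_nr computation itself sits in a part of the function this hunk elides):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long iova = 0x12345678;	/* arbitrary example address */
	unsigned long len = 2 * PAGE_SIZE;

	/* number of pages touched by [iova, iova + len) */
	int nr = ((iova & (PAGE_SIZE - 1)) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long first = iova >> PAGE_SHIFT;
	int i;

	for (i = 0; i < nr; i++)
		printf("pfn[%d] = %#lx\n", i, first + i);
	return 0;
}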
@@ -365,6 +353,9 @@ static void cp_unpin_free(struct channel_program *cp)
  * This is the chain length not considering any TICs.
  * You need to do a new round for each TIC target.
  *
+ * The program is also validated for absence of not yet supported
+ * indirect data addressing scenarios.
+ *
  * Returns: the length of the ccw chain or -errno.
  */
 static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
@@ -391,6 +382,14 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
 	do {
 		cnt++;
 
+		/*
+		 * As we don't want to fail direct addressing even if the
+		 * orb specified one of the unsupported formats, we defer
+		 * checking for IDAWs in unsupported formats to here.
+		 */
+		if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw))
+			return -EOPNOTSUPP;
+
 		if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw)))
 			break;
struct ccw1 *ccw; struct ccw1 *ccw;
struct pfn_array_table *pat; struct pfn_array_table *pat;
unsigned long *idaws; unsigned long *idaws;
int idaw_nr; int ret;
ccw = chain->ch_ccw + idx; ccw = chain->ch_ccw + idx;
...@@ -523,18 +522,19 @@ static int ccwchain_fetch_direct(struct ccwchain *chain, ...@@ -523,18 +522,19 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
* needed when translating a direct ccw to a idal ccw. * needed when translating a direct ccw to a idal ccw.
*/ */
pat = chain->ch_pat + idx; pat = chain->ch_pat + idx;
if (pfn_array_table_init(pat, 1)) ret = pfn_array_table_init(pat, 1);
return -ENOMEM; if (ret)
idaw_nr = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, goto out_init;
ccw->cda, ccw->count);
if (idaw_nr < 0) ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count);
return idaw_nr; if (ret < 0)
goto out_init;
/* Translate this direct ccw to a idal ccw. */ /* Translate this direct ccw to a idal ccw. */
idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL); idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
if (!idaws) { if (!idaws) {
pfn_array_table_unpin_free(pat, cp->mdev); ret = -ENOMEM;
return -ENOMEM; goto out_unpin;
} }
ccw->cda = (__u32) virt_to_phys(idaws); ccw->cda = (__u32) virt_to_phys(idaws);
ccw->flags |= CCW_FLAG_IDA; ccw->flags |= CCW_FLAG_IDA;
...@@ -542,6 +542,12 @@ static int ccwchain_fetch_direct(struct ccwchain *chain, ...@@ -542,6 +542,12 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
pfn_array_table_idal_create_words(pat, idaws); pfn_array_table_idal_create_words(pat, idaws);
return 0; return 0;
out_unpin:
pfn_array_table_unpin_free(pat, cp->mdev);
out_init:
ccw->cda = 0;
return ret;
} }
static int ccwchain_fetch_idal(struct ccwchain *chain, static int ccwchain_fetch_idal(struct ccwchain *chain,
...@@ -571,7 +577,7 @@ static int ccwchain_fetch_idal(struct ccwchain *chain, ...@@ -571,7 +577,7 @@ static int ccwchain_fetch_idal(struct ccwchain *chain,
pat = chain->ch_pat + idx; pat = chain->ch_pat + idx;
ret = pfn_array_table_init(pat, idaw_nr); ret = pfn_array_table_init(pat, idaw_nr);
if (ret) if (ret)
return ret; goto out_init;
/* Translate idal ccw to use new allocated idaws. */ /* Translate idal ccw to use new allocated idaws. */
idaws = kzalloc(idaw_len, GFP_DMA | GFP_KERNEL); idaws = kzalloc(idaw_len, GFP_DMA | GFP_KERNEL);
...@@ -603,6 +609,8 @@ static int ccwchain_fetch_idal(struct ccwchain *chain, ...@@ -603,6 +609,8 @@ static int ccwchain_fetch_idal(struct ccwchain *chain,
kfree(idaws); kfree(idaws);
out_unpin: out_unpin:
pfn_array_table_unpin_free(pat, cp->mdev); pfn_array_table_unpin_free(pat, cp->mdev);
out_init:
ccw->cda = 0;
return ret; return ret;
} }
@@ -656,10 +664,8 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
 	/*
 	 * XXX:
 	 * Only support prefetch enable mode now.
-	 * Only support 64bit addressing idal.
-	 * Only support 4k IDAW.
 	 */
-	if (!orb->cmd.pfch || !orb->cmd.c64 || orb->cmd.i2k)
+	if (!orb->cmd.pfch)
 		return -EOPNOTSUPP;
 
 	INIT_LIST_HEAD(&cp->ccwchain_list);
@@ -688,6 +694,10 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
 	ret = ccwchain_loop_tic(chain, cp);
 	if (ret)
 		cp_unpin_free(cp);
+	/* It is safe to force: if not set but idals used
+	 * ccwchain_calc_length returns an error.
+	 */
+	cp->orb.cmd.c64 = 1;
 
 	return ret;
 }
...
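For readers unfamiliar with the ORB bits involved: c64 and i2k are format controls (c64 selects 64-bit format-2 IDAWs, i2k selects 2K IDAW blocks). The old code rejected any ORB that was not "c64 and not i2k"; the new code rejects such an ORB only when the chain actually contains an IDAL CCW, and then forces c64 on, since the translated program always uses 64-bit IDAWs. A condensed model of the accept/reject decision (assumed field names taken from the diff, not the driver's code):

#include <stdbool.h>
#include <stdio.h>

struct orb_fmt {
	bool c64;	/* 64-bit (format-2) IDAWs requested */
	bool i2k;	/* 2K IDAW blocks requested */
};

static bool request_ok(struct orb_fmt o, bool chain_uses_idal)
{
	/* an unsupported IDAW format only matters if an IDAL is present */
	if ((!o.c64 || o.i2k) && chain_uses_idal)
		return false;
	return true;
}

int main(void)
{
	struct orb_fmt legacy = { .c64 = false, .i2k = false };

	printf("%d %d\n", request_ok(legacy, false), request_ok(legacy, true));
	/* prints: 1 0, i.e. direct-addressing programs are now accepted */
	return 0;
}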
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -177,6 +177,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
 {
 	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
 	unsigned long flags;
+	int rc = -EAGAIN;
 
 	spin_lock_irqsave(sch->lock, flags);
 	if (!device_is_registered(&sch->dev))
@@ -187,6 +188,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
 
 	if (cio_update_schib(sch)) {
 		vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+		rc = 0;
 		goto out_unlock;
 	}
 
@@ -195,11 +197,12 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
 		private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
 				 VFIO_CCW_STATE_STANDBY;
 	}
+	rc = 0;
 
 out_unlock:
 	spin_unlock_irqrestore(sch->lock, flags);
 
-	return 0;
+	return rc;
 }
 
 static struct css_device_id vfio_ccw_sch_ids[] = {
...
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -13,6 +13,9 @@
 #include "ioasm.h"
 #include "vfio_ccw_private.h"
 
+#define CREATE_TRACE_POINTS
+#include "vfio_ccw_trace.h"
+
 static int fsm_io_helper(struct vfio_ccw_private *private)
 {
 	struct subchannel *sch;
@@ -110,6 +113,10 @@ static void fsm_disabled_irq(struct vfio_ccw_private *private,
 	 */
 	cio_disable_subchannel(sch);
 }
+inline struct subchannel_id get_schid(struct vfio_ccw_private *p)
+{
+	return p->sch->schid;
+}
 
 /*
  * Deal with the ccw command request from the userspace.
@@ -121,6 +128,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
 	union scsw *scsw = &private->scsw;
 	struct ccw_io_region *io_region = &private->io_region;
 	struct mdev_device *mdev = private->mdev;
+	char *errstr = "request";
 
 	private->state = VFIO_CCW_STATE_BOXED;
 
@@ -132,15 +140,19 @@ static void fsm_io_request(struct vfio_ccw_private *private,
 		/* Don't try to build a cp if transport mode is specified. */
 		if (orb->tm.b) {
 			io_region->ret_code = -EOPNOTSUPP;
+			errstr = "transport mode";
 			goto err_out;
 		}
 		io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
 					      orb);
-		if (io_region->ret_code)
+		if (io_region->ret_code) {
+			errstr = "cp init";
 			goto err_out;
+		}
 
 		io_region->ret_code = cp_prefetch(&private->cp);
 		if (io_region->ret_code) {
+			errstr = "cp prefetch";
 			cp_free(&private->cp);
 			goto err_out;
 		}
@@ -148,6 +160,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
 		/* Start channel program and wait for I/O interrupt. */
 		io_region->ret_code = fsm_io_helper(private);
 		if (io_region->ret_code) {
+			errstr = "cp fsm_io_helper";
 			cp_free(&private->cp);
 			goto err_out;
 		}
@@ -164,6 +177,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
 
 err_out:
 	private->state = VFIO_CCW_STATE_IDLE;
+	trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private),
+			       io_region->ret_code, errstr);
 }
...

New file: drivers/s390/cio/vfio_ccw_trace.h
/* SPDX-License-Identifier: GPL-2.0
* Tracepoints for vfio_ccw driver
*
* Copyright IBM Corp. 2018
*
* Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
* Halil Pasic <pasic@linux.vnet.ibm.com>
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM vfio_ccw
#if !defined(_VFIO_CCW_TRACE_) || defined(TRACE_HEADER_MULTI_READ)
#define _VFIO_CCW_TRACE_
#include <linux/tracepoint.h>
TRACE_EVENT(vfio_ccw_io_fctl,
TP_PROTO(int fctl, struct subchannel_id schid, int errno, char *errstr),
TP_ARGS(fctl, schid, errno, errstr),
TP_STRUCT__entry(
__field(int, fctl)
__field_struct(struct subchannel_id, schid)
__field(int, errno)
__field(char*, errstr)
),
TP_fast_assign(
__entry->fctl = fctl;
__entry->schid = schid;
__entry->errno = errno;
__entry->errstr = errstr;
),
TP_printk("schid=%x.%x.%04x fctl=%x errno=%d info=%s",
__entry->schid.cssid,
__entry->schid.ssid,
__entry->schid.sch_no,
__entry->fctl,
__entry->errno,
__entry->errstr)
);
#endif /* _VFIO_CCW_TRACE_ */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE vfio_ccw_trace
#include <trace/define_trace.h>