Commit 6708bb27 authored by Andy Grover's avatar Andy Grover Committed by Nicholas Bellinger

target: Follow up core updates from AGrover and HCH (round 4)

This patch contains the squashed version of the fourth round of series cleanups
from Andy and Christoph following the heavy lifting in the preceding:
'Eliminate usage of struct se_mem' and 'Make all control CDBs scatter-gather'
changes.  This also includes a conversion of target core and the v3.0
mainline fabric modules (loopback and tcm_fc) to use pr_debug and the
CONFIG_DYNAMIC_DEBUG infrastructure!

These have been squashed into this fourth and final round for v3.1.

target: Remove ifdeffed code in t_g_process_write
target: Remove direct ramdisk code
target: Rename task_sg_num to task_sg_nents
target: Remove custom debug macros for pr_debug. Use pr_err().
target: Remove custom debug macros in mainline fabrics
target: Set WSNZ=1 in block limits VPD. Abort if WRITE_SAME sectors = 0
target: Remove transport do_se_mem_map callback
target: Further simplify transport_free_pages
target: Redo task allocation return value handling
target: Remove extra parentheses
target: change alloc_task call to take *cdb, not *cmd

(nab: Fix bogus struct file assignments in fd_do_readv and fd_do_writev)
Signed-off-by: default avatarAndy Grover <agrover@redhat.com>
Reviewed-by: default avatarChristoph Hellwig <hch@lst.de>
Signed-off-by: default avatarNicholas Bellinger <nab@linux-iscsi.org>
parent ec98f782
...@@ -3,9 +3,3 @@ config LOOPBACK_TARGET ...@@ -3,9 +3,3 @@ config LOOPBACK_TARGET
help help
Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD
fabric loopback module. fabric loopback module.
config LOOPBACK_TARGET_CDB_DEBUG
bool "TCM loopback fabric module CDB debug code"
depends on LOOPBACK_TARGET
help
Say Y here to enable the TCM loopback fabric module CDB debug code
This diff is collapsed.
...@@ -16,12 +16,6 @@ ...@@ -16,12 +16,6 @@
*/ */
#define TL_SCSI_MAX_CMD_LEN 32 #define TL_SCSI_MAX_CMD_LEN 32
#ifdef CONFIG_LOOPBACK_TARGET_CDB_DEBUG
# define TL_CDB_DEBUG(x...) printk(KERN_INFO x)
#else
# define TL_CDB_DEBUG(x...)
#endif
struct tcm_loop_cmd { struct tcm_loop_cmd {
/* State of Linux/SCSI CDB+Data descriptor */ /* State of Linux/SCSI CDB+Data descriptor */
u32 sc_cmd_state; u32 sc_cmd_state;
......
This diff is collapsed.
...@@ -73,7 +73,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd) ...@@ -73,7 +73,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
* payload going back for EVPD=0 * payload going back for EVPD=0
*/ */
if (cmd->data_length < 6) { if (cmd->data_length < 6) {
printk(KERN_ERR "SCSI Inquiry payload length: %u" pr_err("SCSI Inquiry payload length: %u"
" too small for EVPD=0\n", cmd->data_length); " too small for EVPD=0\n", cmd->data_length);
return -EINVAL; return -EINVAL;
} }
...@@ -327,7 +327,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) ...@@ -327,7 +327,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
if (!(tg_pt_gp)) { if (!tg_pt_gp) {
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
goto check_lu_gp; goto check_lu_gp;
} }
...@@ -358,12 +358,12 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) ...@@ -358,12 +358,12 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
goto check_scsi_name; goto check_scsi_name;
} }
lu_gp_mem = dev->dev_alua_lu_gp_mem; lu_gp_mem = dev->dev_alua_lu_gp_mem;
if (!(lu_gp_mem)) if (!lu_gp_mem)
goto check_scsi_name; goto check_scsi_name;
spin_lock(&lu_gp_mem->lu_gp_mem_lock); spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp; lu_gp = lu_gp_mem->lu_gp;
if (!(lu_gp)) { if (!lu_gp) {
spin_unlock(&lu_gp_mem->lu_gp_mem_lock); spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
goto check_scsi_name; goto check_scsi_name;
} }
...@@ -475,14 +475,14 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) ...@@ -475,14 +475,14 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
have_tp = 1; have_tp = 1;
if (cmd->data_length < (0x10 + 4)) { if (cmd->data_length < (0x10 + 4)) {
printk(KERN_INFO "Received data_length: %u" pr_debug("Received data_length: %u"
" too small for EVPD 0xb0\n", " too small for EVPD 0xb0\n",
cmd->data_length); cmd->data_length);
return -EINVAL; return -EINVAL;
} }
if (have_tp && cmd->data_length < (0x3c + 4)) { if (have_tp && cmd->data_length < (0x3c + 4)) {
printk(KERN_INFO "Received data_length: %u" pr_debug("Received data_length: %u"
" too small for TPE=1 EVPD 0xb0\n", " too small for TPE=1 EVPD 0xb0\n",
cmd->data_length); cmd->data_length);
have_tp = 0; have_tp = 0;
...@@ -491,6 +491,9 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) ...@@ -491,6 +491,9 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
buf[0] = dev->transport->get_device_type(dev); buf[0] = dev->transport->get_device_type(dev);
buf[3] = have_tp ? 0x3c : 0x10; buf[3] = have_tp ? 0x3c : 0x10;
/* Set WSNZ to 1 */
buf[4] = 0x01;
/* /*
* Set OPTIMAL TRANSFER LENGTH GRANULARITY * Set OPTIMAL TRANSFER LENGTH GRANULARITY
*/ */
...@@ -667,7 +670,7 @@ target_emulate_inquiry(struct se_cmd *cmd) ...@@ -667,7 +670,7 @@ target_emulate_inquiry(struct se_cmd *cmd)
* payload length left for the next outgoing EVPD metadata * payload length left for the next outgoing EVPD metadata
*/ */
if (cmd->data_length < 4) { if (cmd->data_length < 4) {
printk(KERN_ERR "SCSI Inquiry payload length: %u" pr_err("SCSI Inquiry payload length: %u"
" too small for EVPD=1\n", cmd->data_length); " too small for EVPD=1\n", cmd->data_length);
return -EINVAL; return -EINVAL;
} }
...@@ -685,7 +688,7 @@ target_emulate_inquiry(struct se_cmd *cmd) ...@@ -685,7 +688,7 @@ target_emulate_inquiry(struct se_cmd *cmd)
} }
transport_kunmap_first_data_page(cmd); transport_kunmap_first_data_page(cmd);
printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]); pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
return -EINVAL; return -EINVAL;
} }
...@@ -891,7 +894,7 @@ target_emulate_modesense(struct se_cmd *cmd, int ten) ...@@ -891,7 +894,7 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
length += target_modesense_control(dev, &buf[offset+length]); length += target_modesense_control(dev, &buf[offset+length]);
break; break;
default: default:
printk(KERN_ERR "Got Unknown Mode Page: 0x%02x\n", pr_err("Got Unknown Mode Page: 0x%02x\n",
cdb[2] & 0x3f); cdb[2] & 0x3f);
return PYX_TRANSPORT_UNKNOWN_MODE_PAGE; return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
} }
...@@ -947,14 +950,14 @@ target_emulate_request_sense(struct se_cmd *cmd) ...@@ -947,14 +950,14 @@ target_emulate_request_sense(struct se_cmd *cmd)
int err = 0; int err = 0;
if (cdb[1] & 0x01) { if (cdb[1] & 0x01) {
printk(KERN_ERR "REQUEST_SENSE description emulation not" pr_err("REQUEST_SENSE description emulation not"
" supported\n"); " supported\n");
return PYX_TRANSPORT_INVALID_CDB_FIELD; return PYX_TRANSPORT_INVALID_CDB_FIELD;
} }
buf = transport_kmap_first_data_page(cmd); buf = transport_kmap_first_data_page(cmd);
if (!(core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))) { if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
/* /*
* CURRENT ERROR, UNIT ATTENTION * CURRENT ERROR, UNIT ATTENTION
*/ */
...@@ -1028,18 +1031,18 @@ target_emulate_unmap(struct se_task *task) ...@@ -1028,18 +1031,18 @@ target_emulate_unmap(struct se_task *task)
buf = transport_kmap_first_data_page(cmd); buf = transport_kmap_first_data_page(cmd);
ptr = &buf[offset]; ptr = &buf[offset];
printk(KERN_INFO "UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu" pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr); " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
while (size) { while (size) {
lba = get_unaligned_be64(&ptr[0]); lba = get_unaligned_be64(&ptr[0]);
range = get_unaligned_be32(&ptr[8]); range = get_unaligned_be32(&ptr[8]);
printk(KERN_INFO "UNMAP: Using lba: %llu and range: %u\n", pr_debug("UNMAP: Using lba: %llu and range: %u\n",
(unsigned long long)lba, range); (unsigned long long)lba, range);
ret = dev->transport->do_discard(dev, lba, range); ret = dev->transport->do_discard(dev, lba, range);
if (ret < 0) { if (ret < 0) {
printk(KERN_ERR "blkdev_issue_discard() failed: %d\n", pr_err("blkdev_issue_discard() failed: %d\n",
ret); ret);
goto err; goto err;
} }
...@@ -1084,12 +1087,12 @@ target_emulate_write_same(struct se_task *task, int write_same32) ...@@ -1084,12 +1087,12 @@ target_emulate_write_same(struct se_task *task, int write_same32)
else else
range = (dev->transport->get_blocks(dev) - lba); range = (dev->transport->get_blocks(dev) - lba);
printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %llu\n", pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
(unsigned long long)lba, (unsigned long long)range); (unsigned long long)lba, (unsigned long long)range);
ret = dev->transport->do_discard(dev, lba, range); ret = dev->transport->do_discard(dev, lba, range);
if (ret < 0) { if (ret < 0) {
printk(KERN_INFO "blkdev_issue_discard() failed for WRITE_SAME\n"); pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
return ret; return ret;
} }
...@@ -1125,7 +1128,7 @@ transport_emulate_control_cdb(struct se_task *task) ...@@ -1125,7 +1128,7 @@ transport_emulate_control_cdb(struct se_task *task)
ret = target_emulate_readcapacity_16(cmd); ret = target_emulate_readcapacity_16(cmd);
break; break;
default: default:
printk(KERN_ERR "Unsupported SA: 0x%02x\n", pr_err("Unsupported SA: 0x%02x\n",
cmd->t_task_cdb[1] & 0x1f); cmd->t_task_cdb[1] & 0x1f);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
} }
...@@ -1135,7 +1138,7 @@ transport_emulate_control_cdb(struct se_task *task) ...@@ -1135,7 +1138,7 @@ transport_emulate_control_cdb(struct se_task *task)
break; break;
case UNMAP: case UNMAP:
if (!dev->transport->do_discard) { if (!dev->transport->do_discard) {
printk(KERN_ERR "UNMAP emulation not supported for: %s\n", pr_err("UNMAP emulation not supported for: %s\n",
dev->transport->name); dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
} }
...@@ -1143,7 +1146,7 @@ transport_emulate_control_cdb(struct se_task *task) ...@@ -1143,7 +1146,7 @@ transport_emulate_control_cdb(struct se_task *task)
break; break;
case WRITE_SAME_16: case WRITE_SAME_16:
if (!dev->transport->do_discard) { if (!dev->transport->do_discard) {
printk(KERN_ERR "WRITE_SAME_16 emulation not supported" pr_err("WRITE_SAME_16 emulation not supported"
" for: %s\n", dev->transport->name); " for: %s\n", dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
} }
...@@ -1155,7 +1158,7 @@ transport_emulate_control_cdb(struct se_task *task) ...@@ -1155,7 +1158,7 @@ transport_emulate_control_cdb(struct se_task *task)
switch (service_action) { switch (service_action) {
case WRITE_SAME_32: case WRITE_SAME_32:
if (!dev->transport->do_discard) { if (!dev->transport->do_discard) {
printk(KERN_ERR "WRITE_SAME_32 SA emulation not" pr_err("WRITE_SAME_32 SA emulation not"
" supported for: %s\n", " supported for: %s\n",
dev->transport->name); dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
...@@ -1163,7 +1166,7 @@ transport_emulate_control_cdb(struct se_task *task) ...@@ -1163,7 +1166,7 @@ transport_emulate_control_cdb(struct se_task *task)
ret = target_emulate_write_same(task, 1); ret = target_emulate_write_same(task, 1);
break; break;
default: default:
printk(KERN_ERR "Unsupported VARIABLE_LENGTH_CMD SA:" pr_err("Unsupported VARIABLE_LENGTH_CMD SA:"
" 0x%02x\n", service_action); " 0x%02x\n", service_action);
break; break;
} }
...@@ -1171,8 +1174,7 @@ transport_emulate_control_cdb(struct se_task *task) ...@@ -1171,8 +1174,7 @@ transport_emulate_control_cdb(struct se_task *task)
case SYNCHRONIZE_CACHE: case SYNCHRONIZE_CACHE:
case 0x91: /* SYNCHRONIZE_CACHE_16: */ case 0x91: /* SYNCHRONIZE_CACHE_16: */
if (!dev->transport->do_sync_cache) { if (!dev->transport->do_sync_cache) {
printk(KERN_ERR pr_err("SYNCHRONIZE_CACHE emulation not supported"
"SYNCHRONIZE_CACHE emulation not supported"
" for: %s\n", dev->transport->name); " for: %s\n", dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
} }
...@@ -1189,7 +1191,7 @@ transport_emulate_control_cdb(struct se_task *task) ...@@ -1189,7 +1191,7 @@ transport_emulate_control_cdb(struct se_task *task)
case WRITE_FILEMARKS: case WRITE_FILEMARKS:
break; break;
default: default:
printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n", pr_err("Unsupported SCSI Opcode: 0x%02x for %s\n",
cmd->t_task_cdb[0], dev->transport->name); cmd->t_task_cdb[0], dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
} }
......
This diff is collapsed.
This diff is collapsed.
...@@ -172,7 +172,7 @@ u32 fc_get_pr_transport_id( ...@@ -172,7 +172,7 @@ u32 fc_get_pr_transport_id(
ptr = &se_nacl->initiatorname[0]; ptr = &se_nacl->initiatorname[0];
for (i = 0; i < 24; ) { for (i = 0; i < 24; ) {
if (!(strncmp(&ptr[i], ":", 1))) { if (!strncmp(&ptr[i], ":", 1)) {
i++; i++;
continue; continue;
} }
...@@ -386,7 +386,7 @@ char *iscsi_parse_pr_out_transport_id( ...@@ -386,7 +386,7 @@ char *iscsi_parse_pr_out_transport_id(
* Reserved * Reserved
*/ */
if ((format_code != 0x00) && (format_code != 0x40)) { if ((format_code != 0x00) && (format_code != 0x40)) {
printk(KERN_ERR "Illegal format code: 0x%02x for iSCSI" pr_err("Illegal format code: 0x%02x for iSCSI"
" Initiator Transport ID\n", format_code); " Initiator Transport ID\n", format_code);
return NULL; return NULL;
} }
...@@ -406,7 +406,7 @@ char *iscsi_parse_pr_out_transport_id( ...@@ -406,7 +406,7 @@ char *iscsi_parse_pr_out_transport_id(
tid_len += padding; tid_len += padding;
if ((add_len + 4) != tid_len) { if ((add_len + 4) != tid_len) {
printk(KERN_INFO "LIO-Target Extracted add_len: %hu " pr_debug("LIO-Target Extracted add_len: %hu "
"does not match calculated tid_len: %u," "does not match calculated tid_len: %u,"
" using tid_len instead\n", add_len+4, tid_len); " using tid_len instead\n", add_len+4, tid_len);
*out_tid_len = tid_len; *out_tid_len = tid_len;
...@@ -420,8 +420,8 @@ char *iscsi_parse_pr_out_transport_id( ...@@ -420,8 +420,8 @@ char *iscsi_parse_pr_out_transport_id(
*/ */
if (format_code == 0x40) { if (format_code == 0x40) {
p = strstr((char *)&buf[4], ",i,0x"); p = strstr((char *)&buf[4], ",i,0x");
if (!(p)) { if (!p) {
printk(KERN_ERR "Unable to locate \",i,0x\" seperator" pr_err("Unable to locate \",i,0x\" seperator"
" for Initiator port identifier: %s\n", " for Initiator port identifier: %s\n",
(char *)&buf[4]); (char *)&buf[4]);
return NULL; return NULL;
......
...@@ -42,18 +42,6 @@ ...@@ -42,18 +42,6 @@
#include "target_core_file.h" #include "target_core_file.h"
#if 1
#define DEBUG_FD_CACHE(x...) printk(x)
#else
#define DEBUG_FD_CACHE(x...)
#endif
#if 1
#define DEBUG_FD_FUA(x...) printk(x)
#else
#define DEBUG_FD_FUA(x...)
#endif
static struct se_subsystem_api fileio_template; static struct se_subsystem_api fileio_template;
/* fd_attach_hba(): (Part of se_subsystem_api_t template) /* fd_attach_hba(): (Part of se_subsystem_api_t template)
...@@ -65,8 +53,8 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id) ...@@ -65,8 +53,8 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
struct fd_host *fd_host; struct fd_host *fd_host;
fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL); fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
if (!(fd_host)) { if (!fd_host) {
printk(KERN_ERR "Unable to allocate memory for struct fd_host\n"); pr_err("Unable to allocate memory for struct fd_host\n");
return -ENOMEM; return -ENOMEM;
} }
...@@ -74,10 +62,10 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id) ...@@ -74,10 +62,10 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
hba->hba_ptr = fd_host; hba->hba_ptr = fd_host;
printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
" Target Core Stack %s\n", hba->hba_id, FD_VERSION, " Target Core Stack %s\n", hba->hba_id, FD_VERSION,
TARGET_CORE_MOD_VERSION); TARGET_CORE_MOD_VERSION);
printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
" MaxSectors: %u\n", " MaxSectors: %u\n",
hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS); hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
...@@ -88,7 +76,7 @@ static void fd_detach_hba(struct se_hba *hba) ...@@ -88,7 +76,7 @@ static void fd_detach_hba(struct se_hba *hba)
{ {
struct fd_host *fd_host = hba->hba_ptr; struct fd_host *fd_host = hba->hba_ptr;
printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic" pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
" Target Core\n", hba->hba_id, fd_host->fd_host_id); " Target Core\n", hba->hba_id, fd_host->fd_host_id);
kfree(fd_host); kfree(fd_host);
...@@ -101,14 +89,14 @@ static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name) ...@@ -101,14 +89,14 @@ static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr; struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL); fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
if (!(fd_dev)) { if (!fd_dev) {
printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n"); pr_err("Unable to allocate memory for struct fd_dev\n");
return NULL; return NULL;
} }
fd_dev->fd_host = fd_host; fd_dev->fd_host = fd_host;
printk(KERN_INFO "FILEIO: Allocated fd_dev for %p\n", name); pr_debug("FILEIO: Allocated fd_dev for %p\n", name);
return fd_dev; return fd_dev;
} }
...@@ -141,7 +129,7 @@ static struct se_device *fd_create_virtdevice( ...@@ -141,7 +129,7 @@ static struct se_device *fd_create_virtdevice(
set_fs(old_fs); set_fs(old_fs);
if (IS_ERR(dev_p)) { if (IS_ERR(dev_p)) {
printk(KERN_ERR "getname(%s) failed: %lu\n", pr_err("getname(%s) failed: %lu\n",
fd_dev->fd_dev_name, IS_ERR(dev_p)); fd_dev->fd_dev_name, IS_ERR(dev_p));
ret = PTR_ERR(dev_p); ret = PTR_ERR(dev_p);
goto fail; goto fail;
...@@ -164,12 +152,12 @@ static struct se_device *fd_create_virtdevice( ...@@ -164,12 +152,12 @@ static struct se_device *fd_create_virtdevice(
file = filp_open(dev_p, flags, 0600); file = filp_open(dev_p, flags, 0600);
if (IS_ERR(file)) { if (IS_ERR(file)) {
printk(KERN_ERR "filp_open(%s) failed\n", dev_p); pr_err("filp_open(%s) failed\n", dev_p);
ret = PTR_ERR(file); ret = PTR_ERR(file);
goto fail; goto fail;
} }
if (!file || !file->f_dentry) { if (!file || !file->f_dentry) {
printk(KERN_ERR "filp_open(%s) failed\n", dev_p); pr_err("filp_open(%s) failed\n", dev_p);
goto fail; goto fail;
} }
fd_dev->fd_file = file; fd_dev->fd_file = file;
...@@ -199,14 +187,14 @@ static struct se_device *fd_create_virtdevice( ...@@ -199,14 +187,14 @@ static struct se_device *fd_create_virtdevice(
fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) - fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
fd_dev->fd_block_size); fd_dev->fd_block_size);
printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct" pr_debug("FILEIO: Using size: %llu bytes from struct"
" block_device blocks: %llu logical_block_size: %d\n", " block_device blocks: %llu logical_block_size: %d\n",
fd_dev->fd_dev_size, fd_dev->fd_dev_size,
div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size), div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
fd_dev->fd_block_size); fd_dev->fd_block_size);
} else { } else {
if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) { if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
printk(KERN_ERR "FILEIO: Missing fd_dev_size=" pr_err("FILEIO: Missing fd_dev_size="
" parameter, and no backing struct" " parameter, and no backing struct"
" block_device\n"); " block_device\n");
goto fail; goto fail;
...@@ -225,13 +213,13 @@ static struct se_device *fd_create_virtdevice( ...@@ -225,13 +213,13 @@ static struct se_device *fd_create_virtdevice(
dev = transport_add_device_to_core_hba(hba, &fileio_template, dev = transport_add_device_to_core_hba(hba, &fileio_template,
se_dev, dev_flags, fd_dev, se_dev, dev_flags, fd_dev,
&dev_limits, "FILEIO", FD_VERSION); &dev_limits, "FILEIO", FD_VERSION);
if (!(dev)) if (!dev)
goto fail; goto fail;
fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++; fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
fd_dev->fd_queue_depth = dev->queue_depth; fd_dev->fd_queue_depth = dev->queue_depth;
printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s," pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id, " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
fd_dev->fd_dev_name, fd_dev->fd_dev_size); fd_dev->fd_dev_name, fd_dev->fd_dev_size);
...@@ -269,25 +257,24 @@ static inline struct fd_request *FILE_REQ(struct se_task *task) ...@@ -269,25 +257,24 @@ static inline struct fd_request *FILE_REQ(struct se_task *task)
static struct se_task * static struct se_task *
fd_alloc_task(struct se_cmd *cmd) fd_alloc_task(unsigned char *cdb)
{ {
struct fd_request *fd_req; struct fd_request *fd_req;
fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL); fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
if (!(fd_req)) { if (!fd_req) {
printk(KERN_ERR "Unable to allocate struct fd_request\n"); pr_err("Unable to allocate struct fd_request\n");
return NULL; return NULL;
} }
fd_req->fd_dev = cmd->se_dev->dev_ptr;
return &fd_req->fd_task; return &fd_req->fd_task;
} }
static int fd_do_readv(struct se_task *task) static int fd_do_readv(struct se_task *task)
{ {
struct fd_request *req = FILE_REQ(task); struct fd_request *req = FILE_REQ(task);
struct file *fd = req->fd_dev->fd_file; struct fd_dev *dev = req->fd_task.se_dev->dev_ptr;
struct file *fd = dev->fd_file;
struct scatterlist *sg = task->task_sg; struct scatterlist *sg = task->task_sg;
struct iovec *iov; struct iovec *iov;
mm_segment_t old_fs; mm_segment_t old_fs;
...@@ -295,20 +282,20 @@ static int fd_do_readv(struct se_task *task) ...@@ -295,20 +282,20 @@ static int fd_do_readv(struct se_task *task)
task->se_dev->se_sub_dev->se_dev_attrib.block_size); task->se_dev->se_sub_dev->se_dev_attrib.block_size);
int ret = 0, i; int ret = 0, i;
iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL); iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
if (!(iov)) { if (!iov) {
printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n"); pr_err("Unable to allocate fd_do_readv iov[]\n");
return -ENOMEM; return -ENOMEM;
} }
for (i = 0; i < task->task_sg_num; i++) { for (i = 0; i < task->task_sg_nents; i++) {
iov[i].iov_len = sg[i].length; iov[i].iov_len = sg[i].length;
iov[i].iov_base = sg_virt(&sg[i]); iov[i].iov_base = sg_virt(&sg[i]);
} }
old_fs = get_fs(); old_fs = get_fs();
set_fs(get_ds()); set_fs(get_ds());
ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos); ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos);
set_fs(old_fs); set_fs(old_fs);
kfree(iov); kfree(iov);
...@@ -319,14 +306,14 @@ static int fd_do_readv(struct se_task *task) ...@@ -319,14 +306,14 @@ static int fd_do_readv(struct se_task *task)
*/ */
if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) { if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
if (ret < 0 || ret != task->task_size) { if (ret < 0 || ret != task->task_size) {
printk(KERN_ERR "vfs_readv() returned %d," pr_err("vfs_readv() returned %d,"
" expecting %d for S_ISBLK\n", ret, " expecting %d for S_ISBLK\n", ret,
(int)task->task_size); (int)task->task_size);
return (ret < 0 ? ret : -EINVAL); return (ret < 0 ? ret : -EINVAL);
} }
} else { } else {
if (ret < 0) { if (ret < 0) {
printk(KERN_ERR "vfs_readv() returned %d for non" pr_err("vfs_readv() returned %d for non"
" S_ISBLK\n", ret); " S_ISBLK\n", ret);
return ret; return ret;
} }
...@@ -338,7 +325,8 @@ static int fd_do_readv(struct se_task *task) ...@@ -338,7 +325,8 @@ static int fd_do_readv(struct se_task *task)
static int fd_do_writev(struct se_task *task) static int fd_do_writev(struct se_task *task)
{ {
struct fd_request *req = FILE_REQ(task); struct fd_request *req = FILE_REQ(task);
struct file *fd = req->fd_dev->fd_file; struct fd_dev *dev = req->fd_task.se_dev->dev_ptr;
struct file *fd = dev->fd_file;
struct scatterlist *sg = task->task_sg; struct scatterlist *sg = task->task_sg;
struct iovec *iov; struct iovec *iov;
mm_segment_t old_fs; mm_segment_t old_fs;
...@@ -346,26 +334,26 @@ static int fd_do_writev(struct se_task *task) ...@@ -346,26 +334,26 @@ static int fd_do_writev(struct se_task *task)
task->se_dev->se_sub_dev->se_dev_attrib.block_size); task->se_dev->se_sub_dev->se_dev_attrib.block_size);
int ret, i = 0; int ret, i = 0;
iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL); iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
if (!(iov)) { if (!iov) {
printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n"); pr_err("Unable to allocate fd_do_writev iov[]\n");
return -ENOMEM; return -ENOMEM;
} }
for (i = 0; i < task->task_sg_num; i++) { for (i = 0; i < task->task_sg_nents; i++) {
iov[i].iov_len = sg[i].length; iov[i].iov_len = sg[i].length;
iov[i].iov_base = sg_virt(&sg[i]); iov[i].iov_base = sg_virt(&sg[i]);
} }
old_fs = get_fs(); old_fs = get_fs();
set_fs(get_ds()); set_fs(get_ds());
ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos); ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos);
set_fs(old_fs); set_fs(old_fs);
kfree(iov); kfree(iov);
if (ret < 0 || ret != task->task_size) { if (ret < 0 || ret != task->task_size) {
printk(KERN_ERR "vfs_writev() returned %d\n", ret); pr_err("vfs_writev() returned %d\n", ret);
return (ret < 0 ? ret : -EINVAL); return (ret < 0 ? ret : -EINVAL);
} }
...@@ -404,7 +392,7 @@ static void fd_emulate_sync_cache(struct se_task *task) ...@@ -404,7 +392,7 @@ static void fd_emulate_sync_cache(struct se_task *task)
ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
if (ret != 0) if (ret != 0)
printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret); pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
if (!immed) if (!immed)
transport_complete_sync_cache(cmd, ret == 0); transport_complete_sync_cache(cmd, ret == 0);
...@@ -449,12 +437,12 @@ static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task) ...@@ -449,12 +437,12 @@ static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
loff_t end = start + task->task_size; loff_t end = start + task->task_size;
int ret; int ret;
DEBUG_FD_CACHE("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n", pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
task->task_lba, task->task_size); task->task_lba, task->task_size);
ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
if (ret != 0) if (ret != 0)
printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret); pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
} }
static int fd_do_task(struct se_task *task) static int fd_do_task(struct se_task *task)
...@@ -548,7 +536,7 @@ static ssize_t fd_set_configfs_dev_params( ...@@ -548,7 +536,7 @@ static ssize_t fd_set_configfs_dev_params(
snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME, snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
"%s", arg_p); "%s", arg_p);
kfree(arg_p); kfree(arg_p);
printk(KERN_INFO "FILEIO: Referencing Path: %s\n", pr_debug("FILEIO: Referencing Path: %s\n",
fd_dev->fd_dev_name); fd_dev->fd_dev_name);
fd_dev->fbd_flags |= FBDF_HAS_PATH; fd_dev->fbd_flags |= FBDF_HAS_PATH;
break; break;
...@@ -561,23 +549,23 @@ static ssize_t fd_set_configfs_dev_params( ...@@ -561,23 +549,23 @@ static ssize_t fd_set_configfs_dev_params(
ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size); ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
kfree(arg_p); kfree(arg_p);
if (ret < 0) { if (ret < 0) {
printk(KERN_ERR "strict_strtoull() failed for" pr_err("strict_strtoull() failed for"
" fd_dev_size=\n"); " fd_dev_size=\n");
goto out; goto out;
} }
printk(KERN_INFO "FILEIO: Referencing Size: %llu" pr_debug("FILEIO: Referencing Size: %llu"
" bytes\n", fd_dev->fd_dev_size); " bytes\n", fd_dev->fd_dev_size);
fd_dev->fbd_flags |= FBDF_HAS_SIZE; fd_dev->fbd_flags |= FBDF_HAS_SIZE;
break; break;
case Opt_fd_buffered_io: case Opt_fd_buffered_io:
match_int(args, &arg); match_int(args, &arg);
if (arg != 1) { if (arg != 1) {
printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg); pr_err("bogus fd_buffered_io=%d value\n", arg);
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }
printk(KERN_INFO "FILEIO: Using buffered I/O" pr_debug("FILEIO: Using buffered I/O"
" operations for struct fd_dev\n"); " operations for struct fd_dev\n");
fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO; fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
...@@ -597,7 +585,7 @@ static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys ...@@ -597,7 +585,7 @@ static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys
struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr; struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;
if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) { if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
printk(KERN_ERR "Missing fd_dev_name=\n"); pr_err("Missing fd_dev_name=\n");
return -EINVAL; return -EINVAL;
} }
......
...@@ -16,8 +16,6 @@ struct fd_request { ...@@ -16,8 +16,6 @@ struct fd_request {
struct se_task fd_task; struct se_task fd_task;
/* SCSI CDB from iSCSI Command PDU */ /* SCSI CDB from iSCSI Command PDU */
unsigned char fd_scsi_cdb[TCM_MAX_COMMAND_SIZE]; unsigned char fd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
/* FILEIO device */
struct fd_dev *fd_dev;
} ____cacheline_aligned; } ____cacheline_aligned;
#define FBDF_HAS_PATH 0x01 #define FBDF_HAS_PATH 0x01
......
...@@ -58,8 +58,8 @@ int transport_subsystem_register(struct se_subsystem_api *sub_api) ...@@ -58,8 +58,8 @@ int transport_subsystem_register(struct se_subsystem_api *sub_api)
mutex_lock(&subsystem_mutex); mutex_lock(&subsystem_mutex);
list_for_each_entry(s, &subsystem_list, sub_api_list) { list_for_each_entry(s, &subsystem_list, sub_api_list) {
if (!(strcmp(s->name, sub_api->name))) { if (!strcmp(s->name, sub_api->name)) {
printk(KERN_ERR "%p is already registered with" pr_err("%p is already registered with"
" duplicate name %s, unable to process" " duplicate name %s, unable to process"
" request\n", s, s->name); " request\n", s, s->name);
mutex_unlock(&subsystem_mutex); mutex_unlock(&subsystem_mutex);
...@@ -69,7 +69,7 @@ int transport_subsystem_register(struct se_subsystem_api *sub_api) ...@@ -69,7 +69,7 @@ int transport_subsystem_register(struct se_subsystem_api *sub_api)
list_add_tail(&sub_api->sub_api_list, &subsystem_list); list_add_tail(&sub_api->sub_api_list, &subsystem_list);
mutex_unlock(&subsystem_mutex); mutex_unlock(&subsystem_mutex);
printk(KERN_INFO "TCM: Registered subsystem plugin: %s struct module:" pr_debug("TCM: Registered subsystem plugin: %s struct module:"
" %p\n", sub_api->name, sub_api->owner); " %p\n", sub_api->name, sub_api->owner);
return 0; return 0;
} }
...@@ -109,7 +109,7 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags) ...@@ -109,7 +109,7 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
hba = kzalloc(sizeof(*hba), GFP_KERNEL); hba = kzalloc(sizeof(*hba), GFP_KERNEL);
if (!hba) { if (!hba) {
printk(KERN_ERR "Unable to allocate struct se_hba\n"); pr_err("Unable to allocate struct se_hba\n");
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
...@@ -135,7 +135,7 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags) ...@@ -135,7 +135,7 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
list_add_tail(&hba->hba_node, &hba_list); list_add_tail(&hba->hba_node, &hba_list);
spin_unlock(&hba_lock); spin_unlock(&hba_lock);
printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target" pr_debug("CORE_HBA[%d] - Attached HBA to Generic Target"
" Core\n", hba->hba_id); " Core\n", hba->hba_id);
return hba; return hba;
...@@ -161,7 +161,7 @@ core_delete_hba(struct se_hba *hba) ...@@ -161,7 +161,7 @@ core_delete_hba(struct se_hba *hba)
list_del(&hba->hba_node); list_del(&hba->hba_node);
spin_unlock(&hba_lock); spin_unlock(&hba_lock);
printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target" pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
" Core\n", hba->hba_id); " Core\n", hba->hba_id);
if (hba->transport->owner) if (hba->transport->owner)
......
This diff is collapsed.
...@@ -12,7 +12,6 @@ struct iblock_req { ...@@ -12,7 +12,6 @@ struct iblock_req {
atomic_t ib_bio_cnt; atomic_t ib_bio_cnt;
atomic_t ib_bio_err_cnt; atomic_t ib_bio_err_cnt;
struct bio *ib_bio; struct bio *ib_bio;
struct iblock_dev *ib_dev;
} ____cacheline_aligned; } ____cacheline_aligned;
#define IBDF_HAS_UDEV_PATH 0x01 #define IBDF_HAS_UDEV_PATH 0x01
......
This diff is collapsed.
This diff is collapsed.
...@@ -23,13 +23,12 @@ ...@@ -23,13 +23,12 @@
struct pscsi_plugin_task { struct pscsi_plugin_task {
struct se_task pscsi_task; struct se_task pscsi_task;
unsigned char *pscsi_cdb;
unsigned char __pscsi_cdb[TCM_MAX_COMMAND_SIZE];
unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE]; unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE];
int pscsi_direction; int pscsi_direction;
int pscsi_result; int pscsi_result;
u32 pscsi_resid; u32 pscsi_resid;
struct request *pscsi_req; struct request *pscsi_req;
unsigned char pscsi_cdb[0];
} ____cacheline_aligned; } ____cacheline_aligned;
#define PDF_HAS_CHANNEL_ID 0x01 #define PDF_HAS_CHANNEL_ID 0x01
......
This diff is collapsed.
...@@ -32,8 +32,6 @@ struct rd_request { ...@@ -32,8 +32,6 @@ struct rd_request {
u32 rd_page_count; u32 rd_page_count;
/* Scatterlist count */ /* Scatterlist count */
u32 rd_size; u32 rd_size;
/* Ramdisk device */
struct rd_dev *rd_dev;
} ____cacheline_aligned; } ____cacheline_aligned;
struct rd_dev_sg_table { struct rd_dev_sg_table {
......
...@@ -41,13 +41,6 @@ ...@@ -41,13 +41,6 @@
#include "target_core_alua.h" #include "target_core_alua.h"
#include "target_core_pr.h" #include "target_core_pr.h"
#define DEBUG_LUN_RESET
#ifdef DEBUG_LUN_RESET
#define DEBUG_LR(x...) printk(KERN_INFO x)
#else
#define DEBUG_LR(x...)
#endif
struct se_tmr_req *core_tmr_alloc_req( struct se_tmr_req *core_tmr_alloc_req(
struct se_cmd *se_cmd, struct se_cmd *se_cmd,
void *fabric_tmr_ptr, void *fabric_tmr_ptr,
...@@ -57,8 +50,8 @@ struct se_tmr_req *core_tmr_alloc_req( ...@@ -57,8 +50,8 @@ struct se_tmr_req *core_tmr_alloc_req(
tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ? tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ?
GFP_ATOMIC : GFP_KERNEL); GFP_ATOMIC : GFP_KERNEL);
if (!(tmr)) { if (!tmr) {
printk(KERN_ERR "Unable to allocate struct se_tmr_req\n"); pr_err("Unable to allocate struct se_tmr_req\n");
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
tmr->task_cmd = se_cmd; tmr->task_cmd = se_cmd;
...@@ -93,14 +86,14 @@ static void core_tmr_handle_tas_abort( ...@@ -93,14 +86,14 @@ static void core_tmr_handle_tas_abort(
int tas, int tas,
int fe_count) int fe_count)
{ {
if (!(fe_count)) { if (!fe_count) {
transport_cmd_finish_abort(cmd, 1); transport_cmd_finish_abort(cmd, 1);
return; return;
} }
/* /*
* TASK ABORTED status (TAS) bit support * TASK ABORTED status (TAS) bit support
*/ */
if (((tmr_nacl != NULL) && if ((tmr_nacl &&
(tmr_nacl == cmd->se_sess->se_node_acl)) || tas) (tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
transport_send_task_abort(cmd); transport_send_task_abort(cmd);
...@@ -141,13 +134,13 @@ int core_tmr_lun_reset( ...@@ -141,13 +134,13 @@ int core_tmr_lun_reset(
tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
tmr_tpg = tmr->task_cmd->se_sess->se_tpg; tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
if (tmr_nacl && tmr_tpg) { if (tmr_nacl && tmr_tpg) {
DEBUG_LR("LUN_RESET: TMR caller fabric: %s" pr_debug("LUN_RESET: TMR caller fabric: %s"
" initiator port %s\n", " initiator port %s\n",
tmr_tpg->se_tpg_tfo->get_fabric_name(), tmr_tpg->se_tpg_tfo->get_fabric_name(),
tmr_nacl->initiatorname); tmr_nacl->initiatorname);
} }
} }
DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n", pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
(preempt_and_abort_list) ? "Preempt" : "TMR", (preempt_and_abort_list) ? "Preempt" : "TMR",
dev->transport->name, tas); dev->transport->name, tas);
/* /*
...@@ -163,8 +156,8 @@ int core_tmr_lun_reset( ...@@ -163,8 +156,8 @@ int core_tmr_lun_reset(
continue; continue;
cmd = tmr_p->task_cmd; cmd = tmr_p->task_cmd;
if (!(cmd)) { if (!cmd) {
printk(KERN_ERR "Unable to locate struct se_cmd for TMR\n"); pr_err("Unable to locate struct se_cmd for TMR\n");
continue; continue;
} }
/* /*
...@@ -172,14 +165,14 @@ int core_tmr_lun_reset( ...@@ -172,14 +165,14 @@ int core_tmr_lun_reset(
* parameter (eg: for PROUT PREEMPT_AND_ABORT service action * parameter (eg: for PROUT PREEMPT_AND_ABORT service action
* skip non regisration key matching TMRs. * skip non regisration key matching TMRs.
*/ */
if ((preempt_and_abort_list != NULL) && if (preempt_and_abort_list &&
(core_scsi3_check_cdb_abort_and_preempt( (core_scsi3_check_cdb_abort_and_preempt(
preempt_and_abort_list, cmd) != 0)) preempt_and_abort_list, cmd) != 0))
continue; continue;
spin_unlock_irq(&dev->se_tmr_lock); spin_unlock_irq(&dev->se_tmr_lock);
spin_lock_irqsave(&cmd->t_state_lock, flags); spin_lock_irqsave(&cmd->t_state_lock, flags);
if (!(atomic_read(&cmd->t_transport_active))) { if (!atomic_read(&cmd->t_transport_active)) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags); spin_unlock_irqrestore(&cmd->t_state_lock, flags);
spin_lock_irq(&dev->se_tmr_lock); spin_lock_irq(&dev->se_tmr_lock);
continue; continue;
...@@ -189,7 +182,7 @@ int core_tmr_lun_reset( ...@@ -189,7 +182,7 @@ int core_tmr_lun_reset(
spin_lock_irq(&dev->se_tmr_lock); spin_lock_irq(&dev->se_tmr_lock);
continue; continue;
} }
DEBUG_LR("LUN_RESET: %s releasing TMR %p Function: 0x%02x," pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
" Response: 0x%02x, t_state: %d\n", " Response: 0x%02x, t_state: %d\n",
(preempt_and_abort_list) ? "Preempt" : "", tmr_p, (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
tmr_p->function, tmr_p->response, cmd->t_state); tmr_p->function, tmr_p->response, cmd->t_state);
...@@ -224,7 +217,7 @@ int core_tmr_lun_reset( ...@@ -224,7 +217,7 @@ int core_tmr_lun_reset(
list_for_each_entry_safe(task, task_tmp, &dev->state_task_list, list_for_each_entry_safe(task, task_tmp, &dev->state_task_list,
t_state_list) { t_state_list) {
if (!task->task_se_cmd) { if (!task->task_se_cmd) {
printk(KERN_ERR "task->task_se_cmd is NULL!\n"); pr_err("task->task_se_cmd is NULL!\n");
continue; continue;
} }
cmd = task->task_se_cmd; cmd = task->task_se_cmd;
...@@ -233,7 +226,7 @@ int core_tmr_lun_reset( ...@@ -233,7 +226,7 @@ int core_tmr_lun_reset(
* For PREEMPT_AND_ABORT usage, only process commands * For PREEMPT_AND_ABORT usage, only process commands
* with a matching reservation key. * with a matching reservation key.
*/ */
if ((preempt_and_abort_list != NULL) && if (preempt_and_abort_list &&
(core_scsi3_check_cdb_abort_and_preempt( (core_scsi3_check_cdb_abort_and_preempt(
preempt_and_abort_list, cmd) != 0)) preempt_and_abort_list, cmd) != 0))
continue; continue;
...@@ -248,14 +241,14 @@ int core_tmr_lun_reset( ...@@ -248,14 +241,14 @@ int core_tmr_lun_reset(
spin_unlock_irqrestore(&dev->execute_task_lock, flags); spin_unlock_irqrestore(&dev->execute_task_lock, flags);
spin_lock_irqsave(&cmd->t_state_lock, flags); spin_lock_irqsave(&cmd->t_state_lock, flags);
DEBUG_LR("LUN_RESET: %s cmd: %p task: %p" pr_debug("LUN_RESET: %s cmd: %p task: %p"
" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/" " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/"
"def_t_state: %d/%d cdb: 0x%02x\n", "def_t_state: %d/%d cdb: 0x%02x\n",
(preempt_and_abort_list) ? "Preempt" : "", cmd, task, (preempt_and_abort_list) ? "Preempt" : "", cmd, task,
cmd->se_tfo->get_task_tag(cmd), 0, cmd->se_tfo->get_task_tag(cmd), 0,
cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
cmd->deferred_t_state, cmd->t_task_cdb[0]); cmd->deferred_t_state, cmd->t_task_cdb[0]);
DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
" t_task_cdbs: %d t_task_cdbs_left: %d" " t_task_cdbs: %d t_task_cdbs_left: %d"
" t_task_cdbs_sent: %d -- t_transport_active: %d" " t_task_cdbs_sent: %d -- t_transport_active: %d"
" t_transport_stop: %d t_transport_sent: %d\n", " t_transport_stop: %d t_transport_sent: %d\n",
...@@ -272,10 +265,10 @@ int core_tmr_lun_reset( ...@@ -272,10 +265,10 @@ int core_tmr_lun_reset(
spin_unlock_irqrestore( spin_unlock_irqrestore(
&cmd->t_state_lock, flags); &cmd->t_state_lock, flags);
DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown" pr_debug("LUN_RESET: Waiting for task: %p to shutdown"
" for dev: %p\n", task, dev); " for dev: %p\n", task, dev);
wait_for_completion(&task->task_stop_comp); wait_for_completion(&task->task_stop_comp);
DEBUG_LR("LUN_RESET Completed task: %p shutdown for" pr_debug("LUN_RESET Completed task: %p shutdown for"
" dev: %p\n", task, dev); " dev: %p\n", task, dev);
spin_lock_irqsave(&cmd->t_state_lock, flags); spin_lock_irqsave(&cmd->t_state_lock, flags);
atomic_dec(&cmd->t_task_cdbs_left); atomic_dec(&cmd->t_task_cdbs_left);
...@@ -288,10 +281,10 @@ int core_tmr_lun_reset( ...@@ -288,10 +281,10 @@ int core_tmr_lun_reset(
} }
__transport_stop_task_timer(task, &flags); __transport_stop_task_timer(task, &flags);
if (!(atomic_dec_and_test(&cmd->t_task_cdbs_ex_left))) { if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
spin_unlock_irqrestore( spin_unlock_irqrestore(
&cmd->t_state_lock, flags); &cmd->t_state_lock, flags);
DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for" pr_debug("LUN_RESET: Skipping task: %p, dev: %p for"
" t_task_cdbs_ex_left: %d\n", task, dev, " t_task_cdbs_ex_left: %d\n", task, dev,
atomic_read(&cmd->t_task_cdbs_ex_left)); atomic_read(&cmd->t_task_cdbs_ex_left));
...@@ -301,7 +294,7 @@ int core_tmr_lun_reset( ...@@ -301,7 +294,7 @@ int core_tmr_lun_reset(
fe_count = atomic_read(&cmd->t_fe_count); fe_count = atomic_read(&cmd->t_fe_count);
if (atomic_read(&cmd->t_transport_active)) { if (atomic_read(&cmd->t_transport_active)) {
DEBUG_LR("LUN_RESET: got t_transport_active = 1 for" pr_debug("LUN_RESET: got t_transport_active = 1 for"
" task: %p, t_fe_count: %d dev: %p\n", task, " task: %p, t_fe_count: %d dev: %p\n", task,
fe_count, dev); fe_count, dev);
atomic_set(&cmd->t_transport_aborted, 1); atomic_set(&cmd->t_transport_aborted, 1);
...@@ -312,7 +305,7 @@ int core_tmr_lun_reset( ...@@ -312,7 +305,7 @@ int core_tmr_lun_reset(
spin_lock_irqsave(&dev->execute_task_lock, flags); spin_lock_irqsave(&dev->execute_task_lock, flags);
continue; continue;
} }
DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p," pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
" t_fe_count: %d dev: %p\n", task, fe_count, dev); " t_fe_count: %d dev: %p\n", task, fe_count, dev);
atomic_set(&cmd->t_transport_aborted, 1); atomic_set(&cmd->t_transport_aborted, 1);
spin_unlock_irqrestore(&cmd->t_state_lock, flags); spin_unlock_irqrestore(&cmd->t_state_lock, flags);
...@@ -335,7 +328,7 @@ int core_tmr_lun_reset( ...@@ -335,7 +328,7 @@ int core_tmr_lun_reset(
* For PREEMPT_AND_ABORT usage, only process commands * For PREEMPT_AND_ABORT usage, only process commands
* with a matching reservation key. * with a matching reservation key.
*/ */
if ((preempt_and_abort_list != NULL) && if (preempt_and_abort_list &&
(core_scsi3_check_cdb_abort_and_preempt( (core_scsi3_check_cdb_abort_and_preempt(
preempt_and_abort_list, cmd) != 0)) preempt_and_abort_list, cmd) != 0))
continue; continue;
...@@ -350,7 +343,7 @@ int core_tmr_lun_reset( ...@@ -350,7 +343,7 @@ int core_tmr_lun_reset(
list_del(&cmd->se_queue_node); list_del(&cmd->se_queue_node);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:" pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
" %d t_fe_count: %d\n", (preempt_and_abort_list) ? " %d t_fe_count: %d\n", (preempt_and_abort_list) ?
"Preempt" : "", cmd, cmd->t_state, "Preempt" : "", cmd, cmd->t_state,
atomic_read(&cmd->t_fe_count)); atomic_read(&cmd->t_fe_count));
...@@ -368,20 +361,20 @@ int core_tmr_lun_reset( ...@@ -368,20 +361,20 @@ int core_tmr_lun_reset(
* Clear any legacy SPC-2 reservation when called during * Clear any legacy SPC-2 reservation when called during
* LOGICAL UNIT RESET * LOGICAL UNIT RESET
*/ */
if (!(preempt_and_abort_list) && if (!preempt_and_abort_list &&
(dev->dev_flags & DF_SPC2_RESERVATIONS)) { (dev->dev_flags & DF_SPC2_RESERVATIONS)) {
spin_lock(&dev->dev_reservation_lock); spin_lock(&dev->dev_reservation_lock);
dev->dev_reserved_node_acl = NULL; dev->dev_reserved_node_acl = NULL;
dev->dev_flags &= ~DF_SPC2_RESERVATIONS; dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
spin_unlock(&dev->dev_reservation_lock); spin_unlock(&dev->dev_reservation_lock);
printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n"); pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
} }
spin_lock_irq(&dev->stats_lock); spin_lock_irq(&dev->stats_lock);
dev->num_resets++; dev->num_resets++;
spin_unlock_irq(&dev->stats_lock); spin_unlock_irq(&dev->stats_lock);
DEBUG_LR("LUN_RESET: %s for [%s] Complete\n", pr_debug("LUN_RESET: %s for [%s] Complete\n",
(preempt_and_abort_list) ? "Preempt" : "TMR", (preempt_and_abort_list) ? "Preempt" : "TMR",
dev->transport->name); dev->transport->name);
return 0; return 0;
......
This diff is collapsed.
This diff is collapsed.
...@@ -49,15 +49,15 @@ int core_scsi3_ua_check( ...@@ -49,15 +49,15 @@ int core_scsi3_ua_check(
struct se_session *sess = cmd->se_sess; struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl; struct se_node_acl *nacl;
if (!(sess)) if (!sess)
return 0; return 0;
nacl = sess->se_node_acl; nacl = sess->se_node_acl;
if (!(nacl)) if (!nacl)
return 0; return 0;
deve = &nacl->device_list[cmd->orig_fe_lun]; deve = &nacl->device_list[cmd->orig_fe_lun];
if (!(atomic_read(&deve->ua_count))) if (!atomic_read(&deve->ua_count))
return 0; return 0;
/* /*
* From sam4r14, section 5.14 Unit attention condition: * From sam4r14, section 5.14 Unit attention condition:
...@@ -97,12 +97,12 @@ int core_scsi3_ua_allocate( ...@@ -97,12 +97,12 @@ int core_scsi3_ua_allocate(
/* /*
* PASSTHROUGH OPS * PASSTHROUGH OPS
*/ */
if (!(nacl)) if (!nacl)
return -EINVAL; return -EINVAL;
ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC); ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
if (!(ua)) { if (!ua) {
printk(KERN_ERR "Unable to allocate struct se_ua\n"); pr_err("Unable to allocate struct se_ua\n");
return -ENOMEM; return -ENOMEM;
} }
INIT_LIST_HEAD(&ua->ua_dev_list); INIT_LIST_HEAD(&ua->ua_dev_list);
...@@ -177,7 +177,7 @@ int core_scsi3_ua_allocate( ...@@ -177,7 +177,7 @@ int core_scsi3_ua_allocate(
spin_unlock(&deve->ua_lock); spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock); spin_unlock_irq(&nacl->device_list_lock);
printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:" pr_debug("[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
" 0x%02x, ASCQ: 0x%02x\n", " 0x%02x, ASCQ: 0x%02x\n",
nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
asc, ascq); asc, ascq);
...@@ -215,16 +215,16 @@ void core_scsi3_ua_for_check_condition( ...@@ -215,16 +215,16 @@ void core_scsi3_ua_for_check_condition(
struct se_ua *ua = NULL, *ua_p; struct se_ua *ua = NULL, *ua_p;
int head = 1; int head = 1;
if (!(sess)) if (!sess)
return; return;
nacl = sess->se_node_acl; nacl = sess->se_node_acl;
if (!(nacl)) if (!nacl)
return; return;
spin_lock_irq(&nacl->device_list_lock); spin_lock_irq(&nacl->device_list_lock);
deve = &nacl->device_list[cmd->orig_fe_lun]; deve = &nacl->device_list[cmd->orig_fe_lun];
if (!(atomic_read(&deve->ua_count))) { if (!atomic_read(&deve->ua_count)) {
spin_unlock_irq(&nacl->device_list_lock); spin_unlock_irq(&nacl->device_list_lock);
return; return;
} }
...@@ -264,7 +264,7 @@ void core_scsi3_ua_for_check_condition( ...@@ -264,7 +264,7 @@ void core_scsi3_ua_for_check_condition(
spin_unlock(&deve->ua_lock); spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock); spin_unlock_irq(&nacl->device_list_lock);
printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with" pr_debug("[%s]: %s UNIT ATTENTION condition with"
" INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x" " INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
" reported ASC: 0x%02x, ASCQ: 0x%02x\n", " reported ASC: 0x%02x, ASCQ: 0x%02x\n",
nacl->se_tpg->se_tpg_tfo->get_fabric_name(), nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
...@@ -284,16 +284,16 @@ int core_scsi3_ua_clear_for_request_sense( ...@@ -284,16 +284,16 @@ int core_scsi3_ua_clear_for_request_sense(
struct se_ua *ua = NULL, *ua_p; struct se_ua *ua = NULL, *ua_p;
int head = 1; int head = 1;
if (!(sess)) if (!sess)
return -EINVAL; return -EINVAL;
nacl = sess->se_node_acl; nacl = sess->se_node_acl;
if (!(nacl)) if (!nacl)
return -EINVAL; return -EINVAL;
spin_lock_irq(&nacl->device_list_lock); spin_lock_irq(&nacl->device_list_lock);
deve = &nacl->device_list[cmd->orig_fe_lun]; deve = &nacl->device_list[cmd->orig_fe_lun];
if (!(atomic_read(&deve->ua_count))) { if (!atomic_read(&deve->ua_count)) {
spin_unlock_irq(&nacl->device_list_lock); spin_unlock_irq(&nacl->device_list_lock);
return -EPERM; return -EPERM;
} }
...@@ -323,7 +323,7 @@ int core_scsi3_ua_clear_for_request_sense( ...@@ -323,7 +323,7 @@ int core_scsi3_ua_clear_for_request_sense(
spin_unlock(&deve->ua_lock); spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock); spin_unlock_irq(&nacl->device_list_lock);
printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped" pr_debug("[%s]: Released UNIT ATTENTION condition, mapped"
" LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x," " LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x,"
" ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(), " ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
cmd->orig_fe_lun, *asc, *ascq); cmd->orig_fe_lun, *asc, *ascq);
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment