Commit 0fd97ccf authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Nicholas Bellinger

target: kill struct se_subsystem_dev

Simplify the code a lot by killing the superfluous struct se_subsystem_dev.
Instead se_device is allocated early on by the backend driver, which allocates
it as part of its own per-device structure, borrowing the scheme that is for
example used for inode allocation.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
parent 3d70f8c6
...@@ -53,7 +53,6 @@ struct tcm_loop_hba { ...@@ -53,7 +53,6 @@ struct tcm_loop_hba {
struct se_hba_s *se_hba; struct se_hba_s *se_hba;
struct se_lun *tl_hba_lun; struct se_lun *tl_hba_lun;
struct se_port *tl_hba_lun_sep; struct se_port *tl_hba_lun_sep;
struct se_device_s *se_dev_hba_ptr;
struct tcm_loop_nexus *tl_nexus; struct tcm_loop_nexus *tl_nexus;
struct device dev; struct device dev;
struct Scsi_Host *sh; struct Scsi_Host *sh;
......
This diff is collapsed.
...@@ -91,7 +91,7 @@ extern void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *, ...@@ -91,7 +91,7 @@ extern void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *,
struct t10_alua_lu_gp *); struct t10_alua_lu_gp *);
extern void core_alua_drop_lu_gp_dev(struct se_device *); extern void core_alua_drop_lu_gp_dev(struct se_device *);
extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp( extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
struct se_subsystem_dev *, const char *, int); struct se_device *, const char *, int);
extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16); extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16);
extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem( extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
struct se_port *); struct se_port *);
...@@ -131,6 +131,6 @@ extern ssize_t core_alua_show_secondary_write_metadata(struct se_lun *, ...@@ -131,6 +131,6 @@ extern ssize_t core_alua_show_secondary_write_metadata(struct se_lun *,
char *); char *);
extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *, extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *,
const char *, size_t); const char *, size_t);
extern int core_setup_alua(struct se_device *, int); extern int core_setup_alua(struct se_device *);
#endif /* TARGET_CORE_ALUA_H */ #endif /* TARGET_CORE_ALUA_H */
This diff is collapsed.
This diff is collapsed.
...@@ -734,14 +734,12 @@ static int target_fabric_port_link( ...@@ -734,14 +734,12 @@ static int target_fabric_port_link(
struct config_item *se_dev_ci) struct config_item *se_dev_ci)
{ {
struct config_item *tpg_ci; struct config_item *tpg_ci;
struct se_device *dev;
struct se_lun *lun = container_of(to_config_group(lun_ci), struct se_lun *lun = container_of(to_config_group(lun_ci),
struct se_lun, lun_group); struct se_lun, lun_group);
struct se_lun *lun_p; struct se_lun *lun_p;
struct se_portal_group *se_tpg; struct se_portal_group *se_tpg;
struct se_subsystem_dev *se_dev = container_of( struct se_device *dev =
to_config_group(se_dev_ci), struct se_subsystem_dev, container_of(to_config_group(se_dev_ci), struct se_device, dev_group);
se_dev_group);
struct target_fabric_configfs *tf; struct target_fabric_configfs *tf;
int ret; int ret;
...@@ -755,14 +753,6 @@ static int target_fabric_port_link( ...@@ -755,14 +753,6 @@ static int target_fabric_port_link(
return -EEXIST; return -EEXIST;
} }
dev = se_dev->se_dev_ptr;
if (!dev) {
pr_err("Unable to locate struct se_device pointer from"
" %s\n", config_item_name(se_dev_ci));
ret = -ENODEV;
goto out;
}
lun_p = core_dev_add_lun(se_tpg, dev, lun->unpacked_lun); lun_p = core_dev_add_lun(se_tpg, dev, lun->unpacked_lun);
if (IS_ERR(lun_p)) { if (IS_ERR(lun_p)) {
pr_err("core_dev_add_lun() failed\n"); pr_err("core_dev_add_lun() failed\n");
......
...@@ -41,7 +41,10 @@ ...@@ -41,7 +41,10 @@
#include "target_core_file.h" #include "target_core_file.h"
static struct se_subsystem_api fileio_template; static inline struct fd_dev *FD_DEV(struct se_device *dev)
{
return container_of(dev, struct fd_dev, dev);
}
/* fd_attach_hba(): (Part of se_subsystem_api_t template) /* fd_attach_hba(): (Part of se_subsystem_api_t template)
* *
...@@ -82,7 +85,7 @@ static void fd_detach_hba(struct se_hba *hba) ...@@ -82,7 +85,7 @@ static void fd_detach_hba(struct se_hba *hba)
hba->hba_ptr = NULL; hba->hba_ptr = NULL;
} }
static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name) static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
{ {
struct fd_dev *fd_dev; struct fd_dev *fd_dev;
struct fd_host *fd_host = hba->hba_ptr; struct fd_host *fd_host = hba->hba_ptr;
...@@ -97,34 +100,28 @@ static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name) ...@@ -97,34 +100,28 @@ static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
pr_debug("FILEIO: Allocated fd_dev for %p\n", name); pr_debug("FILEIO: Allocated fd_dev for %p\n", name);
return fd_dev; return &fd_dev->dev;
} }
/* fd_create_virtdevice(): (Part of se_subsystem_api_t template) static int fd_configure_device(struct se_device *dev)
*
*
*/
static struct se_device *fd_create_virtdevice(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
void *p)
{ {
struct se_device *dev; struct fd_dev *fd_dev = FD_DEV(dev);
struct se_dev_limits dev_limits; struct fd_host *fd_host = dev->se_hba->hba_ptr;
struct queue_limits *limits;
struct fd_dev *fd_dev = p;
struct fd_host *fd_host = hba->hba_ptr;
struct file *file; struct file *file;
struct inode *inode = NULL; struct inode *inode = NULL;
int dev_flags = 0, flags, ret = -EINVAL; int flags, ret = -EINVAL;
memset(&dev_limits, 0, sizeof(struct se_dev_limits)); if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
pr_err("Missing fd_dev_name=\n");
return -EINVAL;
}
/* /*
* Use O_DSYNC by default instead of O_SYNC to forgo syncing * Use O_DSYNC by default instead of O_SYNC to forgo syncing
* of pure timestamp updates. * of pure timestamp updates.
*/ */
flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC; flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
/* /*
* Optionally allow fd_buffered_io=1 to be enabled for people * Optionally allow fd_buffered_io=1 to be enabled for people
* who want use the fs buffer cache as an WriteCache mechanism. * who want use the fs buffer cache as an WriteCache mechanism.
...@@ -154,22 +151,17 @@ static struct se_device *fd_create_virtdevice( ...@@ -154,22 +151,17 @@ static struct se_device *fd_create_virtdevice(
*/ */
inode = file->f_mapping->host; inode = file->f_mapping->host;
if (S_ISBLK(inode->i_mode)) { if (S_ISBLK(inode->i_mode)) {
struct request_queue *q; struct request_queue *q = bdev_get_queue(inode->i_bdev);
unsigned long long dev_size; unsigned long long dev_size;
/*
* Setup the local scope queue_limits from struct request_queue->limits dev->dev_attrib.hw_block_size =
* to pass into transport_add_device_to_core_hba() as struct se_dev_limits. bdev_logical_block_size(inode->i_bdev);
*/ dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
q = bdev_get_queue(inode->i_bdev);
limits = &dev_limits.limits;
limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
limits->max_hw_sectors = queue_max_hw_sectors(q);
limits->max_sectors = queue_max_sectors(q);
/* /*
* Determine the number of bytes from i_size_read() minus * Determine the number of bytes from i_size_read() minus
* one (1) logical sector from underlying struct block_device * one (1) logical sector from underlying struct block_device
*/ */
fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
dev_size = (i_size_read(file->f_mapping->host) - dev_size = (i_size_read(file->f_mapping->host) -
fd_dev->fd_block_size); fd_dev->fd_block_size);
...@@ -185,26 +177,18 @@ static struct se_device *fd_create_virtdevice( ...@@ -185,26 +177,18 @@ static struct se_device *fd_create_virtdevice(
goto fail; goto fail;
} }
limits = &dev_limits.limits; dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
limits->logical_block_size = FD_BLOCKSIZE; dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
limits->max_hw_sectors = FD_MAX_SECTORS;
limits->max_sectors = FD_MAX_SECTORS;
fd_dev->fd_block_size = FD_BLOCKSIZE;
} }
dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; fd_dev->fd_block_size = dev->dev_attrib.hw_block_size;
dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;
dev = transport_add_device_to_core_hba(hba, &fileio_template, dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
se_dev, dev_flags, fd_dev,
&dev_limits, "FILEIO", FD_VERSION);
if (!dev)
goto fail;
if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) { if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
pr_debug("FILEIO: Forcing setting of emulate_write_cache=1" pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
" with FDBD_HAS_BUFFERED_IO_WCE\n"); " with FDBD_HAS_BUFFERED_IO_WCE\n");
dev->se_sub_dev->se_dev_attrib.emulate_write_cache = 1; dev->dev_attrib.emulate_write_cache = 1;
} }
fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++; fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
...@@ -214,22 +198,18 @@ static struct se_device *fd_create_virtdevice( ...@@ -214,22 +198,18 @@ static struct se_device *fd_create_virtdevice(
" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id, " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
fd_dev->fd_dev_name, fd_dev->fd_dev_size); fd_dev->fd_dev_name, fd_dev->fd_dev_size);
return dev; return 0;
fail: fail:
if (fd_dev->fd_file) { if (fd_dev->fd_file) {
filp_close(fd_dev->fd_file, NULL); filp_close(fd_dev->fd_file, NULL);
fd_dev->fd_file = NULL; fd_dev->fd_file = NULL;
} }
return ERR_PTR(ret); return ret;
} }
/* fd_free_device(): (Part of se_subsystem_api_t template) static void fd_free_device(struct se_device *dev)
*
*
*/
static void fd_free_device(void *p)
{ {
struct fd_dev *fd_dev = p; struct fd_dev *fd_dev = FD_DEV(dev);
if (fd_dev->fd_file) { if (fd_dev->fd_file) {
filp_close(fd_dev->fd_file, NULL); filp_close(fd_dev->fd_file, NULL);
...@@ -243,13 +223,12 @@ static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl, ...@@ -243,13 +223,12 @@ static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl,
u32 sgl_nents) u32 sgl_nents)
{ {
struct se_device *se_dev = cmd->se_dev; struct se_device *se_dev = cmd->se_dev;
struct fd_dev *dev = se_dev->dev_ptr; struct fd_dev *dev = FD_DEV(se_dev);
struct file *fd = dev->fd_file; struct file *fd = dev->fd_file;
struct scatterlist *sg; struct scatterlist *sg;
struct iovec *iov; struct iovec *iov;
mm_segment_t old_fs; mm_segment_t old_fs;
loff_t pos = (cmd->t_task_lba * loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size);
se_dev->se_sub_dev->se_dev_attrib.block_size);
int ret = 0, i; int ret = 0, i;
iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL); iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
...@@ -296,13 +275,12 @@ static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl, ...@@ -296,13 +275,12 @@ static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl,
u32 sgl_nents) u32 sgl_nents)
{ {
struct se_device *se_dev = cmd->se_dev; struct se_device *se_dev = cmd->se_dev;
struct fd_dev *dev = se_dev->dev_ptr; struct fd_dev *dev = FD_DEV(se_dev);
struct file *fd = dev->fd_file; struct file *fd = dev->fd_file;
struct scatterlist *sg; struct scatterlist *sg;
struct iovec *iov; struct iovec *iov;
mm_segment_t old_fs; mm_segment_t old_fs;
loff_t pos = (cmd->t_task_lba * loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size);
se_dev->se_sub_dev->se_dev_attrib.block_size);
int ret, i = 0; int ret, i = 0;
iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL); iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
...@@ -334,7 +312,7 @@ static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl, ...@@ -334,7 +312,7 @@ static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl,
static int fd_execute_sync_cache(struct se_cmd *cmd) static int fd_execute_sync_cache(struct se_cmd *cmd)
{ {
struct se_device *dev = cmd->se_dev; struct se_device *dev = cmd->se_dev;
struct fd_dev *fd_dev = dev->dev_ptr; struct fd_dev *fd_dev = FD_DEV(dev);
int immed = (cmd->t_task_cdb[1] & 0x2); int immed = (cmd->t_task_cdb[1] & 0x2);
loff_t start, end; loff_t start, end;
int ret; int ret;
...@@ -353,7 +331,7 @@ static int fd_execute_sync_cache(struct se_cmd *cmd) ...@@ -353,7 +331,7 @@ static int fd_execute_sync_cache(struct se_cmd *cmd)
start = 0; start = 0;
end = LLONG_MAX; end = LLONG_MAX;
} else { } else {
start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size; start = cmd->t_task_lba * dev->dev_attrib.block_size;
if (cmd->data_length) if (cmd->data_length)
end = start + cmd->data_length; end = start + cmd->data_length;
else else
...@@ -399,11 +377,11 @@ static int fd_execute_rw(struct se_cmd *cmd) ...@@ -399,11 +377,11 @@ static int fd_execute_rw(struct se_cmd *cmd)
* Allow this to happen independent of WCE=0 setting. * Allow this to happen independent of WCE=0 setting.
*/ */
if (ret > 0 && if (ret > 0 &&
dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && dev->dev_attrib.emulate_fua_write > 0 &&
(cmd->se_cmd_flags & SCF_FUA)) { (cmd->se_cmd_flags & SCF_FUA)) {
struct fd_dev *fd_dev = dev->dev_ptr; struct fd_dev *fd_dev = FD_DEV(dev);
loff_t start = cmd->t_task_lba * loff_t start = cmd->t_task_lba *
dev->se_sub_dev->se_dev_attrib.block_size; dev->dev_attrib.block_size;
loff_t end = start + cmd->data_length; loff_t end = start + cmd->data_length;
vfs_fsync_range(fd_dev->fd_file, start, end, 1); vfs_fsync_range(fd_dev->fd_file, start, end, 1);
...@@ -430,12 +408,10 @@ static match_table_t tokens = { ...@@ -430,12 +408,10 @@ static match_table_t tokens = {
{Opt_err, NULL} {Opt_err, NULL}
}; };
static ssize_t fd_set_configfs_dev_params( static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
struct se_hba *hba, const char *page, ssize_t count)
struct se_subsystem_dev *se_dev,
const char *page, ssize_t count)
{ {
struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; struct fd_dev *fd_dev = FD_DEV(dev);
char *orig, *ptr, *arg_p, *opts; char *orig, *ptr, *arg_p, *opts;
substring_t args[MAX_OPT_ARGS]; substring_t args[MAX_OPT_ARGS];
int ret = 0, arg, token; int ret = 0, arg, token;
...@@ -502,24 +478,9 @@ static ssize_t fd_set_configfs_dev_params( ...@@ -502,24 +478,9 @@ static ssize_t fd_set_configfs_dev_params(
return (!ret) ? count : ret; return (!ret) ? count : ret;
} }
static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev) static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
{
struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
pr_err("Missing fd_dev_name=\n");
return -EINVAL;
}
return 0;
}
static ssize_t fd_show_configfs_dev_params(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
char *b)
{ {
struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; struct fd_dev *fd_dev = FD_DEV(dev);
ssize_t bl = 0; ssize_t bl = 0;
bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id); bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
...@@ -550,7 +511,7 @@ static u32 fd_get_device_type(struct se_device *dev) ...@@ -550,7 +511,7 @@ static u32 fd_get_device_type(struct se_device *dev)
static sector_t fd_get_blocks(struct se_device *dev) static sector_t fd_get_blocks(struct se_device *dev)
{ {
struct fd_dev *fd_dev = dev->dev_ptr; struct fd_dev *fd_dev = FD_DEV(dev);
struct file *f = fd_dev->fd_file; struct file *f = fd_dev->fd_file;
struct inode *i = f->f_mapping->host; struct inode *i = f->f_mapping->host;
unsigned long long dev_size; unsigned long long dev_size;
...@@ -564,7 +525,7 @@ static sector_t fd_get_blocks(struct se_device *dev) ...@@ -564,7 +525,7 @@ static sector_t fd_get_blocks(struct se_device *dev)
else else
dev_size = fd_dev->fd_dev_size; dev_size = fd_dev->fd_dev_size;
return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size); return div_u64(dev_size, dev->dev_attrib.block_size);
} }
static struct spc_ops fd_spc_ops = { static struct spc_ops fd_spc_ops = {
...@@ -579,15 +540,16 @@ static int fd_parse_cdb(struct se_cmd *cmd) ...@@ -579,15 +540,16 @@ static int fd_parse_cdb(struct se_cmd *cmd)
static struct se_subsystem_api fileio_template = { static struct se_subsystem_api fileio_template = {
.name = "fileio", .name = "fileio",
.inquiry_prod = "FILEIO",
.inquiry_rev = FD_VERSION,
.owner = THIS_MODULE, .owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
.attach_hba = fd_attach_hba, .attach_hba = fd_attach_hba,
.detach_hba = fd_detach_hba, .detach_hba = fd_detach_hba,
.allocate_virtdevice = fd_allocate_virtdevice, .alloc_device = fd_alloc_device,
.create_virtdevice = fd_create_virtdevice, .configure_device = fd_configure_device,
.free_device = fd_free_device, .free_device = fd_free_device,
.parse_cdb = fd_parse_cdb, .parse_cdb = fd_parse_cdb,
.check_configfs_dev_params = fd_check_configfs_dev_params,
.set_configfs_dev_params = fd_set_configfs_dev_params, .set_configfs_dev_params = fd_set_configfs_dev_params,
.show_configfs_dev_params = fd_show_configfs_dev_params, .show_configfs_dev_params = fd_show_configfs_dev_params,
.get_device_rev = fd_get_device_rev, .get_device_rev = fd_get_device_rev,
......
...@@ -17,6 +17,8 @@ ...@@ -17,6 +17,8 @@
#define FDBD_HAS_BUFFERED_IO_WCE 0x04 #define FDBD_HAS_BUFFERED_IO_WCE 0x04
struct fd_dev { struct fd_dev {
struct se_device dev;
u32 fbd_flags; u32 fbd_flags;
unsigned char fd_dev_name[FD_MAX_DEV_NAME]; unsigned char fd_dev_name[FD_MAX_DEV_NAME];
/* Unique Ramdisk Device ID in Ramdisk HBA */ /* Unique Ramdisk Device ID in Ramdisk HBA */
......
...@@ -113,7 +113,6 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags) ...@@ -113,7 +113,6 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
INIT_LIST_HEAD(&hba->hba_dev_list);
spin_lock_init(&hba->device_lock); spin_lock_init(&hba->device_lock);
mutex_init(&hba->hba_access_mutex); mutex_init(&hba->hba_access_mutex);
...@@ -152,8 +151,7 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags) ...@@ -152,8 +151,7 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
int int
core_delete_hba(struct se_hba *hba) core_delete_hba(struct se_hba *hba)
{ {
if (!list_empty(&hba->hba_dev_list)) WARN_ON(hba->dev_count);
dump_stack();
hba->transport->detach_hba(hba); hba->transport->detach_hba(hba);
......
This diff is collapsed.
...@@ -14,6 +14,7 @@ struct iblock_req { ...@@ -14,6 +14,7 @@ struct iblock_req {
#define IBDF_HAS_UDEV_PATH 0x01 #define IBDF_HAS_UDEV_PATH 0x01
struct iblock_dev { struct iblock_dev {
struct se_device dev;
unsigned char ibd_udev_path[SE_UDEV_PATH_LEN]; unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
u32 ibd_flags; u32 ibd_flags;
struct bio_set *ibd_bio_set; struct bio_set *ibd_bio_set;
......
...@@ -20,12 +20,6 @@ int core_dev_export(struct se_device *, struct se_portal_group *, ...@@ -20,12 +20,6 @@ int core_dev_export(struct se_device *, struct se_portal_group *,
void core_dev_unexport(struct se_device *, struct se_portal_group *, void core_dev_unexport(struct se_device *, struct se_portal_group *,
struct se_lun *); struct se_lun *);
int target_report_luns(struct se_cmd *); int target_report_luns(struct se_cmd *);
void se_release_device_for_hba(struct se_device *);
void se_release_vpd_for_dev(struct se_device *);
int se_free_virtual_device(struct se_device *, struct se_hba *);
int se_dev_check_online(struct se_device *);
int se_dev_check_shutdown(struct se_device *);
void se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *);
int se_dev_set_task_timeout(struct se_device *, u32); int se_dev_set_task_timeout(struct se_device *, u32);
int se_dev_set_max_unmap_lba_count(struct se_device *, u32); int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32); int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
...@@ -60,6 +54,9 @@ void core_dev_free_initiator_node_lun_acl(struct se_portal_group *, ...@@ -60,6 +54,9 @@ void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
struct se_lun_acl *lacl); struct se_lun_acl *lacl);
int core_dev_setup_virtual_lun0(void); int core_dev_setup_virtual_lun0(void);
void core_dev_release_virtual_lun0(void); void core_dev_release_virtual_lun0(void);
struct se_device *target_alloc_device(struct se_hba *hba, const char *name);
int target_configure_device(struct se_device *dev);
void target_free_device(struct se_device *);
/* target_core_hba.c */ /* target_core_hba.c */
struct se_hba *core_alloc_hba(const char *, u32, u32); struct se_hba *core_alloc_hba(const char *, u32, u32);
...@@ -106,9 +103,10 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags); ...@@ -106,9 +103,10 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
int transport_clear_lun_from_sessions(struct se_lun *); int transport_clear_lun_from_sessions(struct se_lun *);
void transport_send_task_abort(struct se_cmd *); void transport_send_task_abort(struct se_cmd *);
int target_cmd_size_check(struct se_cmd *cmd, unsigned int size); int target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
void target_qf_do_work(struct work_struct *work);
/* target_core_stat.c */ /* target_core_stat.c */
void target_stat_setup_dev_default_groups(struct se_subsystem_dev *); void target_stat_setup_dev_default_groups(struct se_device *);
void target_stat_setup_port_default_groups(struct se_lun *); void target_stat_setup_port_default_groups(struct se_lun *);
void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *); void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
......
This diff is collapsed.
...@@ -63,6 +63,6 @@ extern unsigned char *core_scsi3_pr_dump_type(int); ...@@ -63,6 +63,6 @@ extern unsigned char *core_scsi3_pr_dump_type(int);
extern int target_scsi3_emulate_pr_in(struct se_cmd *); extern int target_scsi3_emulate_pr_in(struct se_cmd *);
extern int target_scsi3_emulate_pr_out(struct se_cmd *); extern int target_scsi3_emulate_pr_out(struct se_cmd *);
extern int core_setup_reservations(struct se_device *, int); extern void core_setup_reservations(struct se_device *);
#endif /* TARGET_CORE_PR_H */ #endif /* TARGET_CORE_PR_H */
This diff is collapsed.
...@@ -37,6 +37,7 @@ struct pscsi_plugin_task { ...@@ -37,6 +37,7 @@ struct pscsi_plugin_task {
#define PDF_HAS_VIRT_HOST_ID 0x20 #define PDF_HAS_VIRT_HOST_ID 0x20
struct pscsi_dev_virt { struct pscsi_dev_virt {
struct se_device dev;
int pdv_flags; int pdv_flags;
int pdv_host_id; int pdv_host_id;
int pdv_channel_id; int pdv_channel_id;
...@@ -44,7 +45,6 @@ struct pscsi_dev_virt { ...@@ -44,7 +45,6 @@ struct pscsi_dev_virt {
int pdv_lun_id; int pdv_lun_id;
struct block_device *pdv_bd; struct block_device *pdv_bd;
struct scsi_device *pdv_sd; struct scsi_device *pdv_sd;
struct se_hba *pdv_se_hba;
} ____cacheline_aligned; } ____cacheline_aligned;
typedef enum phv_modes { typedef enum phv_modes {
......
...@@ -41,7 +41,10 @@ ...@@ -41,7 +41,10 @@
#include "target_core_rd.h" #include "target_core_rd.h"
static struct se_subsystem_api rd_mcp_template; static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
return container_of(dev, struct rd_dev, dev);
}
/* rd_attach_hba(): (Part of se_subsystem_api_t template) /* rd_attach_hba(): (Part of se_subsystem_api_t template)
* *
...@@ -196,7 +199,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev) ...@@ -196,7 +199,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
return 0; return 0;
} }
static void *rd_allocate_virtdevice(struct se_hba *hba, const char *name) static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{ {
struct rd_dev *rd_dev; struct rd_dev *rd_dev;
struct rd_host *rd_host = hba->hba_ptr; struct rd_host *rd_host = hba->hba_ptr;
...@@ -209,39 +212,27 @@ static void *rd_allocate_virtdevice(struct se_hba *hba, const char *name) ...@@ -209,39 +212,27 @@ static void *rd_allocate_virtdevice(struct se_hba *hba, const char *name)
rd_dev->rd_host = rd_host; rd_dev->rd_host = rd_host;
return rd_dev; return &rd_dev->dev;
} }
static struct se_device *rd_create_virtdevice(struct se_hba *hba, static int rd_configure_device(struct se_device *dev)
struct se_subsystem_dev *se_dev, void *p)
{ {
struct se_device *dev; struct rd_dev *rd_dev = RD_DEV(dev);
struct se_dev_limits dev_limits; struct rd_host *rd_host = dev->se_hba->hba_ptr;
struct rd_dev *rd_dev = p; int ret;
struct rd_host *rd_host = hba->hba_ptr;
int dev_flags = 0, ret;
char prod[16], rev[4];
memset(&dev_limits, 0, sizeof(struct se_dev_limits)); if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
pr_debug("Missing rd_pages= parameter\n");
return -EINVAL;
}
ret = rd_build_device_space(rd_dev); ret = rd_build_device_space(rd_dev);
if (ret < 0) if (ret < 0)
goto fail; goto fail;
snprintf(prod, 16, "RAMDISK-MCP"); dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
snprintf(rev, 4, "%s", RD_MCP_VERSION); dev->dev_attrib.hw_max_sectors = UINT_MAX;
dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
dev_limits.limits.max_hw_sectors = UINT_MAX;
dev_limits.limits.max_sectors = UINT_MAX;
dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
dev = transport_add_device_to_core_hba(hba,
&rd_mcp_template, se_dev, dev_flags, rd_dev,
&dev_limits, prod, rev);
if (!dev)
goto fail;
rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++; rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
...@@ -251,16 +242,16 @@ static struct se_device *rd_create_virtdevice(struct se_hba *hba, ...@@ -251,16 +242,16 @@ static struct se_device *rd_create_virtdevice(struct se_hba *hba,
rd_dev->sg_table_count, rd_dev->sg_table_count,
(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE)); (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
return dev; return 0;
fail: fail:
rd_release_device_space(rd_dev); rd_release_device_space(rd_dev);
return ERR_PTR(ret); return ret;
} }
static void rd_free_device(void *p) static void rd_free_device(struct se_device *dev)
{ {
struct rd_dev *rd_dev = p; struct rd_dev *rd_dev = RD_DEV(dev);
rd_release_device_space(rd_dev); rd_release_device_space(rd_dev);
kfree(rd_dev); kfree(rd_dev);
...@@ -290,7 +281,7 @@ static int rd_execute_rw(struct se_cmd *cmd) ...@@ -290,7 +281,7 @@ static int rd_execute_rw(struct se_cmd *cmd)
u32 sgl_nents = cmd->t_data_nents; u32 sgl_nents = cmd->t_data_nents;
enum dma_data_direction data_direction = cmd->data_direction; enum dma_data_direction data_direction = cmd->data_direction;
struct se_device *se_dev = cmd->se_dev; struct se_device *se_dev = cmd->se_dev;
struct rd_dev *dev = se_dev->dev_ptr; struct rd_dev *dev = RD_DEV(se_dev);
struct rd_dev_sg_table *table; struct rd_dev_sg_table *table;
struct scatterlist *rd_sg; struct scatterlist *rd_sg;
struct sg_mapping_iter m; struct sg_mapping_iter m;
...@@ -300,7 +291,7 @@ static int rd_execute_rw(struct se_cmd *cmd) ...@@ -300,7 +291,7 @@ static int rd_execute_rw(struct se_cmd *cmd)
u32 src_len; u32 src_len;
u64 tmp; u64 tmp;
tmp = cmd->t_task_lba * se_dev->se_sub_dev->se_dev_attrib.block_size; tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
rd_offset = do_div(tmp, PAGE_SIZE); rd_offset = do_div(tmp, PAGE_SIZE);
rd_page = tmp; rd_page = tmp;
rd_size = cmd->data_length; rd_size = cmd->data_length;
...@@ -378,13 +369,10 @@ static match_table_t tokens = { ...@@ -378,13 +369,10 @@ static match_table_t tokens = {
{Opt_err, NULL} {Opt_err, NULL}
}; };
static ssize_t rd_set_configfs_dev_params( static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
struct se_hba *hba, const char *page, ssize_t count)
struct se_subsystem_dev *se_dev,
const char *page,
ssize_t count)
{ {
struct rd_dev *rd_dev = se_dev->se_dev_su_ptr; struct rd_dev *rd_dev = RD_DEV(dev);
char *orig, *ptr, *opts; char *orig, *ptr, *opts;
substring_t args[MAX_OPT_ARGS]; substring_t args[MAX_OPT_ARGS];
int ret = 0, arg, token; int ret = 0, arg, token;
...@@ -417,24 +405,10 @@ static ssize_t rd_set_configfs_dev_params( ...@@ -417,24 +405,10 @@ static ssize_t rd_set_configfs_dev_params(
return (!ret) ? count : ret; return (!ret) ? count : ret;
} }
static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev) static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{ {
struct rd_dev *rd_dev = se_dev->se_dev_su_ptr; struct rd_dev *rd_dev = RD_DEV(dev);
if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
pr_debug("Missing rd_pages= parameter\n");
return -EINVAL;
}
return 0;
}
static ssize_t rd_show_configfs_dev_params(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
char *b)
{
struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n", ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
rd_dev->rd_dev_id); rd_dev->rd_dev_id);
bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu" bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
...@@ -455,9 +429,10 @@ static u32 rd_get_device_type(struct se_device *dev) ...@@ -455,9 +429,10 @@ static u32 rd_get_device_type(struct se_device *dev)
static sector_t rd_get_blocks(struct se_device *dev) static sector_t rd_get_blocks(struct se_device *dev)
{ {
struct rd_dev *rd_dev = dev->dev_ptr; struct rd_dev *rd_dev = RD_DEV(dev);
unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) / unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
dev->se_sub_dev->se_dev_attrib.block_size) - 1; dev->dev_attrib.block_size) - 1;
return blocks_long; return blocks_long;
} }
...@@ -473,14 +448,15 @@ static int rd_parse_cdb(struct se_cmd *cmd) ...@@ -473,14 +448,15 @@ static int rd_parse_cdb(struct se_cmd *cmd)
static struct se_subsystem_api rd_mcp_template = { static struct se_subsystem_api rd_mcp_template = {
.name = "rd_mcp", .name = "rd_mcp",
.inquiry_prod = "RAMDISK-MCP",
.inquiry_rev = RD_MCP_VERSION,
.transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
.attach_hba = rd_attach_hba, .attach_hba = rd_attach_hba,
.detach_hba = rd_detach_hba, .detach_hba = rd_detach_hba,
.allocate_virtdevice = rd_allocate_virtdevice, .alloc_device = rd_alloc_device,
.create_virtdevice = rd_create_virtdevice, .configure_device = rd_configure_device,
.free_device = rd_free_device, .free_device = rd_free_device,
.parse_cdb = rd_parse_cdb, .parse_cdb = rd_parse_cdb,
.check_configfs_dev_params = rd_check_configfs_dev_params,
.set_configfs_dev_params = rd_set_configfs_dev_params, .set_configfs_dev_params = rd_set_configfs_dev_params,
.show_configfs_dev_params = rd_show_configfs_dev_params, .show_configfs_dev_params = rd_show_configfs_dev_params,
.get_device_rev = rd_get_device_rev, .get_device_rev = rd_get_device_rev,
......
...@@ -24,6 +24,7 @@ struct rd_dev_sg_table { ...@@ -24,6 +24,7 @@ struct rd_dev_sg_table {
#define RDF_HAS_PAGE_COUNT 0x01 #define RDF_HAS_PAGE_COUNT 0x01
struct rd_dev { struct rd_dev {
struct se_device dev;
u32 rd_flags; u32 rd_flags;
/* Unique Ramdisk Device ID in Ramdisk HBA */ /* Unique Ramdisk Device ID in Ramdisk HBA */
u32 rd_dev_id; u32 rd_dev_id;
......
...@@ -54,10 +54,10 @@ static int sbc_emulate_readcapacity(struct se_cmd *cmd) ...@@ -54,10 +54,10 @@ static int sbc_emulate_readcapacity(struct se_cmd *cmd)
buf[1] = (blocks >> 16) & 0xff; buf[1] = (blocks >> 16) & 0xff;
buf[2] = (blocks >> 8) & 0xff; buf[2] = (blocks >> 8) & 0xff;
buf[3] = blocks & 0xff; buf[3] = blocks & 0xff;
buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff; buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff; buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff; buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff; buf[7] = dev->dev_attrib.block_size & 0xff;
rbuf = transport_kmap_data_sg(cmd); rbuf = transport_kmap_data_sg(cmd);
if (rbuf) { if (rbuf) {
...@@ -85,15 +85,15 @@ static int sbc_emulate_readcapacity_16(struct se_cmd *cmd) ...@@ -85,15 +85,15 @@ static int sbc_emulate_readcapacity_16(struct se_cmd *cmd)
buf[5] = (blocks >> 16) & 0xff; buf[5] = (blocks >> 16) & 0xff;
buf[6] = (blocks >> 8) & 0xff; buf[6] = (blocks >> 8) & 0xff;
buf[7] = blocks & 0xff; buf[7] = blocks & 0xff;
buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff; buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff; buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff; buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff; buf[11] = dev->dev_attrib.block_size & 0xff;
/* /*
* Set Thin Provisioning Enable bit following sbc3r22 in section * Set Thin Provisioning Enable bit following sbc3r22 in section
* READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
*/ */
if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
buf[14] = 0x80; buf[14] = 0x80;
rbuf = transport_kmap_data_sg(cmd); rbuf = transport_kmap_data_sg(cmd);
...@@ -143,7 +143,7 @@ static int sbc_emulate_noop(struct se_cmd *cmd) ...@@ -143,7 +143,7 @@ static int sbc_emulate_noop(struct se_cmd *cmd)
static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors) static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{ {
return cmd->se_dev->se_sub_dev->se_dev_attrib.block_size * sectors; return cmd->se_dev->dev_attrib.block_size * sectors;
} }
static int sbc_check_valid_sectors(struct se_cmd *cmd) static int sbc_check_valid_sectors(struct se_cmd *cmd)
...@@ -152,7 +152,7 @@ static int sbc_check_valid_sectors(struct se_cmd *cmd) ...@@ -152,7 +152,7 @@ static int sbc_check_valid_sectors(struct se_cmd *cmd)
unsigned long long end_lba; unsigned long long end_lba;
u32 sectors; u32 sectors;
sectors = cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size; sectors = cmd->data_length / dev->dev_attrib.block_size;
end_lba = dev->transport->get_blocks(dev) + 1; end_lba = dev->transport->get_blocks(dev) + 1;
if (cmd->t_task_lba + sectors > end_lba) { if (cmd->t_task_lba + sectors > end_lba) {
...@@ -315,7 +315,6 @@ static void xdreadwrite_callback(struct se_cmd *cmd) ...@@ -315,7 +315,6 @@ static void xdreadwrite_callback(struct se_cmd *cmd)
int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops) int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
{ {
struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
struct se_device *dev = cmd->se_dev; struct se_device *dev = cmd->se_dev;
unsigned char *cdb = cmd->t_task_cdb; unsigned char *cdb = cmd->t_task_cdb;
unsigned int size; unsigned int size;
...@@ -562,18 +561,18 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops) ...@@ -562,18 +561,18 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
unsigned long long end_lba; unsigned long long end_lba;
if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) { if (sectors > dev->dev_attrib.fabric_max_sectors) {
printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
" big sectors %u exceeds fabric_max_sectors:" " big sectors %u exceeds fabric_max_sectors:"
" %u\n", cdb[0], sectors, " %u\n", cdb[0], sectors,
su_dev->se_dev_attrib.fabric_max_sectors); dev->dev_attrib.fabric_max_sectors);
goto out_invalid_cdb_field; goto out_invalid_cdb_field;
} }
if (sectors > su_dev->se_dev_attrib.hw_max_sectors) { if (sectors > dev->dev_attrib.hw_max_sectors) {
printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
" big sectors %u exceeds backend hw_max_sectors:" " big sectors %u exceeds backend hw_max_sectors:"
" %u\n", cdb[0], sectors, " %u\n", cdb[0], sectors,
su_dev->se_dev_attrib.hw_max_sectors); dev->dev_attrib.hw_max_sectors);
goto out_invalid_cdb_field; goto out_invalid_cdb_field;
} }
......
This diff is collapsed.
This diff is collapsed.
...@@ -371,7 +371,7 @@ int core_tmr_lun_reset( ...@@ -371,7 +371,7 @@ int core_tmr_lun_reset(
* which the command was received shall be completed with TASK ABORTED * which the command was received shall be completed with TASK ABORTED
* status (see SAM-4). * status (see SAM-4).
*/ */
tas = dev->se_sub_dev->se_dev_attrib.emulate_tas; tas = dev->dev_attrib.emulate_tas;
/* /*
* Determine if this se_tmr is coming from a $FABRIC_MOD * Determine if this se_tmr is coming from a $FABRIC_MOD
* or struct se_device passthrough.. * or struct se_device passthrough..
...@@ -399,10 +399,10 @@ int core_tmr_lun_reset( ...@@ -399,10 +399,10 @@ int core_tmr_lun_reset(
* LOGICAL UNIT RESET * LOGICAL UNIT RESET
*/ */
if (!preempt_and_abort_list && if (!preempt_and_abort_list &&
(dev->dev_flags & DF_SPC2_RESERVATIONS)) { (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)) {
spin_lock(&dev->dev_reservation_lock); spin_lock(&dev->dev_reservation_lock);
dev->dev_reserved_node_acl = NULL; dev->dev_reserved_node_acl = NULL;
dev->dev_flags &= ~DF_SPC2_RESERVATIONS; dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
spin_unlock(&dev->dev_reservation_lock); spin_unlock(&dev->dev_reservation_lock);
pr_debug("LUN_RESET: SCSI-2 Released reservation\n"); pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
} }
......
...@@ -659,7 +659,7 @@ static void target_add_to_state_list(struct se_cmd *cmd) ...@@ -659,7 +659,7 @@ static void target_add_to_state_list(struct se_cmd *cmd)
static void transport_write_pending_qf(struct se_cmd *cmd); static void transport_write_pending_qf(struct se_cmd *cmd);
static void transport_complete_qf(struct se_cmd *cmd); static void transport_complete_qf(struct se_cmd *cmd);
static void target_qf_do_work(struct work_struct *work) void target_qf_do_work(struct work_struct *work)
{ {
struct se_device *dev = container_of(work, struct se_device, struct se_device *dev = container_of(work, struct se_device,
qf_work_queue); qf_work_queue);
...@@ -712,29 +712,15 @@ void transport_dump_dev_state( ...@@ -712,29 +712,15 @@ void transport_dump_dev_state(
int *bl) int *bl)
{ {
*bl += sprintf(b + *bl, "Status: "); *bl += sprintf(b + *bl, "Status: ");
switch (dev->dev_status) { if (dev->export_count)
case TRANSPORT_DEVICE_ACTIVATED:
*bl += sprintf(b + *bl, "ACTIVATED"); *bl += sprintf(b + *bl, "ACTIVATED");
break; else
case TRANSPORT_DEVICE_DEACTIVATED:
*bl += sprintf(b + *bl, "DEACTIVATED"); *bl += sprintf(b + *bl, "DEACTIVATED");
break;
case TRANSPORT_DEVICE_SHUTDOWN:
*bl += sprintf(b + *bl, "SHUTDOWN");
break;
case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
*bl += sprintf(b + *bl, "OFFLINE");
break;
default:
*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
break;
}
*bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth); *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth);
*bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
dev->se_sub_dev->se_dev_attrib.block_size, dev->dev_attrib.block_size,
dev->se_sub_dev->se_dev_attrib.hw_max_sectors); dev->dev_attrib.hw_max_sectors);
*bl += sprintf(b + *bl, " "); *bl += sprintf(b + *bl, " ");
} }
...@@ -991,185 +977,6 @@ transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) ...@@ -991,185 +977,6 @@ transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
} }
EXPORT_SYMBOL(transport_set_vpd_ident); EXPORT_SYMBOL(transport_set_vpd_ident);
static void core_setup_task_attr_emulation(struct se_device *dev)
{
/*
* If this device is from Target_Core_Mod/pSCSI, disable the
* SAM Task Attribute emulation.
*
* This is currently not available in upsream Linux/SCSI Target
* mode code, and is assumed to be disabled while using TCM/pSCSI.
*/
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
return;
}
dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
" device\n", dev->transport->name,
dev->transport->get_device_rev(dev));
}
static void scsi_dump_inquiry(struct se_device *dev)
{
struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
char buf[17];
int i, device_type;
/*
* Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
*/
for (i = 0; i < 8; i++)
if (wwn->vendor[i] >= 0x20)
buf[i] = wwn->vendor[i];
else
buf[i] = ' ';
buf[i] = '\0';
pr_debug(" Vendor: %s\n", buf);
for (i = 0; i < 16; i++)
if (wwn->model[i] >= 0x20)
buf[i] = wwn->model[i];
else
buf[i] = ' ';
buf[i] = '\0';
pr_debug(" Model: %s\n", buf);
for (i = 0; i < 4; i++)
if (wwn->revision[i] >= 0x20)
buf[i] = wwn->revision[i];
else
buf[i] = ' ';
buf[i] = '\0';
pr_debug(" Revision: %s\n", buf);
device_type = dev->transport->get_device_type(dev);
pr_debug(" Type: %s ", scsi_device_type(device_type));
pr_debug(" ANSI SCSI revision: %02x\n",
dev->transport->get_device_rev(dev));
}
struct se_device *transport_add_device_to_core_hba(
struct se_hba *hba,
struct se_subsystem_api *transport,
struct se_subsystem_dev *se_dev,
u32 device_flags,
void *transport_dev,
struct se_dev_limits *dev_limits,
const char *inquiry_prod,
const char *inquiry_rev)
{
int force_pt;
struct se_device *dev;
dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
if (!dev) {
pr_err("Unable to allocate memory for se_dev_t\n");
return NULL;
}
dev->dev_flags = device_flags;
dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
dev->dev_ptr = transport_dev;
dev->se_hba = hba;
dev->se_sub_dev = se_dev;
dev->transport = transport;
INIT_LIST_HEAD(&dev->dev_list);
INIT_LIST_HEAD(&dev->dev_sep_list);
INIT_LIST_HEAD(&dev->dev_tmr_list);
INIT_LIST_HEAD(&dev->delayed_cmd_list);
INIT_LIST_HEAD(&dev->state_list);
INIT_LIST_HEAD(&dev->qf_cmd_list);
spin_lock_init(&dev->execute_task_lock);
spin_lock_init(&dev->delayed_cmd_lock);
spin_lock_init(&dev->dev_reservation_lock);
spin_lock_init(&dev->dev_status_lock);
spin_lock_init(&dev->se_port_lock);
spin_lock_init(&dev->se_tmr_lock);
spin_lock_init(&dev->qf_cmd_lock);
atomic_set(&dev->dev_ordered_id, 0);
se_dev_set_default_attribs(dev, dev_limits);
dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
dev->creation_time = get_jiffies_64();
spin_lock_init(&dev->stats_lock);
spin_lock(&hba->device_lock);
list_add_tail(&dev->dev_list, &hba->hba_dev_list);
hba->dev_count++;
spin_unlock(&hba->device_lock);
/*
* Setup the SAM Task Attribute emulation for struct se_device
*/
core_setup_task_attr_emulation(dev);
/*
* Force PR and ALUA passthrough emulation with internal object use.
*/
force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
/*
* Setup the Reservations infrastructure for struct se_device
*/
core_setup_reservations(dev, force_pt);
/*
* Setup the Asymmetric Logical Unit Assignment for struct se_device
*/
if (core_setup_alua(dev, force_pt) < 0)
goto err_dev_list;
/*
* Startup the struct se_device processing thread
*/
dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
dev->transport->name);
if (!dev->tmr_wq) {
pr_err("Unable to create tmr workqueue for %s\n",
dev->transport->name);
goto err_dev_list;
}
/*
* Setup work_queue for QUEUE_FULL
*/
INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
/*
* Preload the initial INQUIRY const values if we are doing
* anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
* passthrough because this is being provided by the backend LLD.
* This is required so that transport_get_inquiry() copies these
* originals once back into DEV_T10_WWN(dev) for the virtual device
* setup.
*/
if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
if (!inquiry_prod || !inquiry_rev) {
pr_err("All non TCM/pSCSI plugins require"
" INQUIRY consts\n");
goto err_wq;
}
strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
}
scsi_dump_inquiry(dev);
return dev;
err_wq:
destroy_workqueue(dev->tmr_wq);
err_dev_list:
spin_lock(&hba->device_lock);
list_del(&dev->dev_list);
hba->dev_count--;
spin_unlock(&hba->device_lock);
se_release_vpd_for_dev(dev);
kfree(dev);
return NULL;
}
EXPORT_SYMBOL(transport_add_device_to_core_hba);
int target_cmd_size_check(struct se_cmd *cmd, unsigned int size) int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{ {
struct se_device *dev = cmd->se_dev; struct se_device *dev = cmd->se_dev;
...@@ -1191,7 +998,7 @@ int target_cmd_size_check(struct se_cmd *cmd, unsigned int size) ...@@ -1191,7 +998,7 @@ int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
* Reject READ_* or WRITE_* with overflow/underflow for * Reject READ_* or WRITE_* with overflow/underflow for
* type SCF_SCSI_DATA_CDB. * type SCF_SCSI_DATA_CDB.
*/ */
if (dev->se_sub_dev->se_dev_attrib.block_size != 512) { if (dev->dev_attrib.block_size != 512) {
pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
" CDB on non 512-byte sector setup subsystem" " CDB on non 512-byte sector setup subsystem"
" plugin: %s\n", dev->transport->name); " plugin: %s\n", dev->transport->name);
...@@ -1293,7 +1100,7 @@ int target_setup_cmd_from_cdb( ...@@ -1293,7 +1100,7 @@ int target_setup_cmd_from_cdb(
struct se_cmd *cmd, struct se_cmd *cmd,
unsigned char *cdb) unsigned char *cdb)
{ {
struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; struct se_device *dev = cmd->se_dev;
u32 pr_reg_type = 0; u32 pr_reg_type = 0;
u8 alua_ascq = 0; u8 alua_ascq = 0;
unsigned long flags; unsigned long flags;
...@@ -1345,7 +1152,7 @@ int target_setup_cmd_from_cdb( ...@@ -1345,7 +1152,7 @@ int target_setup_cmd_from_cdb(
return -EINVAL; return -EINVAL;
} }
ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq); ret = dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
if (ret != 0) { if (ret != 0) {
/* /*
* Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
...@@ -1371,8 +1178,8 @@ int target_setup_cmd_from_cdb( ...@@ -1371,8 +1178,8 @@ int target_setup_cmd_from_cdb(
/* /*
* Check status for SPC-3 Persistent Reservations * Check status for SPC-3 Persistent Reservations
*/ */
if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type)) { if (dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type)) {
if (su_dev->t10_pr.pr_ops.t10_seq_non_holder( if (dev->t10_pr.pr_ops.t10_seq_non_holder(
cmd, cdb, pr_reg_type) != 0) { cmd, cdb, pr_reg_type) != 0) {
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
...@@ -1387,7 +1194,7 @@ int target_setup_cmd_from_cdb( ...@@ -1387,7 +1194,7 @@ int target_setup_cmd_from_cdb(
*/ */
} }
ret = cmd->se_dev->transport->parse_cdb(cmd); ret = dev->transport->parse_cdb(cmd);
if (ret < 0) if (ret < 0)
return ret; return ret;
...@@ -1759,7 +1566,7 @@ void transport_generic_request_failure(struct se_cmd *cmd) ...@@ -1759,7 +1566,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
* See spc4r17, section 7.4.6 Control Mode Page, Table 349 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
*/ */
if (cmd->se_sess && if (cmd->se_sess &&
cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2)
core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
cmd->orig_fe_lun, 0x2C, cmd->orig_fe_lun, 0x2C,
ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
......
...@@ -237,7 +237,7 @@ void core_scsi3_ua_for_check_condition( ...@@ -237,7 +237,7 @@ void core_scsi3_ua_for_check_condition(
* highest priority UNIT_ATTENTION and ASC/ASCQ without * highest priority UNIT_ATTENTION and ASC/ASCQ without
* clearing it. * clearing it.
*/ */
if (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) { if (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) {
*asc = ua->ua_asc; *asc = ua->ua_asc;
*ascq = ua->ua_ascq; *ascq = ua->ua_ascq;
break; break;
...@@ -265,8 +265,8 @@ void core_scsi3_ua_for_check_condition( ...@@ -265,8 +265,8 @@ void core_scsi3_ua_for_check_condition(
" INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x" " INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
" reported ASC: 0x%02x, ASCQ: 0x%02x\n", " reported ASC: 0x%02x, ASCQ: 0x%02x\n",
nacl->se_tpg->se_tpg_tfo->get_fabric_name(), nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
(dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" : (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
"Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl, "Releasing", dev->dev_attrib.emulate_ua_intlck_ctrl,
cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq); cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
} }
......
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment