Commit 5873c4d1 authored by Christoph Hellwig, committed by Nicholas Bellinger

target: consolidate backend attribute implementations

Provide a common set of dev_attrib attributes for all devices using the
generic SPC/SBC parsers, and a second one with the minimal required read-only
attributes for passthrough devices.  The latter is only used by pscsi for now,
but will be wired up for the full-passthrough TCMU use case as well.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
parent 0a06d430
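
At a glance, the per-backend conversion in the diff below looks like this (condensed from the fileio hunks; iblock and rd_mcp follow the same pattern, and pscsi/tcmu switch to the passthrough array instead):

    /* before: per-backend boilerplate generated from the configfs macros */
    DEF_TB_DEFAULT_ATTRIBS(fileio);
    static struct configfs_attribute *fileio_backend_dev_attrs[] = {
            &fileio_dev_attrib_emulate_model_alias.attr,
            /* ... remaining se_dev_attrib entries ... */
            NULL,
    };
    /* ... in fileio_ops ... */
            .tb_dev_attrib_attrs = fileio_backend_dev_attrs,

    /* after: reference the shared array exported by the core */
            .tb_dev_attrib_attrs = sbc_attrib_attrs,
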
@@ -471,10 +471,179 @@ EXPORT_SYMBOL(target_unregister_template);
//############################################################################*/
/* Start functions for struct config_item_type tb_dev_attrib_cit */
#define DEF_TB_DEV_ATTRIB_SHOW(_backend, _name) \
static ssize_t _backend##_dev_show_attr_##_name( \
struct se_dev_attrib *da, \
char *page) \
{ \
return snprintf(page, PAGE_SIZE, "%u\n", \
(u32)da->da_dev->dev_attrib._name); \
}
#define DEF_TB_DEV_ATTRIB_STORE(_backend, _name) \
static ssize_t _backend##_dev_store_attr_##_name( \
struct se_dev_attrib *da, \
const char *page, \
size_t count) \
{ \
unsigned long val; \
int ret; \
\
ret = kstrtoul(page, 0, &val); \
if (ret < 0) { \
pr_err("kstrtoul() failed with ret: %d\n", ret); \
return -EINVAL; \
} \
ret = se_dev_set_##_name(da->da_dev, (u32)val); \
\
return (!ret) ? count : -EINVAL; \
}
#define DEF_TB_DEV_ATTRIB(_backend, _name) \
DEF_TB_DEV_ATTRIB_SHOW(_backend, _name); \
DEF_TB_DEV_ATTRIB_STORE(_backend, _name);
#define DEF_TB_DEV_ATTRIB_RO(_backend, name) \
DEF_TB_DEV_ATTRIB_SHOW(_backend, name);
CONFIGFS_EATTR_STRUCT(target_backend_dev_attrib, se_dev_attrib);
#define TB_DEV_ATTR(_backend, _name, _mode) \
static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
_backend##_dev_show_attr_##_name, \
_backend##_dev_store_attr_##_name);
#define TB_DEV_ATTR_RO(_backend, _name) \
static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
__CONFIGFS_EATTR_RO(_name, \
_backend##_dev_show_attr_##_name);
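/*
 * For reference (not part of this patch): hand-expanding
 * DEF_TB_DEV_ATTRIB(target_core, queue_depth) together with
 * TB_DEV_ATTR(target_core, queue_depth, S_IRUGO | S_IWUSR) with the macros
 * above yields approximately the following, which is what each entry in the
 * lists below boils down to:
 */
static ssize_t target_core_dev_show_attr_queue_depth(
	struct se_dev_attrib *da, char *page)
{
	/* print the current value of dev_attrib.queue_depth */
	return snprintf(page, PAGE_SIZE, "%u\n",
			(u32)da->da_dev->dev_attrib.queue_depth);
}

static ssize_t target_core_dev_store_attr_queue_depth(
	struct se_dev_attrib *da, const char *page, size_t count)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 0, &val);
	if (ret < 0) {
		pr_err("kstrtoul() failed with ret: %d\n", ret);
		return -EINVAL;
	}
	/* validation and assignment happen in se_dev_set_queue_depth() */
	ret = se_dev_set_queue_depth(da->da_dev, (u32)val);

	return (!ret) ? count : -EINVAL;
}

static struct target_backend_dev_attrib_attribute target_core_dev_attrib_queue_depth =
	__CONFIGFS_EATTR(queue_depth, S_IRUGO | S_IWUSR,
			target_core_dev_show_attr_queue_depth,
			target_core_dev_store_attr_queue_depth);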
DEF_TB_DEV_ATTRIB(target_core, emulate_model_alias);
DEF_TB_DEV_ATTRIB(target_core, emulate_dpo);
DEF_TB_DEV_ATTRIB(target_core, emulate_fua_write);
DEF_TB_DEV_ATTRIB(target_core, emulate_fua_read);
DEF_TB_DEV_ATTRIB(target_core, emulate_write_cache);
DEF_TB_DEV_ATTRIB(target_core, emulate_ua_intlck_ctrl);
DEF_TB_DEV_ATTRIB(target_core, emulate_tas);
DEF_TB_DEV_ATTRIB(target_core, emulate_tpu);
DEF_TB_DEV_ATTRIB(target_core, emulate_tpws);
DEF_TB_DEV_ATTRIB(target_core, emulate_caw);
DEF_TB_DEV_ATTRIB(target_core, emulate_3pc);
DEF_TB_DEV_ATTRIB(target_core, pi_prot_type);
DEF_TB_DEV_ATTRIB_RO(target_core, hw_pi_prot_type);
DEF_TB_DEV_ATTRIB(target_core, pi_prot_format);
DEF_TB_DEV_ATTRIB(target_core, enforce_pr_isids);
DEF_TB_DEV_ATTRIB(target_core, is_nonrot);
DEF_TB_DEV_ATTRIB(target_core, emulate_rest_reord);
DEF_TB_DEV_ATTRIB(target_core, force_pr_aptpl);
DEF_TB_DEV_ATTRIB_RO(target_core, hw_block_size);
DEF_TB_DEV_ATTRIB(target_core, block_size);
DEF_TB_DEV_ATTRIB_RO(target_core, hw_max_sectors);
DEF_TB_DEV_ATTRIB(target_core, optimal_sectors);
DEF_TB_DEV_ATTRIB_RO(target_core, hw_queue_depth);
DEF_TB_DEV_ATTRIB(target_core, queue_depth);
DEF_TB_DEV_ATTRIB(target_core, max_unmap_lba_count);
DEF_TB_DEV_ATTRIB(target_core, max_unmap_block_desc_count);
DEF_TB_DEV_ATTRIB(target_core, unmap_granularity);
DEF_TB_DEV_ATTRIB(target_core, unmap_granularity_alignment);
DEF_TB_DEV_ATTRIB(target_core, max_write_same_len);
TB_DEV_ATTR(target_core, emulate_model_alias, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_dpo, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_fua_write, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_fua_read, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_write_cache, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_tas, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_tpu, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_tpws, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_caw, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_3pc, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, pi_prot_type, S_IRUGO | S_IWUSR);
TB_DEV_ATTR_RO(target_core, hw_pi_prot_type);
TB_DEV_ATTR(target_core, pi_prot_format, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, enforce_pr_isids, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, is_nonrot, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_rest_reord, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, force_pr_aptpl, S_IRUGO | S_IWUSR)
TB_DEV_ATTR_RO(target_core, hw_block_size);
TB_DEV_ATTR(target_core, block_size, S_IRUGO | S_IWUSR)
TB_DEV_ATTR_RO(target_core, hw_max_sectors);
TB_DEV_ATTR(target_core, optimal_sectors, S_IRUGO | S_IWUSR);
TB_DEV_ATTR_RO(target_core, hw_queue_depth);
TB_DEV_ATTR(target_core, queue_depth, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, max_unmap_lba_count, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, max_unmap_block_desc_count, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, unmap_granularity, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, unmap_granularity_alignment, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, max_write_same_len, S_IRUGO | S_IWUSR);
CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib);
CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
/*
* dev_attrib attributes for devices using the target core SBC/SPC
* interpreter. Any backend using spc_parse_cdb should be using
* these.
*/
struct configfs_attribute *sbc_attrib_attrs[] = {
&target_core_dev_attrib_emulate_model_alias.attr,
&target_core_dev_attrib_emulate_dpo.attr,
&target_core_dev_attrib_emulate_fua_write.attr,
&target_core_dev_attrib_emulate_fua_read.attr,
&target_core_dev_attrib_emulate_write_cache.attr,
&target_core_dev_attrib_emulate_ua_intlck_ctrl.attr,
&target_core_dev_attrib_emulate_tas.attr,
&target_core_dev_attrib_emulate_tpu.attr,
&target_core_dev_attrib_emulate_tpws.attr,
&target_core_dev_attrib_emulate_caw.attr,
&target_core_dev_attrib_emulate_3pc.attr,
&target_core_dev_attrib_pi_prot_type.attr,
&target_core_dev_attrib_hw_pi_prot_type.attr,
&target_core_dev_attrib_pi_prot_format.attr,
&target_core_dev_attrib_enforce_pr_isids.attr,
&target_core_dev_attrib_is_nonrot.attr,
&target_core_dev_attrib_emulate_rest_reord.attr,
&target_core_dev_attrib_force_pr_aptpl.attr,
&target_core_dev_attrib_hw_block_size.attr,
&target_core_dev_attrib_block_size.attr,
&target_core_dev_attrib_hw_max_sectors.attr,
&target_core_dev_attrib_optimal_sectors.attr,
&target_core_dev_attrib_hw_queue_depth.attr,
&target_core_dev_attrib_queue_depth.attr,
&target_core_dev_attrib_max_unmap_lba_count.attr,
&target_core_dev_attrib_max_unmap_block_desc_count.attr,
&target_core_dev_attrib_unmap_granularity.attr,
&target_core_dev_attrib_unmap_granularity_alignment.attr,
&target_core_dev_attrib_max_write_same_len.attr,
NULL,
};
EXPORT_SYMBOL(sbc_attrib_attrs);
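/*
 * Illustrative sketch, not part of this patch: a backend that feeds CDBs to
 * the SPC/SBC interpreter via sbc_parse_cdb() advertises the full emulated
 * attribute set by pointing at sbc_attrib_attrs.  "foo" and foo_sbc_ops are
 * hypothetical names; the real conversions for fileio, iblock and rd_mcp
 * follow further down in this diff.
 */
static sense_reason_t foo_parse_cdb(struct se_cmd *cmd)
{
	/* foo_sbc_ops: the backend's struct sbc_ops (I/O callbacks), omitted */
	return sbc_parse_cdb(cmd, &foo_sbc_ops);
}

static const struct target_backend_ops foo_ops = {
	.name			= "foo",
	.parse_cdb		= foo_parse_cdb,
	/* ... */
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};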
DEF_TB_DEV_ATTRIB_RO(target_pt, hw_pi_prot_type);
DEF_TB_DEV_ATTRIB_RO(target_pt, hw_block_size);
DEF_TB_DEV_ATTRIB_RO(target_pt, hw_max_sectors);
DEF_TB_DEV_ATTRIB_RO(target_pt, hw_queue_depth);
TB_DEV_ATTR_RO(target_pt, hw_pi_prot_type);
TB_DEV_ATTR_RO(target_pt, hw_block_size);
TB_DEV_ATTR_RO(target_pt, hw_max_sectors);
TB_DEV_ATTR_RO(target_pt, hw_queue_depth);
/*
* Minimal dev_attrib attributes for devices passing through CDBs.
* In this case we only provide a few read-only attributes for
* backwards compatibility.
*/
struct configfs_attribute *passthrough_attrib_attrs[] = {
&target_pt_dev_attrib_hw_pi_prot_type.attr,
&target_pt_dev_attrib_hw_block_size.attr,
&target_pt_dev_attrib_hw_max_sectors.attr,
&target_pt_dev_attrib_hw_queue_depth.attr,
NULL,
};
EXPORT_SYMBOL(passthrough_attrib_attrs);
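/*
 * Illustrative sketch, not part of this patch: a passthrough backend hands
 * the raw CDB to the device instead of emulating SPC/SBC, so it only exposes
 * the four read-only hw_* attributes above.  "bar" and bar_execute_cmd() are
 * hypothetical; pscsi and the tcmu user backend are converted further down
 * in this diff.
 */
static sense_reason_t bar_execute_cmd(struct se_cmd *cmd); /* submits the CDB to the real device */

static sense_reason_t bar_parse_cdb(struct se_cmd *cmd)
{
	return passthrough_parse_cdb(cmd, bar_execute_cmd);
}

static const struct target_backend_ops bar_ops = {
	.name			= "bar",
	.parse_cdb		= bar_parse_cdb,
	/* ... */
	.tb_dev_attrib_attrs	= passthrough_attrib_attrs,
};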
static struct configfs_item_operations target_core_dev_attrib_ops = {
.show_attribute = target_core_dev_attrib_attr_show,
.store_attribute = target_core_dev_attrib_attr_store,
......
@@ -37,7 +37,6 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_backend_configfs.h>
#include "target_core_file.h"
@@ -842,41 +841,6 @@ fd_parse_cdb(struct se_cmd *cmd)
return sbc_parse_cdb(cmd, &fd_sbc_ops);
}
DEF_TB_DEFAULT_ATTRIBS(fileio);
static struct configfs_attribute *fileio_backend_dev_attrs[] = {
&fileio_dev_attrib_emulate_model_alias.attr,
&fileio_dev_attrib_emulate_dpo.attr,
&fileio_dev_attrib_emulate_fua_write.attr,
&fileio_dev_attrib_emulate_fua_read.attr,
&fileio_dev_attrib_emulate_write_cache.attr,
&fileio_dev_attrib_emulate_ua_intlck_ctrl.attr,
&fileio_dev_attrib_emulate_tas.attr,
&fileio_dev_attrib_emulate_tpu.attr,
&fileio_dev_attrib_emulate_tpws.attr,
&fileio_dev_attrib_emulate_caw.attr,
&fileio_dev_attrib_emulate_3pc.attr,
&fileio_dev_attrib_pi_prot_type.attr,
&fileio_dev_attrib_hw_pi_prot_type.attr,
&fileio_dev_attrib_pi_prot_format.attr,
&fileio_dev_attrib_enforce_pr_isids.attr,
&fileio_dev_attrib_is_nonrot.attr,
&fileio_dev_attrib_emulate_rest_reord.attr,
&fileio_dev_attrib_force_pr_aptpl.attr,
&fileio_dev_attrib_hw_block_size.attr,
&fileio_dev_attrib_block_size.attr,
&fileio_dev_attrib_hw_max_sectors.attr,
&fileio_dev_attrib_optimal_sectors.attr,
&fileio_dev_attrib_hw_queue_depth.attr,
&fileio_dev_attrib_queue_depth.attr,
&fileio_dev_attrib_max_unmap_lba_count.attr,
&fileio_dev_attrib_max_unmap_block_desc_count.attr,
&fileio_dev_attrib_unmap_granularity.attr,
&fileio_dev_attrib_unmap_granularity_alignment.attr,
&fileio_dev_attrib_max_write_same_len.attr,
NULL,
};
static const struct target_backend_ops fileio_ops = {
.name = "fileio",
.inquiry_prod = "FILEIO",
@@ -895,7 +859,7 @@ static const struct target_backend_ops fileio_ops = {
.init_prot = fd_init_prot,
.format_prot = fd_format_prot,
.free_prot = fd_free_prot,
.tb_dev_attrib_attrs = fileio_backend_dev_attrs,
.tb_dev_attrib_attrs = sbc_attrib_attrs,
};
static int __init fileio_module_init(void)
......
@@ -41,7 +41,6 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_backend_configfs.h>
#include "target_core_iblock.h"
@@ -858,41 +857,6 @@ static bool iblock_get_write_cache(struct se_device *dev)
return q->flush_flags & REQ_FLUSH;
}
DEF_TB_DEFAULT_ATTRIBS(iblock);
static struct configfs_attribute *iblock_backend_dev_attrs[] = {
&iblock_dev_attrib_emulate_model_alias.attr,
&iblock_dev_attrib_emulate_dpo.attr,
&iblock_dev_attrib_emulate_fua_write.attr,
&iblock_dev_attrib_emulate_fua_read.attr,
&iblock_dev_attrib_emulate_write_cache.attr,
&iblock_dev_attrib_emulate_ua_intlck_ctrl.attr,
&iblock_dev_attrib_emulate_tas.attr,
&iblock_dev_attrib_emulate_tpu.attr,
&iblock_dev_attrib_emulate_tpws.attr,
&iblock_dev_attrib_emulate_caw.attr,
&iblock_dev_attrib_emulate_3pc.attr,
&iblock_dev_attrib_pi_prot_type.attr,
&iblock_dev_attrib_hw_pi_prot_type.attr,
&iblock_dev_attrib_pi_prot_format.attr,
&iblock_dev_attrib_enforce_pr_isids.attr,
&iblock_dev_attrib_is_nonrot.attr,
&iblock_dev_attrib_emulate_rest_reord.attr,
&iblock_dev_attrib_force_pr_aptpl.attr,
&iblock_dev_attrib_hw_block_size.attr,
&iblock_dev_attrib_block_size.attr,
&iblock_dev_attrib_hw_max_sectors.attr,
&iblock_dev_attrib_optimal_sectors.attr,
&iblock_dev_attrib_hw_queue_depth.attr,
&iblock_dev_attrib_queue_depth.attr,
&iblock_dev_attrib_max_unmap_lba_count.attr,
&iblock_dev_attrib_max_unmap_block_desc_count.attr,
&iblock_dev_attrib_unmap_granularity.attr,
&iblock_dev_attrib_unmap_granularity_alignment.attr,
&iblock_dev_attrib_max_write_same_len.attr,
NULL,
};
static const struct target_backend_ops iblock_ops = {
.name = "iblock",
.inquiry_prod = "IBLOCK",
@@ -913,7 +877,7 @@ static const struct target_backend_ops iblock_ops = {
.get_io_min = iblock_get_io_min,
.get_io_opt = iblock_get_io_opt,
.get_write_cache = iblock_get_write_cache,
.tb_dev_attrib_attrs = iblock_backend_dev_attrs,
.tb_dev_attrib_attrs = sbc_attrib_attrs,
};
static int __init iblock_module_init(void)
......
@@ -44,7 +44,6 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_backend_configfs.h>
#include "target_core_alua.h"
#include "target_core_internal.h"
@@ -1119,26 +1118,6 @@ static void pscsi_req_done(struct request *req, int uptodate)
kfree(pt);
}
DEF_TB_DEV_ATTRIB_RO(pscsi, hw_pi_prot_type);
TB_DEV_ATTR_RO(pscsi, hw_pi_prot_type);
DEF_TB_DEV_ATTRIB_RO(pscsi, hw_block_size);
TB_DEV_ATTR_RO(pscsi, hw_block_size);
DEF_TB_DEV_ATTRIB_RO(pscsi, hw_max_sectors);
TB_DEV_ATTR_RO(pscsi, hw_max_sectors);
DEF_TB_DEV_ATTRIB_RO(pscsi, hw_queue_depth);
TB_DEV_ATTR_RO(pscsi, hw_queue_depth);
static struct configfs_attribute *pscsi_backend_dev_attrs[] = {
&pscsi_dev_attrib_hw_pi_prot_type.attr,
&pscsi_dev_attrib_hw_block_size.attr,
&pscsi_dev_attrib_hw_max_sectors.attr,
&pscsi_dev_attrib_hw_queue_depth.attr,
NULL,
};
static const struct target_backend_ops pscsi_ops = {
.name = "pscsi",
.owner = THIS_MODULE,
@@ -1155,7 +1134,7 @@ static const struct target_backend_ops pscsi_ops = {
.show_configfs_dev_params = pscsi_show_configfs_dev_params,
.get_device_type = pscsi_get_device_type,
.get_blocks = pscsi_get_blocks,
.tb_dev_attrib_attrs = pscsi_backend_dev_attrs,
.tb_dev_attrib_attrs = passthrough_attrib_attrs,
};
static int __init pscsi_module_init(void)
......
@@ -34,7 +34,6 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_backend_configfs.h>
#include "target_core_rd.h"
@@ -696,41 +695,6 @@ rd_parse_cdb(struct se_cmd *cmd)
return sbc_parse_cdb(cmd, &rd_sbc_ops);
}
DEF_TB_DEFAULT_ATTRIBS(rd_mcp);
static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = {
&rd_mcp_dev_attrib_emulate_model_alias.attr,
&rd_mcp_dev_attrib_emulate_dpo.attr,
&rd_mcp_dev_attrib_emulate_fua_write.attr,
&rd_mcp_dev_attrib_emulate_fua_read.attr,
&rd_mcp_dev_attrib_emulate_write_cache.attr,
&rd_mcp_dev_attrib_emulate_ua_intlck_ctrl.attr,
&rd_mcp_dev_attrib_emulate_tas.attr,
&rd_mcp_dev_attrib_emulate_tpu.attr,
&rd_mcp_dev_attrib_emulate_tpws.attr,
&rd_mcp_dev_attrib_emulate_caw.attr,
&rd_mcp_dev_attrib_emulate_3pc.attr,
&rd_mcp_dev_attrib_pi_prot_type.attr,
&rd_mcp_dev_attrib_hw_pi_prot_type.attr,
&rd_mcp_dev_attrib_pi_prot_format.attr,
&rd_mcp_dev_attrib_enforce_pr_isids.attr,
&rd_mcp_dev_attrib_is_nonrot.attr,
&rd_mcp_dev_attrib_emulate_rest_reord.attr,
&rd_mcp_dev_attrib_force_pr_aptpl.attr,
&rd_mcp_dev_attrib_hw_block_size.attr,
&rd_mcp_dev_attrib_block_size.attr,
&rd_mcp_dev_attrib_hw_max_sectors.attr,
&rd_mcp_dev_attrib_optimal_sectors.attr,
&rd_mcp_dev_attrib_hw_queue_depth.attr,
&rd_mcp_dev_attrib_queue_depth.attr,
&rd_mcp_dev_attrib_max_unmap_lba_count.attr,
&rd_mcp_dev_attrib_max_unmap_block_desc_count.attr,
&rd_mcp_dev_attrib_unmap_granularity.attr,
&rd_mcp_dev_attrib_unmap_granularity_alignment.attr,
&rd_mcp_dev_attrib_max_write_same_len.attr,
NULL,
};
static const struct target_backend_ops rd_mcp_ops = {
.name = "rd_mcp",
.inquiry_prod = "RAMDISK-MCP",
@@ -747,7 +711,7 @@ static const struct target_backend_ops rd_mcp_ops = {
.get_blocks = rd_get_blocks,
.init_prot = rd_init_prot,
.free_prot = rd_free_prot,
.tb_dev_attrib_attrs = rd_mcp_backend_dev_attrs,
.tb_dev_attrib_attrs = sbc_attrib_attrs,
};
int __init rd_module_init(void)
......
@@ -29,7 +29,6 @@
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>
#include <target/target_core_backend_configfs.h>
#include <linux/target_core_user.h>
@@ -1095,26 +1094,6 @@ tcmu_parse_cdb(struct se_cmd *cmd)
return passthrough_parse_cdb(cmd, tcmu_pass_op);
}
DEF_TB_DEV_ATTRIB_RO(tcmu, hw_pi_prot_type);
TB_DEV_ATTR_RO(tcmu, hw_pi_prot_type);
DEF_TB_DEV_ATTRIB_RO(tcmu, hw_block_size);
TB_DEV_ATTR_RO(tcmu, hw_block_size);
DEF_TB_DEV_ATTRIB_RO(tcmu, hw_max_sectors);
TB_DEV_ATTR_RO(tcmu, hw_max_sectors);
DEF_TB_DEV_ATTRIB_RO(tcmu, hw_queue_depth);
TB_DEV_ATTR_RO(tcmu, hw_queue_depth);
static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
&tcmu_dev_attrib_hw_pi_prot_type.attr,
&tcmu_dev_attrib_hw_block_size.attr,
&tcmu_dev_attrib_hw_max_sectors.attr,
&tcmu_dev_attrib_hw_queue_depth.attr,
NULL,
};
static const struct target_backend_ops tcmu_ops = {
.name = "user",
.inquiry_prod = "USER",
@@ -1131,7 +1110,7 @@ static const struct target_backend_ops tcmu_ops = {
.show_configfs_dev_params = tcmu_show_configfs_dev_params,
.get_device_type = sbc_get_device_type,
.get_blocks = tcmu_get_blocks,
.tb_dev_attrib_attrs = tcmu_backend_dev_attrs,
.tb_dev_attrib_attrs = passthrough_attrib_attrs,
};
static int __init tcmu_module_init(void)
......
@@ -82,6 +82,9 @@ int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *);
int transport_set_vpd_ident(struct t10_vpd *, unsigned char *);
extern struct configfs_attribute *sbc_attrib_attrs[];
extern struct configfs_attribute *passthrough_attrib_attrs[];
/* core helpers also used by command snooping in pscsi */
void *transport_kmap_data_sg(struct se_cmd *);
void transport_kunmap_data_sg(struct se_cmd *);
......
#ifndef TARGET_CORE_BACKEND_CONFIGFS_H
#define TARGET_CORE_BACKEND_CONFIGFS_H
#include <target/configfs_macros.h>
#define DEF_TB_DEV_ATTRIB_SHOW(_backend, _name) \
static ssize_t _backend##_dev_show_attr_##_name( \
struct se_dev_attrib *da, \
char *page) \
{ \
return snprintf(page, PAGE_SIZE, "%u\n", \
(u32)da->da_dev->dev_attrib._name); \
}
#define DEF_TB_DEV_ATTRIB_STORE(_backend, _name) \
static ssize_t _backend##_dev_store_attr_##_name( \
struct se_dev_attrib *da, \
const char *page, \
size_t count) \
{ \
unsigned long val; \
int ret; \
\
ret = kstrtoul(page, 0, &val); \
if (ret < 0) { \
pr_err("kstrtoul() failed with ret: %d\n", ret); \
return -EINVAL; \
} \
ret = se_dev_set_##_name(da->da_dev, (u32)val); \
\
return (!ret) ? count : -EINVAL; \
}
#define DEF_TB_DEV_ATTRIB(_backend, _name) \
DEF_TB_DEV_ATTRIB_SHOW(_backend, _name); \
DEF_TB_DEV_ATTRIB_STORE(_backend, _name);
#define DEF_TB_DEV_ATTRIB_RO(_backend, name) \
DEF_TB_DEV_ATTRIB_SHOW(_backend, name);
CONFIGFS_EATTR_STRUCT(target_backend_dev_attrib, se_dev_attrib);
#define TB_DEV_ATTR(_backend, _name, _mode) \
static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
_backend##_dev_show_attr_##_name, \
_backend##_dev_store_attr_##_name);
#define TB_DEV_ATTR_RO(_backend, _name) \
static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
__CONFIGFS_EATTR_RO(_name, \
_backend##_dev_show_attr_##_name);
/*
* Default list of target backend device attributes as defined by
* struct se_dev_attrib
*/
#define DEF_TB_DEFAULT_ATTRIBS(_backend) \
DEF_TB_DEV_ATTRIB(_backend, emulate_model_alias); \
TB_DEV_ATTR(_backend, emulate_model_alias, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_dpo); \
TB_DEV_ATTR(_backend, emulate_dpo, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_fua_write); \
TB_DEV_ATTR(_backend, emulate_fua_write, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_fua_read); \
TB_DEV_ATTR(_backend, emulate_fua_read, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_write_cache); \
TB_DEV_ATTR(_backend, emulate_write_cache, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_ua_intlck_ctrl); \
TB_DEV_ATTR(_backend, emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_tas); \
TB_DEV_ATTR(_backend, emulate_tas, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_tpu); \
TB_DEV_ATTR(_backend, emulate_tpu, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_tpws); \
TB_DEV_ATTR(_backend, emulate_tpws, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_caw); \
TB_DEV_ATTR(_backend, emulate_caw, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_3pc); \
TB_DEV_ATTR(_backend, emulate_3pc, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, pi_prot_type); \
TB_DEV_ATTR(_backend, pi_prot_type, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB_RO(_backend, hw_pi_prot_type); \
TB_DEV_ATTR_RO(_backend, hw_pi_prot_type); \
DEF_TB_DEV_ATTRIB(_backend, pi_prot_format); \
TB_DEV_ATTR(_backend, pi_prot_format, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, enforce_pr_isids); \
TB_DEV_ATTR(_backend, enforce_pr_isids, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, is_nonrot); \
TB_DEV_ATTR(_backend, is_nonrot, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_rest_reord); \
TB_DEV_ATTR(_backend, emulate_rest_reord, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, force_pr_aptpl); \
TB_DEV_ATTR(_backend, force_pr_aptpl, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB_RO(_backend, hw_block_size); \
TB_DEV_ATTR_RO(_backend, hw_block_size); \
DEF_TB_DEV_ATTRIB(_backend, block_size); \
TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors); \
TB_DEV_ATTR_RO(_backend, hw_max_sectors); \
DEF_TB_DEV_ATTRIB(_backend, optimal_sectors); \
TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth); \
TB_DEV_ATTR_RO(_backend, hw_queue_depth); \
DEF_TB_DEV_ATTRIB(_backend, queue_depth); \
TB_DEV_ATTR(_backend, queue_depth, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, max_unmap_lba_count); \
TB_DEV_ATTR(_backend, max_unmap_lba_count, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, max_unmap_block_desc_count); \
TB_DEV_ATTR(_backend, max_unmap_block_desc_count, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, unmap_granularity); \
TB_DEV_ATTR(_backend, unmap_granularity, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, unmap_granularity_alignment); \
TB_DEV_ATTR(_backend, unmap_granularity_alignment, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, max_write_same_len); \
TB_DEV_ATTR(_backend, max_write_same_len, S_IRUGO | S_IWUSR);
#endif /* TARGET_CORE_BACKEND_CONFIGFS_H */