Commit c9193f48 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-5.17/drivers-2022-01-11' of git://git.kernel.dk/linux-block

Pull block driver updates from Jens Axboe:

 - mtip32xx pci cleanups (Bjorn)

 - mtip32xx conversion to generic power management (Vaibhav)

 - rsxx pci power management cleanups (Bjorn)

 - Remove the rsxx driver. This hardware never saw much adoption, and
   it's been end of lifed for a while. (Christoph)

 - MD pull request from Song:
      - REQ_NOWAIT support (Vishal Verma)
      - raid6 benchmark optimization (Dirk Müller)
      - Fix for acct bioset (Xiao Ni)
      - Clean up max_queued_requests (Mariusz Tkaczyk)
      - PREEMPT_RT optimization (Davidlohr Bueso)
      - Use default_groups in kobj_type (Greg Kroah-Hartman)

 - Use attribute groups in pktcdvd and rnbd (Greg)

 - NVMe pull request from Christoph:
      - increment request genctr on completion (Keith Busch, Geliang
        Tang)
      - add a 'iopolicy' module parameter (Hannes Reinecke)
      - print out valid arguments when reading from /dev/nvme-fabrics
        (Hannes Reinecke)

 - Use struct_group() in drbd (Kees)

 - null_blk fixes (Ming)

 - Get rid of congestion logic in pktcdvd (Neil)

 - Floppy ejection hang fix (Tasos)

 - Floppy max user request size fix (Xiongwei)

 - Loop locking fix (Tetsuo)

* tag 'for-5.17/drivers-2022-01-11' of git://git.kernel.dk/linux-block: (32 commits)
  md: use default_groups in kobj_type
  md: Move alloc/free acct bioset in to personality
  lib/raid6: Use strict priority ranking for pq gen() benchmarking
  lib/raid6: skip benchmark of non-chosen xor_syndrome functions
  md: fix spelling of "its"
  md: raid456 add nowait support
  md: raid10 add nowait support
  md: raid1 add nowait support
  md: add support for REQ_NOWAIT
  md: drop queue limitation for RAID1 and RAID10
  md/raid5: play nice with PREEMPT_RT
  block/rnbd-clt-sysfs: use default_groups in kobj_type
  pktcdvd: convert to use attribute groups
  block: null_blk: only set set->nr_maps as 3 if active poll_queues is > 0
  nvme: add 'iopolicy' module parameter
  nvme: drop unused variable ctrl in nvme_setup_cmd
  nvme: increment request genctr on completion
  nvme-fabrics: print out valid arguments when reading from /dev/nvme-fabrics
  block: remove the rsxx driver
  rsxx: Drop PCI legacy power management
  ...
parents d3c81080 d85bd823
...@@ -7489,12 +7489,6 @@ F: Documentation/firmware_class/ ...@@ -7489,12 +7489,6 @@ F: Documentation/firmware_class/
F: drivers/base/firmware_loader/ F: drivers/base/firmware_loader/
F: include/linux/firmware.h F: include/linux/firmware.h
FLASH ADAPTER DRIVER (IBM Flash Adapter 900GB Full Height PCI Flash Card)
M: Joshua Morris <josh.h.morris@us.ibm.com>
M: Philip Kelleher <pjk1939@linux.ibm.com>
S: Maintained
F: drivers/block/rsxx/
FLEXTIMER FTM-QUADDEC DRIVER FLEXTIMER FTM-QUADDEC DRIVER
M: Patrick Havelange <patrick.havelange@essensium.com> M: Patrick Havelange <patrick.havelange@essensium.com>
L: linux-iio@vger.kernel.org L: linux-iio@vger.kernel.org
......
...@@ -392,17 +392,6 @@ config BLK_DEV_RBD ...@@ -392,17 +392,6 @@ config BLK_DEV_RBD
If unsure, say N. If unsure, say N.
config BLK_DEV_RSXX
tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver"
depends on PCI
select CRC32
help
Device driver for IBM's high speed PCIe SSD
storage device: Flash Adapter 900GB Full Height.
To compile this driver as a module, choose M here: the
module will be called rsxx.
source "drivers/block/rnbd/Kconfig" source "drivers/block/rnbd/Kconfig"
endif # BLK_DEV endif # BLK_DEV
...@@ -34,7 +34,6 @@ obj-$(CONFIG_BLK_DEV_DRBD) += drbd/ ...@@ -34,7 +34,6 @@ obj-$(CONFIG_BLK_DEV_DRBD) += drbd/
obj-$(CONFIG_BLK_DEV_RBD) += rbd.o obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/ obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
obj-$(CONFIG_ZRAM) += zram/ obj-$(CONFIG_ZRAM) += zram/
obj-$(CONFIG_BLK_DEV_RNBD) += rnbd/ obj-$(CONFIG_BLK_DEV_RNBD) += rnbd/
......
...@@ -729,7 +729,8 @@ int drbd_send_sync_param(struct drbd_peer_device *peer_device) ...@@ -729,7 +729,8 @@ int drbd_send_sync_param(struct drbd_peer_device *peer_device)
cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM; cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
/* initialize verify_alg and csums_alg */ /* initialize verify_alg and csums_alg */
memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX); BUILD_BUG_ON(sizeof(p->algs) != 2 * SHARED_SECRET_MAX);
memset(&p->algs, 0, sizeof(p->algs));
if (get_ldev(peer_device->device)) { if (get_ldev(peer_device->device)) {
dc = rcu_dereference(peer_device->device->ldev->disk_conf); dc = rcu_dereference(peer_device->device->ldev->disk_conf);
......
...@@ -283,8 +283,10 @@ struct p_rs_param_89 { ...@@ -283,8 +283,10 @@ struct p_rs_param_89 {
struct p_rs_param_95 { struct p_rs_param_95 {
u32 resync_rate; u32 resync_rate;
char verify_alg[SHARED_SECRET_MAX]; struct_group(algs,
char csums_alg[SHARED_SECRET_MAX]; char verify_alg[SHARED_SECRET_MAX];
char csums_alg[SHARED_SECRET_MAX];
);
u32 c_plan_ahead; u32 c_plan_ahead;
u32 c_delay_target; u32 c_delay_target;
u32 c_fill_target; u32 c_fill_target;
......
...@@ -3921,7 +3921,8 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i ...@@ -3921,7 +3921,8 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
/* initialize verify_alg and csums_alg */ /* initialize verify_alg and csums_alg */
p = pi->data; p = pi->data;
memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX); BUILD_BUG_ON(sizeof(p->algs) != 2 * SHARED_SECRET_MAX);
memset(&p->algs, 0, sizeof(p->algs));
err = drbd_recv_all(peer_device->connection, p, header_size); err = drbd_recv_all(peer_device->connection, p, header_size);
if (err) if (err)
......
...@@ -1015,7 +1015,7 @@ static DECLARE_DELAYED_WORK(fd_timer, fd_timer_workfn); ...@@ -1015,7 +1015,7 @@ static DECLARE_DELAYED_WORK(fd_timer, fd_timer_workfn);
static void cancel_activity(void) static void cancel_activity(void)
{ {
do_floppy = NULL; do_floppy = NULL;
cancel_delayed_work_sync(&fd_timer); cancel_delayed_work(&fd_timer);
cancel_work_sync(&floppy_work); cancel_work_sync(&floppy_work);
} }
...@@ -3081,6 +3081,8 @@ static void raw_cmd_free(struct floppy_raw_cmd **ptr) ...@@ -3081,6 +3081,8 @@ static void raw_cmd_free(struct floppy_raw_cmd **ptr)
} }
} }
#define MAX_LEN (1UL << MAX_ORDER << PAGE_SHIFT)
static int raw_cmd_copyin(int cmd, void __user *param, static int raw_cmd_copyin(int cmd, void __user *param,
struct floppy_raw_cmd **rcmd) struct floppy_raw_cmd **rcmd)
{ {
...@@ -3108,7 +3110,7 @@ static int raw_cmd_copyin(int cmd, void __user *param, ...@@ -3108,7 +3110,7 @@ static int raw_cmd_copyin(int cmd, void __user *param,
ptr->resultcode = 0; ptr->resultcode = 0;
if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) { if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
if (ptr->length <= 0) if (ptr->length <= 0 || ptr->length >= MAX_LEN)
return -EINVAL; return -EINVAL;
ptr->kernel_data = (char *)fd_dma_mem_alloc(ptr->length); ptr->kernel_data = (char *)fd_dma_mem_alloc(ptr->length);
fallback_on_nodma_alloc(&ptr->kernel_data, ptr->length); fallback_on_nodma_alloc(&ptr->kernel_data, ptr->length);
......
...@@ -1082,13 +1082,10 @@ static int loop_configure(struct loop_device *lo, fmode_t mode, ...@@ -1082,13 +1082,10 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
return error; return error;
} }
static int __loop_clr_fd(struct loop_device *lo, bool release) static void __loop_clr_fd(struct loop_device *lo)
{ {
struct file *filp = NULL; struct file *filp;
gfp_t gfp = lo->old_gfp_mask; gfp_t gfp = lo->old_gfp_mask;
int err = 0;
bool partscan = false;
int lo_number;
struct loop_worker *pos, *worker; struct loop_worker *pos, *worker;
/* /*
...@@ -1103,17 +1100,14 @@ static int __loop_clr_fd(struct loop_device *lo, bool release) ...@@ -1103,17 +1100,14 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
* became visible. * became visible.
*/ */
/*
* Since this function is called upon "ioctl(LOOP_CLR_FD)" xor "close()
* after ioctl(LOOP_CLR_FD)", it is a sign of something going wrong if
* lo->lo_state has changed while waiting for lo->lo_mutex.
*/
mutex_lock(&lo->lo_mutex); mutex_lock(&lo->lo_mutex);
if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) { BUG_ON(lo->lo_state != Lo_rundown);
err = -ENXIO; mutex_unlock(&lo->lo_mutex);
goto out_unlock;
}
filp = lo->lo_backing_file;
if (filp == NULL) {
err = -EINVAL;
goto out_unlock;
}
if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags)) if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags))
blk_queue_write_cache(lo->lo_queue, false, false); blk_queue_write_cache(lo->lo_queue, false, false);
...@@ -1134,6 +1128,7 @@ static int __loop_clr_fd(struct loop_device *lo, bool release) ...@@ -1134,6 +1128,7 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
del_timer_sync(&lo->timer); del_timer_sync(&lo->timer);
spin_lock_irq(&lo->lo_lock); spin_lock_irq(&lo->lo_lock);
filp = lo->lo_backing_file;
lo->lo_backing_file = NULL; lo->lo_backing_file = NULL;
spin_unlock_irq(&lo->lo_lock); spin_unlock_irq(&lo->lo_lock);
...@@ -1149,60 +1144,59 @@ static int __loop_clr_fd(struct loop_device *lo, bool release) ...@@ -1149,60 +1144,59 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
/* let user-space know about this change */ /* let user-space know about this change */
kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE); kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
mapping_set_gfp_mask(filp->f_mapping, gfp); mapping_set_gfp_mask(filp->f_mapping, gfp);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
blk_mq_unfreeze_queue(lo->lo_queue); blk_mq_unfreeze_queue(lo->lo_queue);
partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
lo_number = lo->lo_number;
disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE); disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
out_unlock:
mutex_unlock(&lo->lo_mutex); if (lo->lo_flags & LO_FLAGS_PARTSCAN) {
if (partscan) { int err;
/*
* open_mutex has been held already in release path, so don't mutex_lock(&lo->lo_disk->open_mutex);
* acquire it if this function is called in such case.
*
* If the reread partition isn't from release path, lo_refcnt
* must be at least one and it can only become zero when the
* current holder is released.
*/
if (!release)
mutex_lock(&lo->lo_disk->open_mutex);
err = bdev_disk_changed(lo->lo_disk, false); err = bdev_disk_changed(lo->lo_disk, false);
if (!release) mutex_unlock(&lo->lo_disk->open_mutex);
mutex_unlock(&lo->lo_disk->open_mutex);
if (err) if (err)
pr_warn("%s: partition scan of loop%d failed (rc=%d)\n", pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
__func__, lo_number, err); __func__, lo->lo_number, err);
/* Device is gone, no point in returning error */ /* Device is gone, no point in returning error */
err = 0;
} }
/*
* lo->lo_state is set to Lo_unbound here after above partscan has
* finished.
*
* There cannot be anybody else entering __loop_clr_fd() as
* lo->lo_backing_file is already cleared and Lo_rundown state
* protects us from all the other places trying to change the 'lo'
* device.
*/
mutex_lock(&lo->lo_mutex);
lo->lo_flags = 0; lo->lo_flags = 0;
if (!part_shift) if (!part_shift)
lo->lo_disk->flags |= GENHD_FL_NO_PART; lo->lo_disk->flags |= GENHD_FL_NO_PART;
fput(filp);
}
static void loop_rundown_completed(struct loop_device *lo)
{
mutex_lock(&lo->lo_mutex);
lo->lo_state = Lo_unbound; lo->lo_state = Lo_unbound;
mutex_unlock(&lo->lo_mutex); mutex_unlock(&lo->lo_mutex);
module_put(THIS_MODULE);
}
/* static void loop_rundown_workfn(struct work_struct *work)
* Need not hold lo_mutex to fput backing file. Calling fput holding {
* lo_mutex triggers a circular lock dependency possibility warning as struct loop_device *lo = container_of(work, struct loop_device,
* fput can take open_mutex which is usually taken before lo_mutex. rundown_work);
*/ struct block_device *bdev = lo->lo_device;
if (filp) struct gendisk *disk = lo->lo_disk;
fput(filp);
return err; __loop_clr_fd(lo);
kobject_put(&bdev->bd_device.kobj);
module_put(disk->fops->owner);
loop_rundown_completed(lo);
}
static void loop_schedule_rundown(struct loop_device *lo)
{
struct block_device *bdev = lo->lo_device;
struct gendisk *disk = lo->lo_disk;
__module_get(disk->fops->owner);
kobject_get(&bdev->bd_device.kobj);
INIT_WORK(&lo->rundown_work, loop_rundown_workfn);
queue_work(system_long_wq, &lo->rundown_work);
} }
static int loop_clr_fd(struct loop_device *lo) static int loop_clr_fd(struct loop_device *lo)
...@@ -1234,7 +1228,9 @@ static int loop_clr_fd(struct loop_device *lo) ...@@ -1234,7 +1228,9 @@ static int loop_clr_fd(struct loop_device *lo)
lo->lo_state = Lo_rundown; lo->lo_state = Lo_rundown;
mutex_unlock(&lo->lo_mutex); mutex_unlock(&lo->lo_mutex);
return __loop_clr_fd(lo, false); __loop_clr_fd(lo);
loop_rundown_completed(lo);
return 0;
} }
static int static int
...@@ -1758,7 +1754,7 @@ static void lo_release(struct gendisk *disk, fmode_t mode) ...@@ -1758,7 +1754,7 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
* In autoclear mode, stop the loop thread * In autoclear mode, stop the loop thread
* and remove configuration after last close. * and remove configuration after last close.
*/ */
__loop_clr_fd(lo, true); loop_schedule_rundown(lo);
return; return;
} else if (lo->lo_state == Lo_bound) { } else if (lo->lo_state == Lo_bound) {
/* /*
......
...@@ -56,6 +56,7 @@ struct loop_device { ...@@ -56,6 +56,7 @@ struct loop_device {
struct gendisk *lo_disk; struct gendisk *lo_disk;
struct mutex lo_mutex; struct mutex lo_mutex;
bool idr_visible; bool idr_visible;
struct work_struct rundown_work;
}; };
struct loop_cmd { struct loop_cmd {
......
...@@ -136,16 +136,15 @@ struct mtip_compat_ide_task_request_s { ...@@ -136,16 +136,15 @@ struct mtip_compat_ide_task_request_s {
* return value * return value
* true if device removed, else false * true if device removed, else false
*/ */
static bool mtip_check_surprise_removal(struct pci_dev *pdev) static bool mtip_check_surprise_removal(struct driver_data *dd)
{ {
u16 vendor_id = 0; u16 vendor_id = 0;
struct driver_data *dd = pci_get_drvdata(pdev);
if (dd->sr) if (dd->sr)
return true; return true;
/* Read the vendorID from the configuration space */ /* Read the vendorID from the configuration space */
pci_read_config_word(pdev, 0x00, &vendor_id); pci_read_config_word(dd->pdev, 0x00, &vendor_id);
if (vendor_id == 0xFFFF) { if (vendor_id == 0xFFFF) {
dd->sr = true; dd->sr = true;
if (dd->queue) if (dd->queue)
...@@ -447,7 +446,7 @@ static int mtip_device_reset(struct driver_data *dd) ...@@ -447,7 +446,7 @@ static int mtip_device_reset(struct driver_data *dd)
{ {
int rv = 0; int rv = 0;
if (mtip_check_surprise_removal(dd->pdev)) if (mtip_check_surprise_removal(dd))
return 0; return 0;
if (mtip_hba_reset(dd) < 0) if (mtip_hba_reset(dd) < 0)
...@@ -727,7 +726,7 @@ static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat) ...@@ -727,7 +726,7 @@ static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat)
dev_warn(&dd->pdev->dev, dev_warn(&dd->pdev->dev,
"Port stat errors %x unhandled\n", "Port stat errors %x unhandled\n",
(port_stat & ~PORT_IRQ_HANDLED)); (port_stat & ~PORT_IRQ_HANDLED));
if (mtip_check_surprise_removal(dd->pdev)) if (mtip_check_surprise_removal(dd))
return; return;
} }
if (likely(port_stat & (PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR))) { if (likely(port_stat & (PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR))) {
...@@ -752,7 +751,7 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data) ...@@ -752,7 +751,7 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
/* Acknowledge the interrupt status on the port.*/ /* Acknowledge the interrupt status on the port.*/
port_stat = readl(port->mmio + PORT_IRQ_STAT); port_stat = readl(port->mmio + PORT_IRQ_STAT);
if (unlikely(port_stat == 0xFFFFFFFF)) { if (unlikely(port_stat == 0xFFFFFFFF)) {
mtip_check_surprise_removal(dd->pdev); mtip_check_surprise_removal(dd);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
writel(port_stat, port->mmio + PORT_IRQ_STAT); writel(port_stat, port->mmio + PORT_IRQ_STAT);
...@@ -796,7 +795,7 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data) ...@@ -796,7 +795,7 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
} }
if (unlikely(port_stat & PORT_IRQ_ERR)) { if (unlikely(port_stat & PORT_IRQ_ERR)) {
if (unlikely(mtip_check_surprise_removal(dd->pdev))) { if (unlikely(mtip_check_surprise_removal(dd))) {
/* don't proceed further */ /* don't proceed further */
return IRQ_HANDLED; return IRQ_HANDLED;
} }
...@@ -915,7 +914,7 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout) ...@@ -915,7 +914,7 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
msleep(100); msleep(100);
if (mtip_check_surprise_removal(port->dd->pdev)) if (mtip_check_surprise_removal(port->dd))
goto err_fault; goto err_fault;
active = mtip_commands_active(port); active = mtip_commands_active(port);
...@@ -980,7 +979,7 @@ static int mtip_exec_internal_command(struct mtip_port *port, ...@@ -980,7 +979,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
return -EFAULT; return -EFAULT;
} }
if (mtip_check_surprise_removal(dd->pdev)) if (mtip_check_surprise_removal(dd))
return -EFAULT; return -EFAULT;
rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED); rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
...@@ -1022,7 +1021,7 @@ static int mtip_exec_internal_command(struct mtip_port *port, ...@@ -1022,7 +1021,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
fis->command, int_cmd->status); fis->command, int_cmd->status);
rv = -EIO; rv = -EIO;
if (mtip_check_surprise_removal(dd->pdev) || if (mtip_check_surprise_removal(dd) ||
test_bit(MTIP_DDF_REMOVE_PENDING_BIT, test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
&dd->dd_flag)) { &dd->dd_flag)) {
dev_err(&dd->pdev->dev, dev_err(&dd->pdev->dev,
...@@ -2513,7 +2512,7 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd) ...@@ -2513,7 +2512,7 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd)
if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
&dd->dd_flag))) &dd->dd_flag)))
return -EFAULT; return -EFAULT;
if (mtip_check_surprise_removal(dd->pdev)) if (mtip_check_surprise_removal(dd))
return -EFAULT; return -EFAULT;
if (mtip_get_identify(dd->port, NULL) < 0) if (mtip_get_identify(dd->port, NULL) < 0)
...@@ -2891,7 +2890,7 @@ static int mtip_hw_init(struct driver_data *dd) ...@@ -2891,7 +2890,7 @@ static int mtip_hw_init(struct driver_data *dd)
time_before(jiffies, timeout)) { time_before(jiffies, timeout)) {
mdelay(100); mdelay(100);
} }
if (unlikely(mtip_check_surprise_removal(dd->pdev))) { if (unlikely(mtip_check_surprise_removal(dd))) {
timetaken = jiffies - timetaken; timetaken = jiffies - timetaken;
dev_warn(&dd->pdev->dev, dev_warn(&dd->pdev->dev,
"Surprise removal detected at %u ms\n", "Surprise removal detected at %u ms\n",
...@@ -4098,7 +4097,7 @@ static void mtip_pci_remove(struct pci_dev *pdev) ...@@ -4098,7 +4097,7 @@ static void mtip_pci_remove(struct pci_dev *pdev)
list_add(&dd->remove_list, &removing_list); list_add(&dd->remove_list, &removing_list);
spin_unlock_irqrestore(&dev_lock, flags); spin_unlock_irqrestore(&dev_lock, flags);
mtip_check_surprise_removal(pdev); mtip_check_surprise_removal(dd);
synchronize_irq(dd->pdev->irq); synchronize_irq(dd->pdev->irq);
/* Spin until workers are done */ /* Spin until workers are done */
...@@ -4145,36 +4144,17 @@ static void mtip_pci_remove(struct pci_dev *pdev) ...@@ -4145,36 +4144,17 @@ static void mtip_pci_remove(struct pci_dev *pdev)
* 0 Success * 0 Success
* <0 Error * <0 Error
*/ */
static int mtip_pci_suspend(struct pci_dev *pdev, pm_message_t mesg) static int __maybe_unused mtip_pci_suspend(struct device *dev)
{ {
int rv = 0; int rv = 0;
struct driver_data *dd = pci_get_drvdata(pdev); struct driver_data *dd = dev_get_drvdata(dev);
if (!dd) {
dev_err(&pdev->dev,
"Driver private datastructure is NULL\n");
return -EFAULT;
}
set_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag); set_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
/* Disable ports & interrupts then send standby immediate */ /* Disable ports & interrupts then send standby immediate */
rv = mtip_block_suspend(dd); rv = mtip_block_suspend(dd);
if (rv < 0) { if (rv < 0)
dev_err(&pdev->dev, dev_err(dev, "Failed to suspend controller\n");
"Failed to suspend controller\n");
return rv;
}
/*
* Save the pci config space to pdev structure &
* disable the device
*/
pci_save_state(pdev);
pci_disable_device(pdev);
/* Move to Low power state*/
pci_set_power_state(pdev, PCI_D3hot);
return rv; return rv;
} }
...@@ -4186,32 +4166,10 @@ static int mtip_pci_suspend(struct pci_dev *pdev, pm_message_t mesg) ...@@ -4186,32 +4166,10 @@ static int mtip_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
* 0 Success * 0 Success
* <0 Error * <0 Error
*/ */
static int mtip_pci_resume(struct pci_dev *pdev) static int __maybe_unused mtip_pci_resume(struct device *dev)
{ {
int rv = 0; int rv = 0;
struct driver_data *dd; struct driver_data *dd = dev_get_drvdata(dev);
dd = pci_get_drvdata(pdev);
if (!dd) {
dev_err(&pdev->dev,
"Driver private datastructure is NULL\n");
return -EFAULT;
}
/* Move the device to active State */
pci_set_power_state(pdev, PCI_D0);
/* Restore PCI configuration space */
pci_restore_state(pdev);
/* Enable the PCI device*/
rv = pcim_enable_device(pdev);
if (rv < 0) {
dev_err(&pdev->dev,
"Failed to enable card during resume\n");
goto err;
}
pci_set_master(pdev);
/* /*
* Calls hbaReset, initPort, & startPort function * Calls hbaReset, initPort, & startPort function
...@@ -4219,9 +4177,8 @@ static int mtip_pci_resume(struct pci_dev *pdev) ...@@ -4219,9 +4177,8 @@ static int mtip_pci_resume(struct pci_dev *pdev)
*/ */
rv = mtip_block_resume(dd); rv = mtip_block_resume(dd);
if (rv < 0) if (rv < 0)
dev_err(&pdev->dev, "Unable to resume\n"); dev_err(dev, "Unable to resume\n");
err:
clear_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag); clear_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
return rv; return rv;
...@@ -4252,14 +4209,15 @@ static const struct pci_device_id mtip_pci_tbl[] = { ...@@ -4252,14 +4209,15 @@ static const struct pci_device_id mtip_pci_tbl[] = {
{ 0 } { 0 }
}; };
static SIMPLE_DEV_PM_OPS(mtip_pci_pm_ops, mtip_pci_suspend, mtip_pci_resume);
/* Structure that describes the PCI driver functions. */ /* Structure that describes the PCI driver functions. */
static struct pci_driver mtip_pci_driver = { static struct pci_driver mtip_pci_driver = {
.name = MTIP_DRV_NAME, .name = MTIP_DRV_NAME,
.id_table = mtip_pci_tbl, .id_table = mtip_pci_tbl,
.probe = mtip_pci_probe, .probe = mtip_pci_probe,
.remove = mtip_pci_remove, .remove = mtip_pci_remove,
.suspend = mtip_pci_suspend, .driver.pm = &mtip_pci_pm_ops,
.resume = mtip_pci_resume,
.shutdown = mtip_pci_shutdown, .shutdown = mtip_pci_shutdown,
}; };
......
...@@ -340,9 +340,9 @@ static int nullb_update_nr_hw_queues(struct nullb_device *dev, ...@@ -340,9 +340,9 @@ static int nullb_update_nr_hw_queues(struct nullb_device *dev,
return 0; return 0;
/* /*
* Make sure at least one queue exists for each of submit and poll. * Make sure at least one submit queue exists.
*/ */
if (!submit_queues || !poll_queues) if (!submit_queues)
return -EINVAL; return -EINVAL;
/* /*
...@@ -1574,7 +1574,9 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) ...@@ -1574,7 +1574,9 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
cmd = blk_mq_rq_to_pdu(req); cmd = blk_mq_rq_to_pdu(req);
cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req), cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
blk_rq_sectors(req)); blk_rq_sectors(req));
end_cmd(cmd); if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error,
blk_mq_end_request_batch))
end_cmd(cmd);
nr++; nr++;
} }
...@@ -1890,7 +1892,7 @@ static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set) ...@@ -1890,7 +1892,7 @@ static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
if (g_shared_tag_bitmap) if (g_shared_tag_bitmap)
set->flags |= BLK_MQ_F_TAG_HCTX_SHARED; set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
set->driver_data = nullb; set->driver_data = nullb;
if (g_poll_queues) if (poll_queues)
set->nr_maps = 3; set->nr_maps = 3;
else else
set->nr_maps = 1; set->nr_maps = 1;
...@@ -1917,8 +1919,6 @@ static int null_validate_conf(struct nullb_device *dev) ...@@ -1917,8 +1919,6 @@ static int null_validate_conf(struct nullb_device *dev)
if (dev->poll_queues > g_poll_queues) if (dev->poll_queues > g_poll_queues)
dev->poll_queues = g_poll_queues; dev->poll_queues = g_poll_queues;
else if (dev->poll_queues == 0)
dev->poll_queues = 1;
dev->prev_poll_queues = dev->poll_queues; dev->prev_poll_queues = dev->poll_queues;
dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ); dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
......
This diff is collapsed.
...@@ -452,6 +452,7 @@ static struct attribute *rnbd_dev_attrs[] = { ...@@ -452,6 +452,7 @@ static struct attribute *rnbd_dev_attrs[] = {
&rnbd_clt_nr_poll_queues.attr, &rnbd_clt_nr_poll_queues.attr,
NULL, NULL,
}; };
ATTRIBUTE_GROUPS(rnbd_dev);
void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev) void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev)
{ {
...@@ -474,7 +475,7 @@ void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev) ...@@ -474,7 +475,7 @@ void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev)
static struct kobj_type rnbd_dev_ktype = { static struct kobj_type rnbd_dev_ktype = {
.sysfs_ops = &kobj_sysfs_ops, .sysfs_ops = &kobj_sysfs_ops,
.default_attrs = rnbd_dev_attrs, .default_groups = rnbd_dev_groups,
}; };
static int rnbd_clt_add_dev_kobj(struct rnbd_clt_dev *dev) static int rnbd_clt_add_dev_kobj(struct rnbd_clt_dev *dev)
......
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_BLK_DEV_RSXX) += rsxx.o
rsxx-objs := config.o core.o cregs.o dev.o dma.o
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Filename: config.c
*
* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
*
* (C) Copyright 2013 IBM Corporation
*/
#include <linux/types.h>
#include <linux/crc32.h>
#include <linux/swab.h>
#include "rsxx_priv.h"
#include "rsxx_cfg.h"
/*
 * Populate @cfg with the driver's default configuration: current
 * header version, hardware block/stripe geometry, IBM vendor id,
 * and interrupt coalescing disabled.
 */
static void initialize_config(struct rsxx_card_cfg *cfg)
{
	cfg->hdr.version = RSXX_CFG_VERSION;

	cfg->data.block_size  = RSXX_HW_BLK_SIZE;
	cfg->data.stripe_size = RSXX_HW_BLK_SIZE;
	cfg->data.vendor_id   = RSXX_VENDOR_ID_IBM;
	cfg->data.cache_order = -1;

	/* Interrupt coalescing starts disabled with no accumulated state. */
	cfg->data.intr_coal.mode    = RSXX_INTR_COAL_DISABLED;
	cfg->data.intr_coal.count   = 0;
	cfg->data.intr_coal.latency = 0;
}
/*
 * Compute the CRC32 of the config data area.
 *
 * Return the complement of the CRC to ensure compatibility
 * (i.e. this is how early rsxx drivers did it); the on-card CRC
 * was historically stored in this complemented form, so keep it.
 */
static u32 config_data_crc32(struct rsxx_card_cfg *cfg)
{
	return ~crc32(~0, &cfg->data, sizeof(cfg->data));
}
/*----------------- Config Byte Swap Functions -------------------*/
/*
 * Convert the config header fields from the big-endian on-card
 * layout to CPU byte order, in place.  The __force casts silence
 * sparse: the fields are declared as native u32 but hold
 * big-endian data at this point.
 */
static void config_hdr_be_to_cpu(struct card_cfg_hdr *hdr)
{
	hdr->version = be32_to_cpu((__force __be32) hdr->version);
	hdr->crc = be32_to_cpu((__force __be32) hdr->crc);
}
/*
 * Convert the config header fields from CPU byte order to the
 * big-endian on-card layout, in place.  The __force casts silence
 * sparse; after this the native u32 fields hold big-endian data.
 */
static void config_hdr_cpu_to_be(struct card_cfg_hdr *hdr)
{
	hdr->version = (__force u32) cpu_to_be32(hdr->version);
	hdr->crc = (__force u32) cpu_to_be32(hdr->crc);
}
static void config_data_swab(struct rsxx_card_cfg *cfg)
{
u32 *data = (u32 *) &cfg->data;
int i;
for (i = 0; i < (sizeof(cfg->data) / 4); i++)
data[i] = swab32(data[i]);
}
static void config_data_le_to_cpu(struct rsxx_card_cfg *cfg)
{
u32 *data = (u32 *) &cfg->data;
int i;
for (i = 0; i < (sizeof(cfg->data) / 4); i++)
data[i] = le32_to_cpu((__force __le32) data[i]);
}
static void config_data_cpu_to_le(struct rsxx_card_cfg *cfg)
{
u32 *data = (u32 *) &cfg->data;
int i;
for (i = 0; i < (sizeof(cfg->data) / 4); i++)
data[i] = (__force u32) cpu_to_le32(data[i]);
}
/*----------------- Config Operations ------------------*/
/*
 * Write the card's in-memory configuration back to the device.
 *
 * A scratch copy is used so the CPU-order master copy in
 * @card->config is never disturbed.  The CRC is computed over the
 * little-endian representation (a compatibility quirk of early
 * drivers); the whole blob is then converted to big endian for
 * on-card storage.
 *
 * Returns 0 on success or a negative error from the creg write.
 */
static int rsxx_save_config(struct rsxx_cardinfo *card)
{
	struct rsxx_card_cfg cfg;

	memcpy(&cfg, &card->config, sizeof(cfg));

	if (unlikely(cfg.hdr.version != RSXX_CFG_VERSION)) {
		dev_err(CARD_TO_DEV(card),
			"Cannot save config with invalid version %d\n",
			cfg.hdr.version);
		return -EINVAL;
	}

	/* The CRC is defined over the little-endian representation. */
	config_data_cpu_to_le(&cfg);
	cfg.hdr.crc = config_data_crc32(&cfg);

	/* On-card storage format is big endian. */
	config_data_swab(&cfg);
	config_hdr_cpu_to_be(&cfg.hdr);

	return rsxx_creg_write(card, CREG_ADD_CONFIG, sizeof(cfg), &cfg, 1);
}
/*
 * Read and validate the card configuration from the device into
 * @card->config, converting it to CPU byte order.
 *
 * Three cases after the read:
 *  - header version matches RSXX_CFG_VERSION: verify the CRC (which
 *    is defined over the little-endian representation) and convert
 *    the data to CPU order;
 *  - nonzero but unknown version: fail with -EINVAL, since version
 *    changes require user intervention;
 *  - version 0: treat the card as unconfigured, install the defaults
 *    and persist them back to the card.
 *
 * On success sets @card->config_valid and returns 0; otherwise
 * returns a negative errno.
 */
int rsxx_load_config(struct rsxx_cardinfo *card)
{
	int st;
	u32 crc;

	st = rsxx_creg_read(card, CREG_ADD_CONFIG, sizeof(card->config),
				&card->config, 1);
	if (st) {
		dev_err(CARD_TO_DEV(card),
			"Failed reading card config.\n");
		return st;
	}

	/* Header is stored big endian; bring it to CPU order first. */
	config_hdr_be_to_cpu(&card->config.hdr);

	if (card->config.hdr.version == RSXX_CFG_VERSION) {
		/*
		 * We calculate the CRC with the data in little endian, because
		 * early drivers did not take big endian CPUs into account.
		 * The data is always stored in big endian, so we need to byte
		 * swap it before calculating the CRC.
		 */
		config_data_swab(&card->config);

		/* Check the CRC */
		crc = config_data_crc32(&card->config);
		if (crc != card->config.hdr.crc) {
			dev_err(CARD_TO_DEV(card),
				"Config corruption detected!\n");
			dev_info(CARD_TO_DEV(card),
				"CRC (sb x%08x is x%08x)\n",
				card->config.hdr.crc, crc);
			return -EIO;
		}

		/* Convert the data to CPU byteorder */
		config_data_le_to_cpu(&card->config);
	} else if (card->config.hdr.version != 0) {
		dev_err(CARD_TO_DEV(card),
			"Invalid config version %d.\n",
			card->config.hdr.version);
		/*
		 * Config version changes require special handling from the
		 * user
		 */
		return -EINVAL;
	} else {
		/* Version 0 means a blank card: write out the defaults. */
		dev_info(CARD_TO_DEV(card),
			"Initializing card configuration.\n");
		initialize_config(&card->config);
		st = rsxx_save_config(card);
		if (st)
			return st;
	}

	card->config_valid = 1;

	/* Dump the effective configuration for debugging. */
	dev_dbg(CARD_TO_DEV(card), "version: x%08x\n",
		card->config.hdr.version);
	dev_dbg(CARD_TO_DEV(card), "crc: x%08x\n",
		card->config.hdr.crc);
	dev_dbg(CARD_TO_DEV(card), "block_size: x%08x\n",
		card->config.data.block_size);
	dev_dbg(CARD_TO_DEV(card), "stripe_size: x%08x\n",
		card->config.data.stripe_size);
	dev_dbg(CARD_TO_DEV(card), "vendor_id: x%08x\n",
		card->config.data.vendor_id);
	dev_dbg(CARD_TO_DEV(card), "cache_order: x%08x\n",
		card->config.data.cache_order);
	dev_dbg(CARD_TO_DEV(card), "mode: x%08x\n",
		card->config.data.intr_coal.mode);
	dev_dbg(CARD_TO_DEV(card), "count: x%08x\n",
		card->config.data.intr_coal.count);
	dev_dbg(CARD_TO_DEV(card), "latency: x%08x\n",
		card->config.data.intr_coal.latency);

	return 0;
}
This diff is collapsed.
This diff is collapsed.
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Filename: dev.c
*
* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
*
* (C) Copyright 2013 IBM Corporation
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include "rsxx_priv.h"
static unsigned int blkdev_minors = 64;
module_param(blkdev_minors, uint, 0444);
MODULE_PARM_DESC(blkdev_minors, "Number of minors(partitions)");
/*
* For now I'm making this tweakable in case any applications hit this limit.
* If you see a "bio too big" error in the log you will need to raise this
* value.
*/
static unsigned int blkdev_max_hw_sectors = 1024;
module_param(blkdev_max_hw_sectors, uint, 0444);
MODULE_PARM_DESC(blkdev_max_hw_sectors, "Max hw sectors for a single BIO");
static unsigned int enable_blkdev = 1;
module_param(enable_blkdev , uint, 0444);
MODULE_PARM_DESC(enable_blkdev, "Enable block device interfaces");
/*
 * Per-bio accounting metadata, allocated from bio_meta_pool for every
 * submitted bio and freed by the final DMA completion callback.
 */
struct rsxx_bio_meta {
	struct bio	*bio;		/* the bio being serviced */
	atomic_t	pending_dmas;	/* DMAs still outstanding for this bio */
	atomic_t	error;		/* set non-zero if any DMA failed */
	unsigned long	start_time;	/* bio_start_io_acct() timestamp */
};

/* Slab cache backing struct rsxx_bio_meta allocations. */
static struct kmem_cache *bio_meta_pool;
static void rsxx_submit_bio(struct bio *bio);
/*----------------- Block Device Operations -----------------*/
/*
 * Driver-private ioctl handler: supports register read (RSXX_GETREG)
 * and register write (RSXX_SETREG) only; anything else is -ENOTTY.
 */
static int rsxx_blkdev_ioctl(struct block_device *bdev,
			     fmode_t mode,
			     unsigned int cmd,
			     unsigned long arg)
{
	struct rsxx_cardinfo *card = bdev->bd_disk->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case RSXX_GETREG:
		return rsxx_reg_access(card, argp, 1);
	case RSXX_SETREG:
		return rsxx_reg_access(card, argp, 0);
	default:
		return -ENOTTY;
	}
}
/*
 * Report a fabricated CHS geometry for the card.  No driver appears to
 * set geo->start, so this one does not either.  A zero-sized card gets
 * an all-zero geometry.
 */
static int rsxx_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct rsxx_cardinfo *card = bdev->bd_disk->private_data;
	u64 cylinders = card->size8 >> 9;

	if (!card->size8) {
		geo->heads = 0;
		geo->sectors = 0;
		geo->cylinders = 0;
		return 0;
	}

	geo->heads = 64;
	geo->sectors = 16;
	do_div(cylinders, (geo->heads * geo->sectors));
	geo->cylinders = cylinders;

	return 0;
}
/*
 * Block-device entry points: bio-based submission (no request queue),
 * fake geometry reporting, and the register-access ioctls.
 */
static const struct block_device_operations rsxx_fops = {
	.owner		= THIS_MODULE,
	.submit_bio	= rsxx_submit_bio,
	.getgeo		= rsxx_getgeo,
	.ioctl		= rsxx_blkdev_ioctl,
};
/*
 * DMA completion callback, invoked once for every DMA issued on behalf
 * of a bio.  The final completion (pending_dmas reaching zero) finishes
 * I/O accounting, ends the bio — with an error if any single DMA
 * failed — and releases the per-bio metadata.
 */
static void bio_dma_done_cb(struct rsxx_cardinfo *card,
			    void *cb_data,
			    unsigned int error)
{
	struct rsxx_bio_meta *meta = cb_data;

	/* One failed DMA fails the whole bio; record it sticky. */
	if (error)
		atomic_set(&meta->error, 1);

	/* Only the last completing DMA performs the teardown below. */
	if (atomic_dec_and_test(&meta->pending_dmas)) {
		/* Skip accounting while in EEH recovery or without a disk. */
		if (!card->eeh_state && card->gendisk)
			bio_end_io_acct(meta->bio, meta->start_time);

		if (atomic_read(&meta->error))
			bio_io_error(meta->bio);
		else
			bio_endio(meta->bio);
		kmem_cache_free(bio_meta_pool, meta);
	}
}
/*
 * Bio-based submission entry point (this driver has no request queue).
 *
 * Validates the bio against the current card state, allocates per-bio
 * accounting metadata, and hands the bio to the DMA engine; completion
 * is driven from bio_dma_done_cb() once every queued DMA has finished.
 * On any validation or allocation failure the bio is ended immediately
 * with an appropriate blk_status_t.
 */
static void rsxx_submit_bio(struct bio *bio)
{
	struct rsxx_cardinfo *card = bio->bi_bdev->bd_disk->private_data;
	struct rsxx_bio_meta *bio_meta;
	blk_status_t st = BLK_STS_IOERR;

	blk_queue_split(&bio);

	might_sleep();

	if (!card)
		goto req_err;

	/* Reject I/O beyond the advertised capacity. */
	if (bio_end_sector(bio) > get_capacity(card->gendisk))
		goto req_err;

	if (unlikely(card->halt))
		goto req_err;

	if (unlikely(card->dma_fault))
		goto req_err;

	if (bio->bi_iter.bi_size == 0) {
		dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
		goto req_err;
	}

	bio_meta = kmem_cache_alloc(bio_meta_pool, GFP_KERNEL);
	if (!bio_meta) {
		st = BLK_STS_RESOURCE;
		goto req_err;
	}

	bio_meta->bio = bio;
	atomic_set(&bio_meta->error, 0);
	atomic_set(&bio_meta->pending_dmas, 0);

	/*
	 * likely(!x) is the correct annotation form; the previous
	 * !unlikely(x) computed the same value but hinted the inner
	 * expression rather than the branch actually taken.
	 */
	if (likely(!card->halt))
		bio_meta->start_time = bio_start_io_acct(bio);

	dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
		 bio_data_dir(bio) ? 'W' : 'R', bio_meta,
		 (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size);

	st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
				    bio_dma_done_cb, bio_meta);
	if (st)
		goto queue_err;

	return;

queue_err:
	kmem_cache_free(bio_meta_pool, bio_meta);
req_err:
	if (st)
		bio->bi_status = st;
	bio_endio(bio);
}
/*----------------- Device Setup -------------------*/
/*
 * Discard support is keyed off the PCI revision: hardware at or above
 * revision RSXX_DISCARD_SUPPORT can discard.
 */
static bool rsxx_discard_supported(struct rsxx_cardinfo *card)
{
	unsigned char rev;

	pci_read_config_byte(card->dev, PCI_REVISION_ID, &rev);

	return rev >= RSXX_DISCARD_SUPPORT;
}
/*
 * Register the card's gendisk with the block layer.
 *
 * Capacity comes from the card configuration when one was loaded;
 * otherwise the disk is added with zero capacity.  Returns 0 on
 * success or the device_add_disk() error, in which case the disk is
 * torn down.
 */
int rsxx_attach_dev(struct rsxx_cardinfo *card)
{
	int err = 0;

	mutex_lock(&card->dev_lock);

	/* The block device requires the stripe size from the config. */
	if (enable_blkdev) {
		if (card->config_valid)
			set_capacity(card->gendisk, card->size8 >> 9);
		else
			set_capacity(card->gendisk, 0);
		err = device_add_disk(CARD_TO_DEV(card), card->gendisk, NULL);
		if (err == 0)
			card->bdev_attached = 1;
	}

	mutex_unlock(&card->dev_lock);

	/* Cleanup happens outside dev_lock; bdev_attached was never set. */
	if (err)
		blk_cleanup_disk(card->gendisk);

	return err;
}
/*
 * Unregister the gendisk if it was attached; safe to call when the
 * disk was never added (no-op in that case).
 */
void rsxx_detach_dev(struct rsxx_cardinfo *card)
{
	mutex_lock(&card->dev_lock);

	if (!card->bdev_attached) {
		mutex_unlock(&card->dev_lock);
		return;
	}

	del_gendisk(card->gendisk);
	card->bdev_attached = 0;

	mutex_unlock(&card->dev_lock);
}
/*
 * Allocate and configure the block device for a card: major number,
 * gendisk, queue limits, and discard parameters.  The disk is not made
 * visible here — rsxx_attach_dev() adds it later.  Returns 0 or a
 * negative errno; on failure everything acquired here is released.
 */
int rsxx_setup_dev(struct rsxx_cardinfo *card)
{
	unsigned short blk_size;

	mutex_init(&card->dev_lock);

	if (!enable_blkdev)
		return 0;

	card->major = register_blkdev(0, DRIVER_NAME);
	if (card->major < 0) {
		dev_err(CARD_TO_DEV(card), "Failed to get major number\n");
		return -ENOMEM;
	}

	card->gendisk = blk_alloc_disk(blkdev_minors);
	if (!card->gendisk) {
		dev_err(CARD_TO_DEV(card), "Failed disk alloc\n");
		unregister_blkdev(card->major, DRIVER_NAME);
		return -ENOMEM;
	}

	/* Logical block size / DMA alignment come from the card config. */
	if (card->config_valid) {
		blk_size = card->config.data.block_size;
		blk_queue_dma_alignment(card->gendisk->queue, blk_size - 1);
		blk_queue_logical_block_size(card->gendisk->queue, blk_size);
	}

	blk_queue_max_hw_sectors(card->gendisk->queue, blkdev_max_hw_sectors);
	blk_queue_physical_block_size(card->gendisk->queue, RSXX_HW_BLK_SIZE);

	/* Flash-backed: non-rotational, contributes no entropy. */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, card->gendisk->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, card->gendisk->queue);
	if (rsxx_discard_supported(card)) {
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, card->gendisk->queue);
		blk_queue_max_discard_sectors(card->gendisk->queue,
						RSXX_HW_BLK_SIZE >> 9);
		card->gendisk->queue->limits.discard_granularity =
			RSXX_HW_BLK_SIZE;
		card->gendisk->queue->limits.discard_alignment =
			RSXX_HW_BLK_SIZE;
	}

	snprintf(card->gendisk->disk_name, sizeof(card->gendisk->disk_name),
		 "rsxx%d", card->disk_id);
	card->gendisk->major = card->major;
	card->gendisk->minors = blkdev_minors;
	card->gendisk->fops = &rsxx_fops;
	card->gendisk->private_data = card;

	return 0;
}
/*
 * Release everything rsxx_setup_dev() acquired: the gendisk and the
 * reserved major number.  No-op when block interfaces were disabled.
 */
void rsxx_destroy_dev(struct rsxx_cardinfo *card)
{
	if (!enable_blkdev)
		return;

	blk_cleanup_disk(card->gendisk);
	card->gendisk = NULL;

	unregister_blkdev(card->major, DRIVER_NAME);
}
/*
 * Module-wide initialization: create the slab cache used for per-bio
 * accounting metadata.  Returns 0 on success, -ENOMEM on failure.
 */
int rsxx_dev_init(void)
{
	bio_meta_pool = KMEM_CACHE(rsxx_bio_meta, SLAB_HWCACHE_ALIGN);

	return bio_meta_pool ? 0 : -ENOMEM;
}
/*
 * Module-wide teardown counterpart of rsxx_dev_init();
 * kmem_cache_destroy() tolerates a NULL cache.
 */
void rsxx_dev_cleanup(void)
{
	kmem_cache_destroy(bio_meta_pool);
}
This diff is collapsed.
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Filename: rsxx.h
*
* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
*
* (C) Copyright 2013 IBM Corporation
*/
#ifndef __RSXX_H__
#define __RSXX_H__

/*----------------- IOCTL Definitions -------------------*/

/* Maximum number of 32-bit words a single register access may move. */
#define RSXX_MAX_DATA 8

/*
 * User/kernel exchange buffer for the register-access ioctls.
 * addr/cnt select the register window; stat returns the hardware
 * status; data carries up to RSXX_MAX_DATA words in either direction.
 */
struct rsxx_reg_access {
	__u32 addr;
	__u32 cnt;
	__u32 stat;
	__u32 stream;
	__u32 data[RSXX_MAX_DATA];
};

#define RSXX_MAX_REG_CNT	(RSXX_MAX_DATA * (sizeof(__u32)))

#define RSXX_IOC_MAGIC 'r'

#define RSXX_GETREG _IOWR(RSXX_IOC_MAGIC, 0x20, struct rsxx_reg_access)
#define RSXX_SETREG _IOWR(RSXX_IOC_MAGIC, 0x21, struct rsxx_reg_access)

#endif /* __RSXX_H__ */
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Filename: rsXX_cfg.h
*
* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
*
* (C) Copyright 2013 IBM Corporation
*/
#ifndef __RSXX_CFG_H__
#define __RSXX_CFG_H__

/* NOTE: Config values will be saved in network byte order (i.e. Big endian) */

#include <linux/types.h>

/*
 * The card config version must match the driver's expected version. If it does
 * not, the DMA interfaces will not be attached and the user will need to
 * initialize/upgrade the card configuration using the card config utility.
 */
#define RSXX_CFG_VERSION	4

/* On-card configuration header: format version plus a CRC of the data. */
struct card_cfg_hdr {
	__u32	version;
	__u32	crc;
};

/* Payload of the on-card configuration block. */
struct card_cfg_data {
	__u32	block_size;	/* logical block size in bytes */
	__u32	stripe_size;
	__u32	vendor_id;	/* one of the RSXX_VENDOR_ID_* values */
	__u32	cache_order;
	struct {
		__u32	mode;	/* Disabled, manual, auto-tune... */
		__u32	count;	/* Number of intr to coalesce	  */
		__u32	latency;/* Max wait time (in ns)	  */
	} intr_coal;
};

/* Complete on-card configuration image: header followed by data. */
struct rsxx_card_cfg {
	struct card_cfg_hdr	hdr;
	struct card_cfg_data	data;
};

/* Vendor ID Values */
#define RSXX_VENDOR_ID_IBM		0
#define RSXX_VENDOR_ID_DSI		1
#define RSXX_VENDOR_COUNT		2

/* Interrupt Coalescing Values */
#define RSXX_INTR_COAL_DISABLED           0
#define RSXX_INTR_COAL_EXPLICIT           1
#define RSXX_INTR_COAL_AUTO_TUNE          2

#endif /* __RSXX_CFG_H__ */
This diff is collapsed.
...@@ -574,7 +574,7 @@ static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg) ...@@ -574,7 +574,7 @@ static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
int ret = 0; int ret = 0;
if (WARN(mddev->cluster_info->slot_number - 1 == le32_to_cpu(msg->slot), if (WARN(mddev->cluster_info->slot_number - 1 == le32_to_cpu(msg->slot),
"node %d received it's own msg\n", le32_to_cpu(msg->slot))) "node %d received its own msg\n", le32_to_cpu(msg->slot)))
return -1; return -1;
switch (le32_to_cpu(msg->type)) { switch (le32_to_cpu(msg->type)) {
case METADATA_UPDATED: case METADATA_UPDATED:
......
This diff is collapsed.
...@@ -721,6 +721,8 @@ extern void md_error(struct mddev *mddev, struct md_rdev *rdev); ...@@ -721,6 +721,8 @@ extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
extern void md_finish_reshape(struct mddev *mddev); extern void md_finish_reshape(struct mddev *mddev);
void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev, void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
struct bio *bio, sector_t start, sector_t size); struct bio *bio, sector_t start, sector_t size);
int acct_bioset_init(struct mddev *mddev);
void acct_bioset_exit(struct mddev *mddev);
void md_account_bio(struct mddev *mddev, struct bio **bio); void md_account_bio(struct mddev *mddev, struct bio **bio);
extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio); extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
......
This diff is collapsed.
...@@ -22,12 +22,6 @@ ...@@ -22,12 +22,6 @@
#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
/* When there are this many requests queue to be written by
* the raid thread, we become 'congested' to provide back-pressure
* for writeback.
*/
static int max_queued_requests = 1024;
/* for managing resync I/O pages */ /* for managing resync I/O pages */
struct resync_pages { struct resync_pages {
void *raid_bio; void *raid_bio;
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
#include <linux/raid/xor.h> #include <linux/raid/xor.h>
#include <linux/dmaengine.h> #include <linux/dmaengine.h>
#include <linux/local_lock.h>
/* /*
* *
...@@ -640,7 +641,8 @@ struct r5conf { ...@@ -640,7 +641,8 @@ struct r5conf {
* lists and performing address * lists and performing address
* conversions * conversions
*/ */
int scribble_obj_size; int scribble_obj_size;
local_lock_t lock;
} __percpu *percpu; } __percpu *percpu;
int scribble_disks; int scribble_disks;
int scribble_sectors; int scribble_sectors;
......
...@@ -991,7 +991,6 @@ EXPORT_SYMBOL_GPL(nvme_cleanup_cmd); ...@@ -991,7 +991,6 @@ EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req) blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
{ {
struct nvme_command *cmd = nvme_req(req)->cmd; struct nvme_command *cmd = nvme_req(req)->cmd;
struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
blk_status_t ret = BLK_STS_OK; blk_status_t ret = BLK_STS_OK;
if (!(req->rq_flags & RQF_DONTPREP)) if (!(req->rq_flags & RQF_DONTPREP))
...@@ -1038,8 +1037,6 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req) ...@@ -1038,8 +1037,6 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
return BLK_STS_IOERR; return BLK_STS_IOERR;
} }
if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
nvme_req(req)->genctr++;
cmd->common.command_id = nvme_cid(req); cmd->common.command_id = nvme_cid(req);
trace_nvme_setup_cmd(req, cmd); trace_nvme_setup_cmd(req, cmd);
return ret; return ret;
...@@ -2762,9 +2759,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) ...@@ -2762,9 +2759,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
return -EINVAL; return -EINVAL;
} }
subsys->awupf = le16_to_cpu(id->awupf); subsys->awupf = le16_to_cpu(id->awupf);
#ifdef CONFIG_NVME_MULTIPATH nvme_mpath_default_iopolicy(subsys);
subsys->iopolicy = NVME_IOPOLICY_NUMA;
#endif
subsys->dev.class = nvme_subsys_class; subsys->dev.class = nvme_subsys_class;
subsys->dev.release = nvme_release_subsystem; subsys->dev.release = nvme_release_subsystem;
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -81,7 +81,7 @@ struct raid6_calls { ...@@ -81,7 +81,7 @@ struct raid6_calls {
void (*xor_syndrome)(int, int, int, size_t, void **); void (*xor_syndrome)(int, int, int, size_t, void **);
int (*valid)(void); /* Returns 1 if this routine set is usable */ int (*valid)(void); /* Returns 1 if this routine set is usable */
const char *name; /* Name of this routine set */ const char *name; /* Name of this routine set */
int prefer; /* Has special performance attribute */ int priority; /* Relative priority ranking if non-zero */
}; };
/* Selected algorithm */ /* Selected algorithm */
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment