Commit 00e375e7 authored by Jens Axboe

Merge branch 'for-2.6.37/drivers' into for-linus

Conflicts:
	drivers/block/cciss.c
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parents f85acd81 4205df34
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -113,6 +113,8 @@ static struct board_type products[] = {
 	{0x409D0E11, "Smart Array 6400 EM", &SA5_access},
 	{0x40910E11, "Smart Array 6i", &SA5_access},
 	{0x3225103C, "Smart Array P600", &SA5_access},
+	{0x3223103C, "Smart Array P800", &SA5_access},
+	{0x3234103C, "Smart Array P400", &SA5_access},
 	{0x3235103C, "Smart Array P400i", &SA5_access},
 	{0x3211103C, "Smart Array E200i", &SA5_access},
 	{0x3212103C, "Smart Array E200", &SA5_access},
@@ -3753,7 +3755,7 @@ static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h)
 	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
 		if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
 			break;
-		msleep(10);
+		usleep_range(10000, 20000);
 	}
 }
@@ -3937,10 +3939,9 @@ static int __devinit cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
 	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
 			subsystem_vendor_id;
-	for (i = 0; i < ARRAY_SIZE(products); i++) {
+	for (i = 0; i < ARRAY_SIZE(products); i++)
 		if (*board_id == products[i].board_id)
 			return i;
-	}
 	dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n",
 		*board_id);
 	return -ENODEV;
@@ -3971,18 +3972,31 @@ static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
 	return -ENODEV;
 }
 
-static int __devinit cciss_wait_for_board_ready(ctlr_info_t *h)
+static int __devinit cciss_wait_for_board_state(struct pci_dev *pdev,
+	void __iomem *vaddr, int wait_for_ready)
+#define BOARD_READY 1
+#define BOARD_NOT_READY 0
 {
-	int i;
+	int i, iterations;
 	u32 scratchpad;
 
-	for (i = 0; i < CCISS_BOARD_READY_ITERATIONS; i++) {
-		scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
-		if (scratchpad == CCISS_FIRMWARE_READY)
-			return 0;
+	if (wait_for_ready)
+		iterations = CCISS_BOARD_READY_ITERATIONS;
+	else
+		iterations = CCISS_BOARD_NOT_READY_ITERATIONS;
+
+	for (i = 0; i < iterations; i++) {
+		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
+		if (wait_for_ready) {
+			if (scratchpad == CCISS_FIRMWARE_READY)
+				return 0;
+		} else {
+			if (scratchpad != CCISS_FIRMWARE_READY)
+				return 0;
+		}
 		msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS);
 	}
-	dev_warn(&h->pdev->dev, "board not ready, timed out.\n");
+	dev_warn(&pdev->dev, "board not ready, timed out.\n");
 	return -ENODEV;
 }
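Review note: the new helper folds the old "wait for ready" loop and the new "wait for not ready" case into one routine with two timeout budgets (120 s vs. 10 s at a 100 ms poll, i.e. 1200 vs. 100 iterations). A minimal user-space sketch of that contract — the scratchpad read is stubbed and the constants are inlined, so this is a model of the control flow, not driver code:

#include <stdio.h>

#define FIRMWARE_READY 0xffff0000u
#define READY_ITERATIONS     1200 /* 120 s / 100 ms */
#define NOT_READY_ITERATIONS  100 /*  10 s / 100 ms */

/* Hypothetical stand-in for readl(vaddr + SA5_SCRATCHPAD_OFFSET). */
static unsigned int read_scratchpad(void)
{
	return FIRMWARE_READY;
}

/* Mirrors cciss_wait_for_board_state(): one helper, two polarities. */
static int wait_for_board_state(int wait_for_ready)
{
	int i, iterations = wait_for_ready ? READY_ITERATIONS
					   : NOT_READY_ITERATIONS;

	for (i = 0; i < iterations; i++) {
		unsigned int scratchpad = read_scratchpad();
		/* ready wants ==, not-ready wants !=; the driver spells
		 * this out as an if/else, collapsed here to one test */
		if ((scratchpad == FIRMWARE_READY) == !!wait_for_ready)
			return 0;
		/* msleep(100) in the driver; omitted here */
	}
	return -1; /* timed out */
}

int main(void)
{
	printf("ready: %d\n", wait_for_board_state(1));
	printf("not ready: %d\n", wait_for_board_state(0));
	return 0;
}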
@@ -4031,6 +4045,11 @@ static int __devinit cciss_find_cfgtables(ctlr_info_t *h)
 static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
 {
 	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+
+	/* Limit commands in memory limited kdump scenario. */
+	if (reset_devices && h->max_commands > 32)
+		h->max_commands = 32;
+
 	if (h->max_commands < 16) {
 		dev_warn(&h->pdev->dev, "Controller reports "
 			"max supported commands of %d, an obvious lie. "
@@ -4148,7 +4167,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
 		err = -ENOMEM;
 		goto err_out_free_res;
 	}
-	err = cciss_wait_for_board_ready(h);
+	err = cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
 	if (err)
 		goto err_out_free_res;
 	err = cciss_find_cfgtables(h);
@@ -4313,36 +4332,6 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u
 #define cciss_soft_reset_controller(p) cciss_message(p, 1, 0)
 #define cciss_noop(p) cciss_message(p, 3, 0)
 
-static __devinit int cciss_reset_msi(struct pci_dev *pdev)
-{
-/* the #defines are stolen from drivers/pci/msi.h. */
-#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
-#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
-
-	int pos;
-	u16 control = 0;
-
-	pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
-	if (pos) {
-		pci_read_config_word(pdev, msi_control_reg(pos), &control);
-		if (control & PCI_MSI_FLAGS_ENABLE) {
-			dev_info(&pdev->dev, "resetting MSI\n");
-			pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE);
-		}
-	}
-
-	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
-	if (pos) {
-		pci_read_config_word(pdev, msi_control_reg(pos), &control);
-		if (control & PCI_MSIX_FLAGS_ENABLE) {
-			dev_info(&pdev->dev, "resetting MSI-X\n");
-			pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE);
-		}
-	}
-
-	return 0;
-}
-
 static int cciss_controller_hard_reset(struct pci_dev *pdev,
 	void * __iomem vaddr, bool use_doorbell)
 {
@@ -4397,17 +4386,17 @@ static int cciss_controller_hard_reset(struct pci_dev *pdev,
  * states or using the doorbell register. */
 static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
 {
-	u16 saved_config_space[32];
 	u64 cfg_offset;
 	u32 cfg_base_addr;
 	u64 cfg_base_addr_index;
 	void __iomem *vaddr;
 	unsigned long paddr;
 	u32 misc_fw_support, active_transport;
-	int rc, i;
+	int rc;
 	CfgTable_struct __iomem *cfgtable;
 	bool use_doorbell;
 	u32 board_id;
+	u16 command_register;
 
 	/* For controllers as old a the p600, this is very nearly
 	 * the same thing as
@@ -4417,14 +4406,6 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
 	 * pci_set_power_state(pci_dev, PCI_D0);
 	 * pci_restore_state(pci_dev);
 	 *
-	 * but we can't use these nice canned kernel routines on
-	 * kexec, because they also check the MSI/MSI-X state in PCI
-	 * configuration space and do the wrong thing when it is
-	 * set/cleared.  Also, the pci_save/restore_state functions
-	 * violate the ordering requirements for restoring the
-	 * configuration space from the CCISS document (see the
-	 * comment below).  So we roll our own ....
-	 *
 	 * For controllers newer than the P600, the pci power state
 	 * method of resetting doesn't work so we have another way
 	 * using the doorbell register.
@@ -4443,8 +4424,13 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
 		return -ENODEV;
 	}
 
-	for (i = 0; i < 32; i++)
-		pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
+	/* Save the PCI command register */
+	pci_read_config_word(pdev, 4, &command_register);
+	/* Turn the board off.  This is so that later pci_restore_state()
+	 * won't turn the board on before the rest of config space is ready.
+	 */
+	pci_disable_device(pdev);
+	pci_save_state(pdev);
 
 	/* find the first memory BAR, so we can find the cfg table */
 	rc = cciss_pci_find_memory_BAR(pdev, &paddr);
@@ -4479,26 +4465,32 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
 	rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
 	if (rc)
 		goto unmap_cfgtable;
-
-	/* Restore the PCI configuration space.  The Open CISS
-	 * Specification says, "Restore the PCI Configuration
-	 * Registers, offsets 00h through 60h. It is important to
-	 * restore the command register, 16-bits at offset 04h,
-	 * last. Do not restore the configuration status register,
-	 * 16-bits at offset 06h."  Note that the offset is 2*i.
-	 */
-	for (i = 0; i < 32; i++) {
-		if (i == 2 || i == 3)
-			continue;
-		pci_write_config_word(pdev, 2*i, saved_config_space[i]);
-	}
-	wmb();
-	pci_write_config_word(pdev, 4, saved_config_space[2]);
+	pci_restore_state(pdev);
+	rc = pci_enable_device(pdev);
+	if (rc) {
+		dev_warn(&pdev->dev, "failed to enable device.\n");
+		goto unmap_cfgtable;
+	}
+	pci_write_config_word(pdev, 4, command_register);
 
 	/* Some devices (notably the HP Smart Array 5i Controller)
 	   need a little pause here */
 	msleep(CCISS_POST_RESET_PAUSE_MSECS);
 
+	/* Wait for board to become not ready, then ready. */
+	dev_info(&pdev->dev, "Waiting for board to become ready.\n");
+	rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
+	if (rc) /* Don't bail, might be E500, etc. which can't be reset */
+		dev_warn(&pdev->dev,
+			"failed waiting for board to become not ready\n");
+	rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY);
+	if (rc) {
+		dev_warn(&pdev->dev,
+			"failed waiting for board to become ready\n");
+		goto unmap_cfgtable;
+	}
+	dev_info(&pdev->dev, "board ready.\n");
+
 	/* Controller should be in simple mode at this point.  If it's not,
 	 * It means we're on one of those controllers which doesn't support
 	 * the doorbell reset method and on which the PCI power management reset
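Review note: the ordering here is the whole point of the rework — save the command register by hand, disable the device before pci_save_state() so the saved state has the board off, and write the command register back only after pci_restore_state()/pci_enable_device(). A toy model of that sequence; every function below is a hypothetical stub standing in for the PCI core call of the same flavour, not the kernel API:

#include <stdio.h>

static unsigned short config_space_command = 0x0007;
static unsigned short saved_command;

static void pci_read_command(unsigned short *v) { *v = config_space_command; }
static void pci_write_command(unsigned short v) { config_space_command = v; }
static void pci_disable(void)  { /* board off; saved state will say "off" */ }
static void pci_save(void)     { /* snapshot the rest of config space */ }
static void pci_restore(void)  { /* write back everything except command */ }
static int  pci_enable(void)   { return 0; }
static void controller_hard_reset(void) { config_space_command = 0; }

int main(void)
{
	/* 1. save the command register by hand */
	pci_read_command(&saved_command);
	/* 2. disable BEFORE saving, so a later restore cannot wake the
	 *    board up before the rest of config space is back */
	pci_disable();
	pci_save();
	/* 3. reset */
	controller_hard_reset();
	/* 4. restore, re-enable, and only then rewrite the command
	 *    register -- it must go in last */
	pci_restore();
	if (pci_enable())
		return 1;
	pci_write_command(saved_command);
	printf("command register restored: 0x%04x\n", config_space_command);
	return 0;
}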
@@ -4539,8 +4531,6 @@ static __devinit int cciss_init_reset_devices(struct pci_dev *pdev)
 		return 0; /* just try to do the kdump anyhow. */
 	if (rc)
 		return -ENODEV;
-	if (cciss_reset_msi(pdev))
-		return -ENODEV;
 
 	/* Now try to get the controller to respond to a no-op */
 	for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) {
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -200,10 +200,14 @@ struct ctlr_info
  * the above.
  */
 #define CCISS_BOARD_READY_WAIT_SECS (120)
+#define CCISS_BOARD_NOT_READY_WAIT_SECS (10)
 #define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100)
 #define CCISS_BOARD_READY_ITERATIONS \
 	((CCISS_BOARD_READY_WAIT_SECS * 1000) / \
 		CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
+#define CCISS_BOARD_NOT_READY_ITERATIONS \
+	((CCISS_BOARD_NOT_READY_WAIT_SECS * 1000) / \
+		CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
 #define CCISS_POST_RESET_PAUSE_MSECS (3000)
 #define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (1000)
 #define CCISS_POST_RESET_NOOP_RETRIES (12)
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -78,11 +78,10 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 	init_completion(&md_io.event);
 	md_io.error = 0;
 
-	if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags))
-		rw |= REQ_HARDBARRIER;
+	if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
+		rw |= REQ_FUA;
 	rw |= REQ_UNPLUG | REQ_SYNC;
 
- retry:
 	bio = bio_alloc(GFP_NOIO, 1);
 	bio->bi_bdev = bdev->md_bdev;
 	bio->bi_sector = sector;
@@ -100,17 +99,6 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 	wait_for_completion(&md_io.event);
 	ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0;
 
-	/* check for unsupported barrier op.
-	 * would rather check on EOPNOTSUPP, but that is not reliable.
-	 * don't try again for ANY return value != 0 */
-	if (unlikely((bio->bi_rw & REQ_HARDBARRIER) && !ok)) {
-		/* Try again with no barrier */
-		dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
-		set_bit(MD_NO_BARRIER, &mdev->flags);
-		rw &= ~REQ_HARDBARRIER;
-		bio_put(bio);
-		goto retry;
-	}
  out:
 	bio_put(bio);
 	return ok;
@@ -284,18 +272,32 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 	u32 xor_sum = 0;
 
 	if (!get_ldev(mdev)) {
-		dev_err(DEV, "get_ldev() failed in w_al_write_transaction\n");
+		dev_err(DEV,
+			"disk is %s, cannot start al transaction (-%d +%d)\n",
+			drbd_disk_str(mdev->state.disk), evicted, new_enr);
 		complete(&((struct update_al_work *)w)->event);
 		return 1;
 	}
 	/* do we have to do a bitmap write, first?
 	 * TODO reduce maximum latency:
 	 * submit both bios, then wait for both,
-	 * instead of doing two synchronous sector writes. */
+	 * instead of doing two synchronous sector writes.
+	 * For now, we must not write the transaction,
+	 * if we cannot write out the bitmap of the evicted extent. */
 	if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
 		drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT);
 
-	mutex_lock(&mdev->md_io_mutex); /* protects md_io_page, al_tr_cycle, ... */
+	/* The bitmap write may have failed, causing a state change. */
+	if (mdev->state.disk < D_INCONSISTENT) {
+		dev_err(DEV,
+			"disk is %s, cannot write al transaction (-%d +%d)\n",
+			drbd_disk_str(mdev->state.disk), evicted, new_enr);
+		complete(&((struct update_al_work *)w)->event);
+		put_ldev(mdev);
+		return 1;
+	}
+
+	mutex_lock(&mdev->md_io_mutex); /* protects md_io_buffer, al_tr_cycle, ... */
 	buffer = (struct al_transaction *)page_address(mdev->md_io_page);
 
 	buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);
@@ -739,7 +741,7 @@ void drbd_al_apply_to_bm(struct drbd_conf *mdev)
 	unsigned int enr;
 	unsigned long add = 0;
 	char ppb[10];
-	int i;
+	int i, tmp;
 
 	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
 
@@ -747,7 +749,9 @@ void drbd_al_apply_to_bm(struct drbd_conf *mdev)
 		enr = lc_element_by_index(mdev->act_log, i)->lc_number;
 		if (enr == LC_FREE)
 			continue;
-		add += drbd_bm_ALe_set_all(mdev, enr);
+		tmp = drbd_bm_ALe_set_all(mdev, enr);
+		dynamic_dev_dbg(DEV, "AL: set %d bits in extent %u\n", tmp, enr);
+		add += tmp;
 	}
 
 	lc_unlock(mdev->act_log);
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -116,7 +116,7 @@ struct drbd_conf;
 #define ERR_IF(exp) if (({						\
 	int _b = (exp) != 0;						\
-	if (_b) dev_err(DEV, "%s: (%s) in %s:%d\n",			\
+	if (_b) dev_err(DEV, "ASSERT FAILED: %s: (%s) in %s:%d\n",	\
 		__func__, #exp, __FILE__, __LINE__);			\
 	_b;								\
 	}))
@@ -749,17 +749,12 @@ struct drbd_epoch {
 
 /* drbd_epoch flag bits */
 enum {
-	DE_BARRIER_IN_NEXT_EPOCH_ISSUED,
-	DE_BARRIER_IN_NEXT_EPOCH_DONE,
-	DE_CONTAINS_A_BARRIER,
 	DE_HAVE_BARRIER_NUMBER,
-	DE_IS_FINISHING,
 };
 
 enum epoch_event {
 	EV_PUT,
 	EV_GOT_BARRIER_NR,
-	EV_BARRIER_DONE,
 	EV_BECAME_LAST,
 	EV_CLEANUP = 32, /* used as flag */
 };
@@ -801,11 +796,6 @@ enum {
 	__EE_CALL_AL_COMPLETE_IO,
 	__EE_MAY_SET_IN_SYNC,
 
-	/* This epoch entry closes an epoch using a barrier.
-	 * On sucessful completion, the epoch is released,
-	 * and the P_BARRIER_ACK send. */
-	__EE_IS_BARRIER,
-
 	/* In case a barrier failed,
 	 * we need to resubmit without the barrier flag. */
 	__EE_RESUBMITTED,
@@ -820,7 +810,6 @@ enum {
 };
 #define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
 #define EE_MAY_SET_IN_SYNC     (1<<__EE_MAY_SET_IN_SYNC)
-#define EE_IS_BARRIER          (1<<__EE_IS_BARRIER)
 #define EE_RESUBMITTED         (1<<__EE_RESUBMITTED)
 #define EE_WAS_ERROR           (1<<__EE_WAS_ERROR)
 #define EE_HAS_DIGEST          (1<<__EE_HAS_DIGEST)
@@ -843,16 +832,15 @@ enum {
 				 * Gets cleared when the state.conn
 				 * goes into C_CONNECTED state. */
 	WRITE_BM_AFTER_RESYNC,	/* A kmalloc() during resync failed */
-	NO_BARRIER_SUPP,	/* underlying block device doesn't implement barriers */
 	CONSIDER_RESYNC,
 
-	MD_NO_BARRIER,		/* meta data device does not support barriers,
-				   so don't even try */
+	MD_NO_FUA,		/* Users wants us to not use FUA/FLUSH on meta data dev */
 	SUSPEND_IO,		/* suspend application io */
 	BITMAP_IO,		/* suspend application io;
 				   once no more io in flight, start bitmap io */
 	BITMAP_IO_QUEUED,	/* Started bitmap IO */
-	GO_DISKLESS,		/* Disk failed, local_cnt reached zero, we are going diskless */
+	GO_DISKLESS,		/* Disk is being detached, on io-error or admin request. */
+	WAS_IO_ERROR,		/* Local disk failed returned IO error */
 	RESYNC_AFTER_NEG,	/* Resync after online grow after the attach&negotiate finished. */
 	NET_CONGESTED,		/* The data socket is congested */
@@ -947,7 +935,6 @@ enum write_ordering_e {
 	WO_none,
 	WO_drain_io,
 	WO_bdev_flush,
-	WO_bio_barrier
 };
 
 struct fifo_buffer {
@@ -1281,6 +1268,7 @@ extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
 extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
 extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why);
 extern void drbd_go_diskless(struct drbd_conf *mdev);
+extern void drbd_ldev_destroy(struct drbd_conf *mdev);
 
 /* Meta data layout
@@ -1798,17 +1786,17 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
 	case EP_PASS_ON:
 		if (!forcedetach) {
 			if (__ratelimit(&drbd_ratelimit_state))
-				dev_err(DEV, "Local IO failed in %s."
-					"Passing error on...\n", where);
+				dev_err(DEV, "Local IO failed in %s.\n", where);
 			break;
 		}
 		/* NOTE fall through to detach case if forcedetach set */
 	case EP_DETACH:
 	case EP_CALL_HELPER:
+		set_bit(WAS_IO_ERROR, &mdev->flags);
 		if (mdev->state.disk > D_FAILED) {
 			_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
-			dev_err(DEV, "Local IO failed in %s."
-				"Detaching...\n", where);
+			dev_err(DEV,
+				"Local IO failed in %s. Detaching...\n", where);
 		}
 		break;
 	}
@@ -2127,7 +2115,11 @@ static inline void put_ldev(struct drbd_conf *mdev)
 	__release(local);
 	D_ASSERT(i >= 0);
 	if (i == 0) {
+		if (mdev->state.disk == D_DISKLESS)
+			/* even internal references gone, safe to destroy */
+			drbd_ldev_destroy(mdev);
 		if (mdev->state.disk == D_FAILED)
+			/* all application IO references gone. */
 			drbd_go_diskless(mdev);
 		wake_up(&mdev->misc_wait);
 	}
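Review note: with this series put_ldev() becomes the single dispatch point of the detach state machine — the last reference dropped while in D_FAILED queues the go-diskless work, and the last reference dropped while in D_DISKLESS frees the backing-device structures. A compilable toy model of that rule; go_diskless()/ldev_destroy() and the extra-reference dance are stand-ins for the drbd functions, not drbd itself:

#include <stdatomic.h>
#include <stdio.h>

enum disk_state { D_DISKLESS, D_FAILED, D_UP };

static atomic_int local_cnt;              /* models mdev->local_cnt */
static enum disk_state disk = D_UP;

static void go_diskless(void)  { puts("queue go_diskless work"); }
static void ldev_destroy(void) { puts("free ldev, act_log, resync lru"); }

/* mirrors the reworked put_ldev(): the LAST reference decides */
static void put_ldev(void)
{
	if (atomic_fetch_sub(&local_cnt, 1) - 1 == 0) {
		if (disk == D_DISKLESS)
			ldev_destroy();   /* even internal refs gone */
		if (disk == D_FAILED)
			go_diskless();    /* application IO refs gone */
	}
}

int main(void)
{
	/* __drbd_set_state() grabs one extra ref on -> D_FAILED, so the
	 * cleanup in after_state_ch() runs before the transition to
	 * D_DISKLESS can free anything */
	atomic_fetch_add(&local_cnt, 1);
	disk = D_FAILED;
	put_ldev();    /* after_state_ch() drops it: queue diskless work */

	atomic_fetch_add(&local_cnt, 1);
	disk = D_DISKLESS;
	put_ldev();    /* last internal reference: destroy */
	return 0;
}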
@@ -2138,6 +2130,10 @@ static inline int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_stat
 {
 	int io_allowed;
 
+	/* never get a reference while D_DISKLESS */
+	if (mdev->state.disk == D_DISKLESS)
+		return 0;
+
 	atomic_inc(&mdev->local_cnt);
 	io_allowed = (mdev->state.disk >= mins);
 	if (!io_allowed)
@@ -2406,12 +2402,12 @@ static inline void drbd_md_flush(struct drbd_conf *mdev)
 {
 	int r;
 
-	if (test_bit(MD_NO_BARRIER, &mdev->flags))
+	if (test_bit(MD_NO_FUA, &mdev->flags))
 		return;
 
 	r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL);
 	if (r) {
-		set_bit(MD_NO_BARRIER, &mdev->flags);
+		set_bit(MD_NO_FUA, &mdev->flags);
 		dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
 	}
 }
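Review note: the rename from MD_NO_BARRIER to MD_NO_FUA keeps the latch-on-failure behaviour — one failed flush disables all further meta-data flushes for the device. The pattern in isolation, with blkdev_issue_flush() replaced by a stub that always fails:

#include <stdbool.h>
#include <stdio.h>

/* stub stand-in for blkdev_issue_flush(); pretend the device
 * rejects flushes */
static int issue_flush(void) { return -1; }

static bool md_no_fua; /* models the MD_NO_FUA flag bit */

static void md_flush(void)
{
	if (md_no_fua)
		return;           /* user disabled it, or it failed before */
	if (issue_flush()) {
		md_no_fua = true; /* latch: never retry a broken flush */
		fprintf(stderr, "md flush failed, disabling md-flushes\n");
	}
}

int main(void)
{
	md_flush(); /* fails once, latches the flag */
	md_flush(); /* silently skipped from now on */
	return 0;
}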
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -835,6 +835,15 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
 	    ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
 		ns.conn = os.conn;
 
+	/* we cannot fail (again) if we already detached */
+	if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
+		ns.disk = D_DISKLESS;
+
+	/* if we are only D_ATTACHING yet,
+	 * we can (and should) go directly to D_DISKLESS. */
+	if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
+		ns.disk = D_DISKLESS;
+
 	/* After C_DISCONNECTING only C_STANDALONE may follow */
 	if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
 		ns.conn = os.conn;
@@ -1056,7 +1065,15 @@ int __drbd_set_state(struct drbd_conf *mdev,
 	    !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
 		set_bit(DEVICE_DYING, &mdev->flags);
 
-	mdev->state.i = ns.i;
+	/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
+	 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
+	 * drbd_ldev_destroy() won't happen before our corresponding
+	 * after_state_ch works run, where we put_ldev again. */
+	if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
+	    (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
+		atomic_inc(&mdev->local_cnt);
+
+	mdev->state = ns;
 	wake_up(&mdev->misc_wait);
 	wake_up(&mdev->state_wait);
@@ -1268,7 +1285,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 		if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
 			drbd_uuid_new_current(mdev);
 			clear_bit(NEW_CUR_UUID, &mdev->flags);
-			drbd_md_sync(mdev);
 		}
 		spin_lock_irq(&mdev->req_lock);
 		_drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
@@ -1365,35 +1381,46 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 	    os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
 		drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");
 
-	/* first half of local IO error */
-	if (os.disk > D_FAILED && ns.disk == D_FAILED) {
-		enum drbd_io_error_p eh = EP_PASS_ON;
+	/* first half of local IO error, failure to attach,
+	 * or administrative detach */
+	if (os.disk != D_FAILED && ns.disk == D_FAILED) {
+		enum drbd_io_error_p eh;
+		int was_io_error;
+		/* corresponding get_ldev was in __drbd_set_state, to serialize
+		 * our cleanup here with the transition to D_DISKLESS,
+		 * so it is safe to dreference ldev here. */
+		eh = mdev->ldev->dc.on_io_error;
+		was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
+
+		/* current state still has to be D_FAILED,
+		 * there is only one way out: to D_DISKLESS,
+		 * and that may only happen after our put_ldev below. */
+		if (mdev->state.disk != D_FAILED)
+			dev_err(DEV,
+				"ASSERT FAILED: disk is %s during detach\n",
+				drbd_disk_str(mdev->state.disk));
 
 		if (drbd_send_state(mdev))
-			dev_warn(DEV, "Notified peer that my disk is broken.\n");
+			dev_warn(DEV, "Notified peer that I am detaching my disk\n");
 		else
-			dev_err(DEV, "Sending state for drbd_io_error() failed\n");
+			dev_err(DEV, "Sending state for detaching disk failed\n");
 
 		drbd_rs_cancel_all(mdev);
 
-		if (get_ldev_if_state(mdev, D_FAILED)) {
-			eh = mdev->ldev->dc.on_io_error;
-			put_ldev(mdev);
-		}
-		if (eh == EP_CALL_HELPER)
-			drbd_khelper(mdev, "local-io-error");
-	}
+		/* In case we want to get something to stable storage still,
+		 * this may be the last chance.
+		 * Following put_ldev may transition to D_DISKLESS. */
+		drbd_md_sync(mdev);
+		put_ldev(mdev);
 
-	/* second half of local IO error handling,
-	 * after local_cnt references have reached zero: */
-	if (os.disk == D_FAILED && ns.disk == D_DISKLESS) {
-		mdev->rs_total = 0;
-		mdev->rs_failed = 0;
-		atomic_set(&mdev->rs_pending_cnt, 0);
+		if (was_io_error && eh == EP_CALL_HELPER)
+			drbd_khelper(mdev, "local-io-error");
 	}
 
-	if (os.disk > D_DISKLESS && ns.disk == D_DISKLESS) {
+	/* second half of local IO error, failure to attach,
+	 * or administrative detach,
+	 * after local_cnt references have reached zero again */
+	if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
 		/* We must still be diskless,
 		 * re-attach has to be serialized with this! */
 		if (mdev->state.disk != D_DISKLESS)
@@ -1401,27 +1428,17 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 			dev_err(DEV,
 				"ASSERT FAILED: disk is %s while going diskless\n",
 				drbd_disk_str(mdev->state.disk));
 
-		/* we cannot assert local_cnt == 0 here, as get_ldev_if_state
-		 * will inc/dec it frequently. Since we became D_DISKLESS, no
-		 * one has touched the protected members anymore, though, so we
-		 * are safe to free them here. */
+		mdev->rs_total = 0;
+		mdev->rs_failed = 0;
+		atomic_set(&mdev->rs_pending_cnt, 0);
+
 		if (drbd_send_state(mdev))
-			dev_warn(DEV, "Notified peer that I detached my disk.\n");
+			dev_warn(DEV, "Notified peer that I'm now diskless.\n");
 		else
-			dev_err(DEV, "Sending state for detach failed\n");
-
-		lc_destroy(mdev->resync);
-		mdev->resync = NULL;
-		lc_destroy(mdev->act_log);
-		mdev->act_log = NULL;
-		__no_warn(local,
-			drbd_free_bc(mdev->ldev);
-			mdev->ldev = NULL;);
-
-		if (mdev->md_io_tmpp) {
-			__free_page(mdev->md_io_tmpp);
-			mdev->md_io_tmpp = NULL;
-		}
+			dev_err(DEV, "Sending state for being diskless failed\n");
+		/* corresponding get_ldev in __drbd_set_state
+		 * this may finaly trigger drbd_ldev_destroy. */
+		put_ldev(mdev);
 	}
 
 	/* Disks got bigger while they were detached */
@@ -2772,11 +2789,6 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
 
 	drbd_set_defaults(mdev);
 
-	/* for now, we do NOT yet support it,
-	 * even though we start some framework
-	 * to eventually support barriers */
-	set_bit(NO_BARRIER_SUPP, &mdev->flags);
-
 	atomic_set(&mdev->ap_bio_cnt, 0);
 	atomic_set(&mdev->ap_pending_cnt, 0);
 	atomic_set(&mdev->rs_pending_cnt, 0);
@@ -2842,7 +2854,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
 	drbd_thread_init(mdev, &mdev->asender, drbd_asender);
 
 	mdev->agreed_pro_version = PRO_VERSION_MAX;
-	mdev->write_ordering = WO_bio_barrier;
+	mdev->write_ordering = WO_bdev_flush;
 	mdev->resync_wenr = LC_FREE;
 }
@@ -2899,7 +2911,6 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
 	D_ASSERT(list_empty(&mdev->resync_work.list));
 	D_ASSERT(list_empty(&mdev->unplug_work.list));
 	D_ASSERT(list_empty(&mdev->go_diskless.list));
-
 }
@@ -3660,6 +3671,8 @@ void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
 	get_random_bytes(&val, sizeof(u64));
 	_drbd_uuid_set(mdev, UI_CURRENT, val);
+	/* get it to stable storage _now_ */
+	drbd_md_sync(mdev);
 }
 
 void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
@@ -3756,19 +3769,31 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 	return 1;
 }
 
+void drbd_ldev_destroy(struct drbd_conf *mdev)
+{
+	lc_destroy(mdev->resync);
+	mdev->resync = NULL;
+	lc_destroy(mdev->act_log);
+	mdev->act_log = NULL;
+	__no_warn(local,
+		drbd_free_bc(mdev->ldev);
+		mdev->ldev = NULL;);
+	if (mdev->md_io_tmpp) {
+		__free_page(mdev->md_io_tmpp);
+		mdev->md_io_tmpp = NULL;
+	}
+	clear_bit(GO_DISKLESS, &mdev->flags);
+}
+
 static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 {
 	D_ASSERT(mdev->state.disk == D_FAILED);
 	/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
 	 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
-	 * the protected members anymore, though, so in the after_state_ch work
-	 * it will be safe to free them. */
+	 * the protected members anymore, though, so once put_ldev reaches zero
+	 * again, it will be safe to free them. */
 	drbd_force_state(mdev, NS(disk, D_DISKLESS));
-	/* We need to wait for return of references checked out while we still
-	 * have been D_FAILED, though (drbd_md_sync, bitmap io). */
-	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
-
-	clear_bit(GO_DISKLESS, &mdev->flags);
 	return 1;
 }
@@ -3777,9 +3802,6 @@ void drbd_go_diskless(struct drbd_conf *mdev)
 	D_ASSERT(mdev->state.disk == D_FAILED);
 	if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
 		drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
-	/* don't drbd_queue_work_front,
-	 * we need to serialize with the after_state_ch work
-	 * of the -> D_FAILED transition. */
 }
 
 /**
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -870,6 +870,11 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 		retcode = ERR_DISK_CONFIGURED;
 		goto fail;
 	}
+	/* It may just now have detached because of IO error.  Make sure
+	 * drbd_ldev_destroy is done already, we may end up here very fast,
+	 * e.g. if someone calls attach from the on-io-error handler,
+	 * to realize a "hot spare" feature (not that I'd recommend that) */
+	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
 
 	/* allocation not in the IO path, cqueue thread context */
 	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
@@ -1098,9 +1103,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	/* Reset the "barriers don't work" bits here, then force meta data to
 	 * be written, to ensure we determine if barriers are supported. */
 	if (nbc->dc.no_md_flush)
-		set_bit(MD_NO_BARRIER, &mdev->flags);
+		set_bit(MD_NO_FUA, &mdev->flags);
 	else
-		clear_bit(MD_NO_BARRIER, &mdev->flags);
+		clear_bit(MD_NO_FUA, &mdev->flags);
 
 	/* Point of no return reached.
 	 * Devices and memory are no longer released by error cleanup below.
@@ -1112,8 +1117,8 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	nbc = NULL;
 	resync_lru = NULL;
 
-	mdev->write_ordering = WO_bio_barrier;
-	drbd_bump_write_ordering(mdev, WO_bio_barrier);
+	mdev->write_ordering = WO_bdev_flush;
+	drbd_bump_write_ordering(mdev, WO_bdev_flush);
 
 	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
 		set_bit(CRASHED_PRIMARY, &mdev->flags);
@@ -1262,7 +1267,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
  force_diskless_dec:
 	put_ldev(mdev);
  force_diskless:
-	drbd_force_state(mdev, NS(disk, D_DISKLESS));
+	drbd_force_state(mdev, NS(disk, D_FAILED));
 	drbd_md_sync(mdev);
  release_bdev2_fail:
 	if (nbc)
@@ -1285,10 +1290,19 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	return 0;
 }
 
+/* Detaching the disk is a process in multiple stages.  First we need to lock
+ * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
+ * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
+ * internal references as well.
+ * Only then we have finally detached. */
 static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 			  struct drbd_nl_cfg_reply *reply)
 {
+	drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
 	reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS));
+	if (mdev->state.disk == D_DISKLESS)
+		wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
+	drbd_resume_io(mdev);
 	return 0;
 }
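Review note: detach is now explicitly two-phase — suspend application IO, request D_DISKLESS, then block until local_cnt drains, so a subsequent attach cannot race drbd_ldev_destroy(). A sketch of that ordering; the atomics and the spin stand in for drbd's wait_event on misc_wait, and all names below are stand-ins:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int local_cnt;      /* internal references still out */
static _Atomic int disk_diskless; /* did we reach D_DISKLESS? */

static void suspend_io(void) { puts("suspend application IO"); }
static void resume_io(void)  { puts("resume application IO"); }

static void request_diskless(void)
{
	/* the state machine walks D_FAILED -> D_DISKLESS; we jump
	 * straight to the final state here */
	atomic_store(&disk_diskless, 1);
}

static void detach(void)
{
	suspend_io();          /* so no-one is stuck in the activity log */
	request_diskless();
	if (atomic_load(&disk_diskless))
		while (atomic_load(&local_cnt) != 0)
			; /* models wait_event(misc_wait, local_cnt == 0) */
	resume_io();           /* only now is a re-attach safe */
}

int main(void)
{
	detach();
	return 0;
}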
@@ -1953,7 +1967,6 @@ static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
 		drbd_uuid_new_current(mdev);
 		clear_bit(NEW_CUR_UUID, &mdev->flags);
-		drbd_md_sync(mdev);
 	}
 	drbd_suspend_io(mdev);
 	reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -158,7 +158,6 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
 		[WO_none] = 'n',
 		[WO_drain_io] = 'd',
 		[WO_bdev_flush] = 'f',
-		[WO_bio_barrier] = 'b',
 	};
 
 	seq_printf(seq, "version: " REL_VERSION " (api:%d/proto:%d-%d)\n%s\n",
(One file's diff is collapsed in this view; judging by the surrounding files and the barrier-removal theme of this series, it is most likely drivers/block/drbd/drbd_receiver.c.)
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -258,7 +258,7 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
 		if (!hlist_unhashed(&req->colision))
 			hlist_del(&req->colision);
 		else
-			D_ASSERT((s & RQ_NET_MASK) == 0);
+			D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
 
 		/* for writes we need to do some extra housekeeping */
 		if (rw == WRITE)
@@ -813,6 +813,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
 			mdev->state.conn >= C_CONNECTED));
 
 	if (!(local || remote) && !is_susp(mdev->state)) {
+		if (__ratelimit(&drbd_ratelimit_state))
 		dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
 		goto fail_free_complete;
 	}
@@ -942,12 +943,21 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
 	if (local) {
 		req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
 
-		if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
-				     : rw == READ  ? DRBD_FAULT_DT_RD
-				     :               DRBD_FAULT_DT_RA))
-			bio_endio(req->private_bio, -EIO);
-		else
-			generic_make_request(req->private_bio);
+		/* State may have changed since we grabbed our reference on the
+		 * mdev->ldev member. Double check, and short-circuit to endio.
+		 * In case the last activity log transaction failed to get on
+		 * stable storage, and this is a WRITE, we may not even submit
+		 * this bio. */
+		if (get_ldev(mdev)) {
+			if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
+					     : rw == READ  ? DRBD_FAULT_DT_RD
+					     :               DRBD_FAULT_DT_RA))
+				bio_endio(req->private_bio, -EIO);
+			else
+				generic_make_request(req->private_bio);
+			put_ldev(mdev);
+		} else
+			bio_endio(req->private_bio, -EIO);
 	}
 
 	/* we need to plug ALWAYS since we possibly need to kick lo_dev.
@@ -1022,20 +1032,6 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
 		return 0;
 	}
 
-	/* Reject barrier requests if we know the underlying device does
-	 * not support them.
-	 * XXX: Need to get this info from peer as well some how so we
-	 * XXX: reject if EITHER side/data/metadata area does not support them.
-	 *
-	 * because of those XXX, this is not yet enabled,
-	 * i.e. in drbd_init_set_defaults we set the NO_BARRIER_SUPP bit.
-	 */
-	if (unlikely(bio->bi_rw & REQ_HARDBARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags)) {
-		/* dev_warn(DEV, "Rejecting barrier request as underlying device does not support\n"); */
-		bio_endio(bio, -EOPNOTSUPP);
-		return 0;
-	}
-
 	/*
 	 * what we "blindly" assume:
 	 */
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -102,12 +102,6 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
 	put_ldev(mdev);
 }
 
-static int is_failed_barrier(int ee_flags)
-{
-	return (ee_flags & (EE_IS_BARRIER|EE_WAS_ERROR|EE_RESUBMITTED))
-			== (EE_IS_BARRIER|EE_WAS_ERROR);
-}
-
 /* writes on behalf of the partner, or resync writes,
  * "submitted" by the receiver, final stage.  */
 static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
@@ -119,21 +113,6 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo
 	int is_syncer_req;
 	int do_al_complete_io;
 
-	/* if this is a failed barrier request, disable use of barriers,
-	 * and schedule for resubmission */
-	if (is_failed_barrier(e->flags)) {
-		drbd_bump_write_ordering(mdev, WO_bdev_flush);
-		spin_lock_irqsave(&mdev->req_lock, flags);
-		list_del(&e->w.list);
-		e->flags = (e->flags & ~EE_WAS_ERROR) | EE_RESUBMITTED;
-		e->w.cb = w_e_reissue;
-		/* put_ldev actually happens below, once we come here again. */
-		__release(local);
-		spin_unlock_irqrestore(&mdev->req_lock, flags);
-		drbd_queue_work(&mdev->data.work, &e->w);
-		return;
-	}
-
 	D_ASSERT(e->block_id != ID_VACANT);
 
 	/* after we moved e to done_ee,
@@ -925,7 +904,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
 	drbd_md_sync(mdev);
 
 	if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) {
-		dev_warn(DEV, "Writing the whole bitmap, due to failed kmalloc\n");
+		dev_info(DEV, "Writing the whole bitmap\n");
 		drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished");
 	}
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -53,7 +53,7 @@
 
 extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.3.9rc2"
+#define REL_VERSION "8.3.9"
 #define API_VERSION 88
 #define PRO_VERSION_MIN 86
 #define PRO_VERSION_MAX 95