Commit 88626272 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm

* git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm:
  dm: sysfs revert add empty release function to avoid debug warning
  dm mpath: fix stall when requeueing io
  dm raid1: fix null pointer dereference in suspend
  dm raid1: fail writes if errors are not handled and log fails
  dm log: userspace fix overhead_size calculations
  dm snapshot: persistent annotate work_queue as on stack
  dm stripe: avoid divide by zero with invalid stripe count
parents 5ae1d955 9307f6b1
drivers/md/dm-log-userspace-transfer.c
@@ -172,11 +172,15 @@ int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
 {
 	int r = 0;
 	size_t dummy = 0;
-	int overhead_size =
-		sizeof(struct dm_ulog_request *) + sizeof(struct cn_msg);
+	int overhead_size = sizeof(struct dm_ulog_request) + sizeof(struct cn_msg);
 	struct dm_ulog_request *tfr = prealloced_ulog_tfr;
 	struct receiving_pkg pkg;
 
+	/*
+	 * Given the space needed to hold the 'struct cn_msg' and
+	 * 'struct dm_ulog_request' - do we have enough payload
+	 * space remaining?
+	 */
 	if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) {
 		DMINFO("Size of tfr exceeds preallocated size");
 		return -EINVAL;
@@ -191,7 +195,7 @@ int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
 	 */
 	mutex_lock(&dm_ulog_lock);
 
-	memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size);
+	memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg));
 	memcpy(tfr->uuid, uuid, DM_UUID_LEN);
 	tfr->luid = luid;
 	tfr->seq = dm_ulog_seq++;
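
Note on the overhead_size fix above: sizeof(struct dm_ulog_request *) measures the pointer (4 or 8 bytes), not the structure itself, so the computed overhead was far too small and oversized payloads could slip past the size check. A minimal userspace sketch of the pitfall (the struct below is a stand-in, not the kernel's actual layout):

#include <stdio.h>

/* Stand-in for a sizeable request header such as 'struct dm_ulog_request'. */
struct ulog_request {
	char uuid[129];
	unsigned long long luid;
	int request_type;
	char data[2048];
};

int main(void)
{
	/* sizeof on the pointer type yields the pointer width (4 or 8)... */
	printf("sizeof(struct ulog_request *) = %zu\n",
	       sizeof(struct ulog_request *));
	/* ...while sizeof on the struct yields the storage it really needs. */
	printf("sizeof(struct ulog_request)   = %zu\n",
	       sizeof(struct ulog_request));
	return 0;
}

On a 64-bit build this prints 8 for the pointer and a couple of kilobytes for the struct; that gap is what the corrected overhead_size accounts for.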
drivers/md/dm-raid1.c
@@ -724,7 +724,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
 	/*
 	 * Dispatch io.
 	 */
-	if (unlikely(ms->log_failure)) {
+	if (unlikely(ms->log_failure) && errors_handled(ms)) {
 		spin_lock_irq(&ms->lock);
 		bio_list_merge(&ms->failures, &sync);
 		spin_unlock_irq(&ms->lock);
drivers/md/dm-region-hash.c
@@ -660,10 +660,9 @@ void dm_rh_recovery_end(struct dm_region *reg, int success)
 	spin_lock_irq(&rh->region_lock);
 	if (success)
 		list_add(&reg->list, &reg->rh->recovered_regions);
-	else {
-		reg->state = DM_RH_NOSYNC;
+	else
 		list_add(&reg->list, &reg->rh->failed_recovered_regions);
-	}
+
 	spin_unlock_irq(&rh->region_lock);
 
 	rh->wakeup_workers(rh->context);
drivers/md/dm-snap-persistent.c
@@ -254,7 +254,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
 	 * Issue the synchronous I/O from a different thread
 	 * to avoid generic_make_request recursion.
 	 */
-	INIT_WORK(&req.work, do_metadata);
+	INIT_WORK_ON_STACK(&req.work, do_metadata);
 	queue_work(ps->metadata_wq, &req.work);
 	flush_workqueue(ps->metadata_wq);
drivers/md/dm-stripe.c
@@ -110,7 +110,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	}
 
 	stripes = simple_strtoul(argv[0], &end, 10);
-	if (*end) {
+	if (!stripes || *end) {
 		ti->error = "Invalid stripe count";
 		return -EINVAL;
 	}
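
Note on the stripe fix above: simple_strtoul() returns 0 both for the string "0" and for unparseable input, and a zero stripe count later reaches a division in the target's mapping math. A userspace sketch of the same validate-before-divide pattern, using strtoul in place of the kernel's simple_strtoul (names and numbers are illustrative):

#include <stdio.h>
#include <stdlib.h>

/* Parse a stripe-count argument; reject trailing junk and, crucially, zero. */
static int parse_stripes(const char *arg, unsigned long *stripes)
{
	char *end;

	*stripes = strtoul(arg, &end, 10);
	if (!*stripes || *end)		/* mirrors: if (!stripes || *end) */
		return -1;
	return 0;
}

int main(void)
{
	unsigned long stripes, chunk = 8, len = 1024;

	if (parse_stripes("0", &stripes) < 0) {
		fprintf(stderr, "Invalid stripe count\n");
		return 1;
	}
	/* Without the !*stripes check, a zero count would divide by zero here. */
	printf("stripe width = %lu\n", len / (stripes * chunk));
	return 0;
}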
drivers/md/dm-sysfs.c
@@ -79,13 +79,6 @@ static struct sysfs_ops dm_sysfs_ops = {
 	.show = dm_attr_show,
 };
 
-/*
- * The sysfs structure is embedded in md struct, nothing to do here
- */
-static void dm_sysfs_release(struct kobject *kobj)
-{
-}
-
 /*
  * dm kobject is embedded in mapped_device structure
  * no need to define release function here
@@ -93,7 +86,6 @@ static void dm_sysfs_release(struct kobject *kobj)
 static struct kobj_type dm_ktype = {
 	.sysfs_ops = &dm_sysfs_ops,
 	.default_attrs = dm_attrs,
-	.release = dm_sysfs_release
 };
 
 /*
drivers/md/dm.c
@@ -1595,10 +1595,15 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
 	return BLKPREP_OK;
 }
 
-static void map_request(struct dm_target *ti, struct request *clone,
+/*
+ * Returns:
+ * 0  : the request has been processed (not requeued)
+ * !0 : the request has been requeued
+ */
+static int map_request(struct dm_target *ti, struct request *clone,
 			struct mapped_device *md)
 {
-	int r;
+	int r, requeued = 0;
 	struct dm_rq_target_io *tio = clone->end_io_data;
 
 	/*
@@ -1625,6 +1630,7 @@ static void map_request(struct dm_target *ti, struct request *clone,
 	case DM_MAPIO_REQUEUE:
 		/* The target wants to requeue the I/O */
 		dm_requeue_unmapped_request(clone);
+		requeued = 1;
 		break;
 	default:
 		if (r > 0) {
@@ -1636,6 +1642,8 @@ static void map_request(struct dm_target *ti, struct request *clone,
 		dm_kill_unmapped_request(clone, r);
 		break;
 	}
+
+	return requeued;
 }
 
 /*
@@ -1677,12 +1685,17 @@ static void dm_request_fn(struct request_queue *q)
 		atomic_inc(&md->pending[rq_data_dir(clone)]);
 		spin_unlock(q->queue_lock);
-		map_request(ti, clone, md);
+		if (map_request(ti, clone, md))
+			goto requeued;
+
 		spin_lock_irq(q->queue_lock);
 	}
 
 	goto out;
 
+requeued:
+	spin_lock_irq(q->queue_lock);
+
 plug_and_out:
 	if (!elv_queue_empty(q))
 		/* Some requests still remain, retry later */
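
Note on the mpath stall fix above: map_request() now reports whether the clone was requeued, so dm_request_fn() can stop pulling requests and retake the queue lock on the requeued path instead of stalling. A stripped-down userspace sketch of that control flow (stub names, not the kernel API):

#include <stdio.h>

enum { MAPPED = 0, REQUEUE = 1 };

/* Stub for the target's map function: requeue every third request. */
static int target_map(int rq)
{
	return (rq % 3 == 2) ? REQUEUE : MAPPED;
}

/* Mirrors the fixed map_request(): report requeue back to the caller. */
static int map_request(int rq)
{
	int requeued = 0;

	if (target_map(rq) == REQUEUE) {
		printf("rq %d: requeued\n", rq);
		requeued = 1;
	} else {
		printf("rq %d: dispatched\n", rq);
	}
	return requeued;
}

/* Mirrors dm_request_fn(): stop dispatching once a request is requeued. */
int main(void)
{
	for (int rq = 0; rq < 6; rq++)
		if (map_request(rq))
			break;	/* in the kernel: goto requeued, then relock */
	return 0;
}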