Commit 07581dd2 authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] get_io_context fixes

- pass gfp_flags to get_io_context(): not all callers are forced to use
  GFP_ATOMIC.

- fix locking in get_io_context(): bump the refcount while in the exclusive
  region.

- don't go oops in get_io_context() if the kmalloc failed.

- in as_get_io_context(): fail the whole thing if we were unable to
  allocate the AS-specific part.

- as_remove_queued_request() cleanup
parent 930805a2
......@@ -219,13 +219,17 @@ static struct as_io_context *alloc_as_io_context(void)
*/
static struct io_context *as_get_io_context(void)
{
struct io_context *ioc = get_io_context();
if (ioc && !ioc->aic)
struct io_context *ioc = get_io_context(GFP_ATOMIC);
if (ioc && !ioc->aic) {
ioc->aic = alloc_as_io_context();
if (!ioc->aic) {
put_io_context(ioc);
ioc = NULL;
}
}
return ioc;
}
/*
* the back merge hash support functions
*/
......@@ -971,10 +975,6 @@ static void as_completed_request(request_queue_t *q, struct request *rq)
static void as_remove_queued_request(request_queue_t *q, struct request *rq)
{
struct as_rq *arq = RQ_DATA(rq);
if (!arq)
BUG();
else {
const int data_dir = arq->is_sync;
struct as_data *ad = q->elevator.elevator_data;
......@@ -995,8 +995,6 @@ static void as_remove_queued_request(request_queue_t *q, struct request *rq)
list_del_init(&arq->fifo);
as_remove_merge_hints(q, arq);
as_del_arq_rb(ad, arq);
}
}
/*
......@@ -1292,7 +1290,7 @@ static void as_add_request(struct as_data *ad, struct as_rq *arq)
arq->io_context = as_get_io_context();
if (arq->io_context && arq->io_context->aic) {
if (arq->io_context) {
atomic_inc(&arq->io_context->aic->nr_queued);
as_update_iohist(arq->io_context->aic, arq->request);
}
......
......@@ -1360,7 +1360,7 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
{
struct request *rq = NULL;
struct request_list *rl = &q->rq;
struct io_context *ioc = get_io_context();
struct io_context *ioc = get_io_context(gfp_mask);
spin_lock_irq(q->queue_lock);
if (rl->count[rw]+1 >= q->nr_requests) {
......@@ -1439,7 +1439,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw)
struct io_context *ioc;
io_schedule();
ioc = get_io_context();
ioc = get_io_context(GFP_NOIO);
ioc_set_batching(ioc);
put_io_context(ioc);
}
......@@ -2462,7 +2462,7 @@ void exit_io_context(void)
* But weird things happen, so we disable local interrupts to ensure exclusive
* access to *current.
*/
struct io_context *get_io_context(void)
struct io_context *get_io_context(int gfp_flags)
{
struct task_struct *tsk = current;
unsigned long flags;
......@@ -2482,8 +2482,9 @@ struct io_context *get_io_context(void)
tsk->io_context = ret;
}
}
local_irq_restore(flags);
if (ret)
atomic_inc(&ret->refcount);
local_irq_restore(flags);
return ret;
}
......
......@@ -70,7 +70,7 @@ struct io_context {
void put_io_context(struct io_context *ioc);
void exit_io_context(void);
struct io_context *get_io_context(void);
struct io_context *get_io_context(int gfp_flags);
void copy_io_context(struct io_context **pdst, struct io_context **psrc);
void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment