Commit 950a2b0b authored by Linus Torvalds

v2.4.0.4 -> v2.4.0.5

  - ppp UP deadlock attack fix
parent 6aea1666
 VERSION = 2
 PATCHLEVEL = 4
 SUBLEVEL = 1
-EXTRAVERSION =-pre4
+EXTRAVERSION =-pre5
 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
@@ -457,9 +457,8 @@ export MODVERFILE
 depend dep: dep-files

-# make checkconfig: Prune 'scripts' directory to avoid "false positives".
 checkconfig:
-	find * -name '*.[hcS]' -type f -print | grep -v scripts/ | sort | xargs $(PERL) -w scripts/checkconfig.pl
+	find * -name '*.[hcS]' -type f -print | sort | xargs $(PERL) -w scripts/checkconfig.pl

 checkhelp:
 	find * -name [cC]onfig.in -print | sort | xargs $(PERL) -w scripts/checkhelp.pl
......
@@ -537,6 +537,8 @@ CONFIG_PCMCIA_SERIAL=y
 # CONFIG_QUOTA is not set
 # CONFIG_AUTOFS_FS is not set
 CONFIG_AUTOFS4_FS=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_REISERFS_CHECK is not set
 # CONFIG_ADFS_FS is not set
 # CONFIG_ADFS_FS_RW is not set
 # CONFIG_AFFS_FS is not set
......
@@ -1820,7 +1820,6 @@ static int DAC960_BackMergeFunction(RequestQueue_T *RequestQueue,
 	    Request->nr_segments < Controller->DriverScatterGatherLimit)
 	{
 	  Request->nr_segments++;
-	  RequestQueue->elevator.nr_segments++;
 	  return true;
 	}
       return false;
@@ -1844,7 +1843,6 @@ static int DAC960_FrontMergeFunction(RequestQueue_T *RequestQueue,
 	    Request->nr_segments < Controller->DriverScatterGatherLimit)
 	{
 	  Request->nr_segments++;
-	  RequestQueue->elevator.nr_segments++;
 	  return true;
 	}
       return false;
@@ -1874,7 +1872,6 @@ static int DAC960_MergeRequestsFunction(RequestQueue_T *RequestQueue,
   if (TotalSegments > MaxSegments ||
       TotalSegments > Controller->DriverScatterGatherLimit)
     return false;
-  RequestQueue->elevator.nr_segments -= SameSegment;
   Request->nr_segments = TotalSegments;
   return true;
 }
......
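The three DAC960 hunks above are one instance of a pattern repeated in the pd, pf, i2o_block and SCSI hunks further down: merge functions stop maintaining the queue-global q->elevator.nr_segments counter and only bound the per-request segment count. A minimal user-space sketch of the simplified check, with illustrative names rather than the kernel's types:

/* Sketch of the back-merge segment test after this commit: only the
 * per-request counter is left to maintain. Hypothetical model, not
 * kernel code. */
#include <stdio.h>

struct request_model {
	int nr_segments;
};

/* Returns 1 if a new buffer may be appended as an extra segment. */
static int back_merge_ok(struct request_model *req, int max_segments)
{
	if (req->nr_segments < max_segments) {
		req->nr_segments++;	/* the only bookkeeping that remains */
		return 1;
	}
	return 0;
}

int main(void)
{
	struct request_model req = { .nr_segments = 15 };
	printf("merge #1: %d\n", back_merge_ok(&req, 16));	/* 1: grows to 16 */
	printf("merge #2: %d\n", back_merge_ok(&req, 16));	/* 0: at the limit */
	return 0;
}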
@@ -24,125 +24,115 @@
 #include <linux/blkdev.h>
 #include <linux/elevator.h>
 #include <linux/blk.h>
+#include <linux/module.h>
 #include <asm/uaccess.h>

-/*
- * Order ascending, but only allow a request to be skipped a certain
- * number of times
- */
-void elevator_linus(struct request *req, elevator_t *elevator,
-		    struct list_head *real_head,
-		    struct list_head *head, int orig_latency)
-{
-	struct list_head *entry = real_head;
-	struct request *tmp;
-
-	req->elevator_sequence = orig_latency;
-
-	while ((entry = entry->prev) != head) {
-		tmp = blkdev_entry_to_request(entry);
-		if (IN_ORDER(tmp, req))
-			break;
-		if (!tmp->elevator_sequence)
-			break;
-		tmp->elevator_sequence--;
-	}
-	list_add(&req->queue, entry);
-}
-
 int elevator_linus_merge(request_queue_t *q, struct request **req,
+			 struct list_head * head,
 			 struct buffer_head *bh, int rw,
-			 int *max_sectors, int *max_segments)
+			 int max_sectors, int max_segments)
 {
-	struct list_head *entry, *head = &q->queue_head;
+	struct list_head *entry = &q->queue_head;
 	unsigned int count = bh->b_size >> 9, ret = ELEVATOR_NO_MERGE;

-	entry = head;
-	if (q->head_active && !q->plugged)
-		head = head->next;
-
 	while ((entry = entry->prev) != head) {
-		struct request *__rq = *req = blkdev_entry_to_request(entry);
+		struct request *__rq = blkdev_entry_to_request(entry);
+
+		/*
+		 * simply "aging" of requests in queue
+		 */
+		if (__rq->elevator_sequence-- <= 0) {
+			*req = __rq;
+			break;
+		}
+
 		if (__rq->sem)
 			continue;
 		if (__rq->cmd != rw)
 			continue;
-		if (__rq->nr_sectors + count > *max_sectors)
-			continue;
 		if (__rq->rq_dev != bh->b_rdev)
 			continue;
+		if (__rq->nr_sectors + count > max_sectors)
+			continue;
+		if (__rq->elevator_sequence < count)
+			break;
 		if (__rq->sector + __rq->nr_sectors == bh->b_rsector) {
 			ret = ELEVATOR_BACK_MERGE;
+			*req = __rq;
 			break;
-		}
-		if (!__rq->elevator_sequence)
-			break;
-		if (__rq->sector - count == bh->b_rsector) {
-			__rq->elevator_sequence--;
+		} else if (__rq->sector - count == bh->b_rsector) {
 			ret = ELEVATOR_FRONT_MERGE;
+			__rq->elevator_sequence -= count;
+			*req = __rq;
 			break;
-		}
+		} else if (!*req && BHRQ_IN_ORDER(bh, __rq))
+			*req = __rq;
 	}

+	return ret;
+}
+
+void elevator_linus_merge_cleanup(request_queue_t *q, struct request *req, int count)
+{
+	struct list_head *entry = &req->queue, *head = &q->queue_head;
+
 	/*
 	 * second pass scan of requests that got passed over, if any
 	 */
-	if (ret != ELEVATOR_NO_MERGE && *req) {
-		while ((entry = entry->next) != &q->queue_head) {
-			struct request *tmp = blkdev_entry_to_request(entry);
-			tmp->elevator_sequence--;
-		}
+	while ((entry = entry->next) != head) {
+		struct request *tmp = blkdev_entry_to_request(entry);
+		tmp->elevator_sequence -= count;
 	}
-
-	return ret;
 }

-/*
- * No request sorting, just add it to the back of the list
- */
-void elevator_noop(struct request *req, elevator_t *elevator,
-		   struct list_head *real_head, struct list_head *head,
-		   int orig_latency)
+void elevator_linus_merge_req(struct request *req, struct request *next)
 {
-	list_add_tail(&req->queue, real_head);
+	if (next->elevator_sequence < req->elevator_sequence)
+		req->elevator_sequence = next->elevator_sequence;
 }

 /*
- * See if we can find a request that is buffer can be coalesced with.
+ * See if we can find a request that this buffer can be coalesced with.
  */
 int elevator_noop_merge(request_queue_t *q, struct request **req,
+			struct list_head * head,
 			struct buffer_head *bh, int rw,
-			int *max_sectors, int *max_segments)
+			int max_sectors, int max_segments)
 {
-	struct list_head *entry, *head = &q->queue_head;
+	struct list_head *entry;
 	unsigned int count = bh->b_size >> 9;

-	if (q->head_active && !q->plugged)
-		head = head->next;
+	if (list_empty(&q->queue_head))
+		return ELEVATOR_NO_MERGE;

-	entry = head;
+	entry = &q->queue_head;
 	while ((entry = entry->prev) != head) {
-		struct request *__rq = *req = blkdev_entry_to_request(entry);
-		if (__rq->sem)
-			continue;
+		struct request *__rq = blkdev_entry_to_request(entry);
+
 		if (__rq->cmd != rw)
 			continue;
-		if (__rq->nr_sectors + count > *max_sectors)
-			continue;
 		if (__rq->rq_dev != bh->b_rdev)
 			continue;
-		if (__rq->sector + __rq->nr_sectors == bh->b_rsector)
+		if (__rq->nr_sectors + count > max_sectors)
+			continue;
+		if (__rq->sem)
+			continue;
+		if (__rq->sector + __rq->nr_sectors == bh->b_rsector) {
+			*req = __rq;
 			return ELEVATOR_BACK_MERGE;
-		if (__rq->sector - count == bh->b_rsector)
+		} else if (__rq->sector - count == bh->b_rsector) {
+			*req = __rq;
 			return ELEVATOR_FRONT_MERGE;
+		}
 	}
+
+	*req = blkdev_entry_to_request(q->queue_head.prev);
 	return ELEVATOR_NO_MERGE;
 }

-/*
- * The noop "elevator" does not do any accounting
- */
-void elevator_noop_dequeue(struct request *req) {}
+void elevator_noop_merge_cleanup(request_queue_t *q, struct request *req, int count) {}
+
+void elevator_noop_merge_req(struct request *req, struct request *next) {}

 int blkelvget_ioctl(elevator_t * elevator, blkelv_ioctl_arg_t * arg)
 {
......
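The elevator rework above splits merging into two phases: elevator_*_merge() scans the queue and returns ELEVATOR_BACK_MERGE, ELEVATOR_FRONT_MERGE, or ELEVATOR_NO_MERGE plus an insertion hint in *req, and a later merge_cleanup pass charges the merged sectors to requests that were passed over. The core fairness idea is the per-request elevator_sequence budget. A standalone sketch of that "aging" scan, with hypothetical names:

/* Every queued request carries an elevator_sequence budget; each scan that
 * passes over it spends one unit, and a request whose budget is exhausted
 * acts as a barrier so it cannot be starved. Illustrative model only. */
#include <stdio.h>

#define NR_QUEUED 4

struct rq_model {
	long sector;
	int elevator_sequence;	/* how often this request may still be passed */
};

/* Scan back to front; return the index to insert behind. */
static int scan_for_slot(struct rq_model *q, int n, long new_sector)
{
	int i;

	for (i = n - 1; i >= 0; i--) {
		if (q[i].elevator_sequence-- <= 0)
			break;		/* starved request: stop scanning here */
		if (q[i].sector <= new_sector)
			break;		/* found the ascending-order position */
	}
	return i;
}

int main(void)
{
	struct rq_model q[NR_QUEUED] = {
		{ 100, 2 }, { 400, 2 }, { 700, 0 }, { 900, 2 },
	};
	/* sector 500 would sort before 700 and 900, but the request at 700
	 * has no budget left, so the new one must queue behind it. */
	printf("insert after index %d\n", scan_for_slot(q, NR_QUEUED, 500));
	return 0;
}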
@@ -125,7 +125,7 @@ static inline int get_max_sectors(kdev_t dev)
 	return max_sectors[MAJOR(dev)][MINOR(dev)];
 }

-static inline request_queue_t *__blk_get_queue(kdev_t dev)
+inline request_queue_t *__blk_get_queue(kdev_t dev)
 {
 	struct blk_dev_struct *bdev = blk_dev + MAJOR(dev);
@@ -153,17 +153,14 @@ request_queue_t *blk_get_queue(kdev_t dev)
 static int __blk_cleanup_queue(struct list_head *head)
 {
-	struct list_head *entry;
 	struct request *rq;
 	int i = 0;

 	if (list_empty(head))
 		return 0;

-	entry = head->next;
 	do {
-		rq = list_entry(entry, struct request, table);
-		entry = entry->next;
+		rq = list_entry(head->next, struct request, table);
 		list_del(&rq->table);
 		kmem_cache_free(request_cachep, rq);
 		i++;
@@ -192,6 +189,8 @@ void blk_cleanup_queue(request_queue_t * q)
 	count -= __blk_cleanup_queue(&q->request_freelist[READ]);
 	count -= __blk_cleanup_queue(&q->request_freelist[WRITE]);
+	count -= __blk_cleanup_queue(&q->pending_freelist[READ]);
+	count -= __blk_cleanup_queue(&q->pending_freelist[WRITE]);

 	if (count)
 		printk("blk_cleanup_queue: leaked requests (%d)\n", count);
@@ -290,7 +289,6 @@ static inline int ll_new_segment(request_queue_t *q, struct request *req, int ma
 {
 	if (req->nr_segments < max_segments) {
 		req->nr_segments++;
-		q->elevator.nr_segments++;
 		return 1;
 	}
 	return 0;
@@ -327,7 +325,6 @@ static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
 	if (total_segments > max_segments)
 		return 0;

-	q->elevator.nr_segments -= same_segment;
 	req->nr_segments = total_segments;
 	return 1;
 }
@@ -364,7 +361,7 @@ static inline void __generic_unplug_device(request_queue_t *q)
 	}
 }

-static void generic_unplug_device(void *data)
+void generic_unplug_device(void *data)
 {
 	request_queue_t *q = (request_queue_t *) data;
 	unsigned long flags;
@@ -379,19 +376,24 @@ static void blk_init_free_list(request_queue_t *q)
 	struct request *rq;
 	int i;

+	INIT_LIST_HEAD(&q->request_freelist[READ]);
+	INIT_LIST_HEAD(&q->request_freelist[WRITE]);
+	INIT_LIST_HEAD(&q->pending_freelist[READ]);
+	INIT_LIST_HEAD(&q->pending_freelist[WRITE]);
+	q->pending_free[READ] = q->pending_free[WRITE] = 0;
+
 	/*
-	 * Divide requests in half between read and write. This used to
-	 * be a 2/3 advantage for reads, but now reads can steal from
-	 * the write free list.
+	 * Divide requests in half between read and write
 	 */
 	for (i = 0; i < QUEUE_NR_REQUESTS; i++) {
 		rq = kmem_cache_alloc(request_cachep, SLAB_KERNEL);
+		memset(rq, 0, sizeof(struct request));
 		rq->rq_status = RQ_INACTIVE;
 		list_add(&rq->table, &q->request_freelist[i & 1]);
 	}

 	init_waitqueue_head(&q->wait_for_request);
-	spin_lock_init(&q->request_lock);
+	spin_lock_init(&q->queue_lock);
 }

 static int __make_request(request_queue_t * q, int rw, struct buffer_head * bh);
@@ -426,14 +428,12 @@ static int __make_request(request_queue_t * q, int rw, struct buffer_head * bh);
  * blk_queue_headactive().
  *
  * Note:
- *    blk_init_queue() must be paired with a blk_cleanup-queue() call
+ *    blk_init_queue() must be paired with a blk_cleanup_queue() call
  *    when the block device is deactivated (such as at module unload).
  **/
 void blk_init_queue(request_queue_t * q, request_fn_proc * rfn)
 {
 	INIT_LIST_HEAD(&q->queue_head);
-	INIT_LIST_HEAD(&q->request_freelist[READ]);
-	INIT_LIST_HEAD(&q->request_freelist[WRITE]);
 	elevator_init(&q->elevator, ELEVATOR_LINUS);
 	blk_init_free_list(q);
 	q->request_fn = rfn;
@@ -455,7 +455,6 @@ void blk_init_queue(request_queue_t * q, request_fn_proc * rfn)
 	q->head_active = 1;
 }

 #define blkdev_free_rq(list) list_entry((list)->next, struct request, table);
 /*
  * Get a free request. io_request_lock must be held and interrupts
@@ -463,37 +462,16 @@ void blk_init_queue(request_queue_t * q, request_fn_proc * rfn)
  */
 static inline struct request *get_request(request_queue_t *q, int rw)
 {
-	struct list_head *list = &q->request_freelist[rw];
-	struct request *rq;
-
-	/*
-	 * Reads get preferential treatment and are allowed to steal
-	 * from the write free list if necessary.
-	 */
-	if (!list_empty(list)) {
-		rq = blkdev_free_rq(list);
-		goto got_rq;
-	}
-
-	/*
-	 * if the WRITE list is non-empty, we know that rw is READ
-	 * and that the READ list is empty. allow reads to 'steal'
-	 * from the WRITE list.
-	 */
-	if (!list_empty(&q->request_freelist[WRITE])) {
-		list = &q->request_freelist[WRITE];
-		rq = blkdev_free_rq(list);
-		goto got_rq;
-	}
+	struct request *rq = NULL;

-	return NULL;
-
-got_rq:
-	list_del(&rq->table);
-	rq->free_list = list;
-	rq->rq_status = RQ_ACTIVE;
-	rq->special = NULL;
-	rq->q = q;
+	if (!list_empty(&q->request_freelist[rw])) {
+		rq = blkdev_free_rq(&q->request_freelist[rw]);
+		list_del(&rq->table);
+		rq->rq_status = RQ_ACTIVE;
+		rq->special = NULL;
+		rq->q = q;
+	}

 	return rq;
 }
@@ -590,16 +568,22 @@ inline void drive_stat_acct (kdev_t dev, int rw,
  */
 static inline void add_request(request_queue_t * q, struct request * req,
-			       struct list_head *head, int lat)
+			       struct list_head *insert_here)
 {
 	int major;

 	drive_stat_acct(req->rq_dev, req->cmd, req->nr_sectors, 1);

+	if (!q->plugged && q->head_active && insert_here == &q->queue_head) {
+		spin_unlock_irq(&io_request_lock);
+		BUG();
+	}
+
 	/*
-	 * let selected elevator insert the request
+	 * elevator indicated where it wants this request to be
+	 * inserted at elevator_merge time
 	 */
-	q->elevator.elevator_fn(req, &q->elevator, &q->queue_head, head, lat);
+	list_add(&req->queue, insert_here);

 	/*
 	 * FIXME(eric) I don't understand why there is a need for this
@@ -617,20 +601,47 @@ static inline void add_request(request_queue_t * q, struct request * req,
 		(q->request_fn)(q);
 }

+void inline blk_refill_freelist(request_queue_t *q, int rw)
+{
+	if (q->pending_free[rw]) {
+		list_splice(&q->pending_freelist[rw], &q->request_freelist[rw]);
+		INIT_LIST_HEAD(&q->pending_freelist[rw]);
+		q->pending_free[rw] = 0;
+	}
+}
+
 /*
  * Must be called with io_request_lock held and interrupts disabled
  */
 void inline blkdev_release_request(struct request *req)
 {
+	request_queue_t *q = req->q;
+	int rw = req->cmd;
+
 	req->rq_status = RQ_INACTIVE;
+	req->q = NULL;

 	/*
 	 * Request may not have originated from ll_rw_blk
 	 */
-	if (req->free_list) {
-		list_add(&req->table, req->free_list);
-		req->free_list = NULL;
-		wake_up(&req->q->wait_for_request);
+	if (q) {
+		if (!list_empty(&q->request_freelist[rw])) {
+			blk_refill_freelist(q, rw);
+			list_add(&req->table, &q->request_freelist[rw]);
+			return;
+		}
+
+		/*
+		 * free list is empty, add to pending free list and
+		 * batch wakeups
+		 */
+		list_add(&req->table, &q->pending_freelist[rw]);
+		if (++q->pending_free[rw] >= (QUEUE_NR_REQUESTS >> 4)) {
+			int wake_up = q->pending_free[rw];
+			blk_refill_freelist(q, rw);
+			wake_up_nr(&q->wait_for_request, wake_up);
+		}
 	}
 }
@@ -658,9 +669,10 @@ static void attempt_merge(request_queue_t * q,
 	 * will have been updated to the appropriate number,
 	 * and we shouldn't do it here too.
 	 */
-	if(!(q->merge_requests_fn)(q, req, next, max_segments))
+	if(!q->merge_requests_fn(q, req, next, max_segments))
 		return;

+	q->elevator.elevator_merge_req_fn(req, next);
 	req->bhtail->b_reqnext = next->bh;
 	req->bhtail = next->bhtail;
 	req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
@@ -699,7 +711,7 @@ static int __make_request(request_queue_t * q, int rw,
 	int max_segments = MAX_SEGMENTS;
 	struct request * req = NULL, *freereq = NULL;
 	int rw_ahead, max_sectors, el_ret;
-	struct list_head *head;
+	struct list_head *head, *insert_here;
 	int latency;
 	elevator_t *elevator = &q->elevator;
@@ -713,6 +725,7 @@ static int __make_request(request_queue_t * q, int rw,
 			rw = READ;	/* drop into READ */
 		case READ:
 		case WRITE:
+			latency = elevator_request_latency(elevator, rw);
 			break;
 		default:
 			BUG();
@@ -741,38 +754,32 @@ static int __make_request(request_queue_t * q, int rw,
 	 */
 	max_sectors = get_max_sectors(bh->b_rdev);

-	latency = elevator_request_latency(elevator, rw);
-
+again:
 	/*
 	 * Now we acquire the request spinlock, we have to be mega careful
 	 * not to schedule or do something nonatomic
 	 */
-again:
 	spin_lock_irq(&io_request_lock);

-	/*
-	 * skip first entry, for devices with active queue head
-	 */
 	head = &q->queue_head;
-	if (q->head_active && !q->plugged)
-		head = head->next;
+	insert_here = head->prev;

 	if (list_empty(head)) {
 		q->plug_device_fn(q, bh->b_rdev); /* is atomic */
 		goto get_rq;
-	}
+	} else if (q->head_active && !q->plugged)
+		head = head->next;

-	el_ret = elevator->elevator_merge_fn(q, &req, bh, rw,
-					     &max_sectors, &max_segments);
+	el_ret = elevator->elevator_merge_fn(q, &req, head, bh, rw,
+					     max_sectors, max_segments);
 	switch (el_ret) {

 		case ELEVATOR_BACK_MERGE:
 			if (!q->back_merge_fn(q, req, bh, max_segments))
 				break;
+			elevator->elevator_merge_cleanup_fn(q, req, count);
 			req->bhtail->b_reqnext = bh;
 			req->bhtail = bh;
 			req->nr_sectors = req->hard_nr_sectors += count;
-			req->e = elevator;
 			drive_stat_acct(req->rq_dev, req->cmd, count, 0);
 			attempt_back_merge(q, req, max_sectors, max_segments);
 			goto out;
@@ -780,20 +787,28 @@ static int __make_request(request_queue_t * q, int rw,
 		case ELEVATOR_FRONT_MERGE:
 			if (!q->front_merge_fn(q, req, bh, max_segments))
 				break;
+			elevator->elevator_merge_cleanup_fn(q, req, count);
 			bh->b_reqnext = req->bh;
 			req->bh = bh;
 			req->buffer = bh->b_data;
 			req->current_nr_sectors = count;
 			req->sector = req->hard_sector = sector;
 			req->nr_sectors = req->hard_nr_sectors += count;
-			req->e = elevator;
 			drive_stat_acct(req->rq_dev, req->cmd, count, 0);
 			attempt_front_merge(q, head, req, max_sectors, max_segments);
 			goto out;

 		/*
 		 * elevator says don't/can't merge. get new request
 		 */
 		case ELEVATOR_NO_MERGE:
+			/*
+			 * use elevator hints as to where to insert the
+			 * request. if no hints, just add it to the back
+			 * of the queue
+			 */
+			if (req)
+				insert_here = &req->queue;
 			break;

 		default:
@@ -821,6 +836,7 @@ static int __make_request(request_queue_t * q, int rw,
 	}

 /* fill up the request-info, and add it to the queue */
+	req->elevator_sequence = latency;
 	req->cmd = rw;
 	req->errors = 0;
 	req->hard_sector = req->sector = sector;
@@ -833,13 +849,12 @@ static int __make_request(request_queue_t * q, int rw,
 	req->bh = bh;
 	req->bhtail = bh;
 	req->rq_dev = bh->b_rdev;
-	req->e = elevator;
-	add_request(q, req, head, latency);
+	add_request(q, req, insert_here);
 out:
-	if (!q->plugged)
-		(q->request_fn)(q);
 	if (freereq)
 		blkdev_release_request(freereq);
+	if (!q->plugged)
+		q->request_fn(q);
 	spin_unlock_irq(&io_request_lock);
 	return 0;
 end_io:
@@ -930,7 +945,6 @@ void generic_make_request (int rw, struct buffer_head * bh)
 			buffer_IO_error(bh);
 			break;
 		}
-
 	}
 	while (q->make_request_fn(q, rw, bh));
 }
@@ -1021,6 +1035,9 @@ void ll_rw_block(int rw, int nr, struct buffer_head * bhs[])
 	int correct_size;
 	int i;

+	if (!nr)
+		return;
+
 	major = MAJOR(bhs[0]->b_dev);

 	/* Determine correct block size for this device. */
@@ -1035,7 +1052,7 @@ void ll_rw_block(int rw, int nr, struct buffer_head * bhs[])
 	for (i = 0; i < nr; i++) {
 		struct buffer_head *bh;
 		bh = bhs[i];
-		if (bh->b_size != correct_size) {
+		if (bh->b_size % correct_size) {
 			printk(KERN_NOTICE "ll_rw_block: device %s: "
 			       "only %d-char blocks implemented (%u)\n",
 			       kdevname(bhs[0]->b_dev),
@@ -1138,10 +1155,6 @@ int end_that_request_first (struct request *req, int uptodate, char *name)

 void end_that_request_last(struct request *req)
 {
-	if (req->e) {
-		printk("end_that_request_last called with non-dequeued req\n");
-		BUG();
-	}
 	if (req->sem != NULL)
 		up(req->sem);
@@ -1279,9 +1292,11 @@ EXPORT_SYMBOL(end_that_request_first);
 EXPORT_SYMBOL(end_that_request_last);
 EXPORT_SYMBOL(blk_init_queue);
 EXPORT_SYMBOL(blk_get_queue);
+EXPORT_SYMBOL(__blk_get_queue);
 EXPORT_SYMBOL(blk_cleanup_queue);
 EXPORT_SYMBOL(blk_queue_headactive);
 EXPORT_SYMBOL(blk_queue_pluggable);
 EXPORT_SYMBOL(blk_queue_make_request);
 EXPORT_SYMBOL(generic_make_request);
 EXPORT_SYMBOL(blkdev_release_request);
+EXPORT_SYMBOL(generic_unplug_device);
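blkdev_release_request() above now parks freed requests on a pending list while the main free list is empty, and wakes sleepers once per batch of QUEUE_NR_REQUESTS >> 4 requests instead of once per request. A standalone sketch of that batching pattern, with hypothetical counters in place of the kernel's lists and waitqueues:

/* User-space model of the batched wakeup: freed items accumulate on a
 * pending pile, and one wake_up_nr()-style wakeup replaces a wakeup per
 * free. Illustrative only. */
#include <stdio.h>

#define NR_REQUESTS	512
#define BATCH		(NR_REQUESTS >> 4)	/* 32 */

static int free_count, pending_count, wakeups;

static void refill_freelist(void)
{
	free_count += pending_count;	/* splice pending back onto free list */
	pending_count = 0;
}

static void release_request(void)
{
	if (free_count) {
		refill_freelist();	/* opportunistic splice, no wakeup */
		free_count++;
		return;
	}
	/* free list empty: count toward a batch instead of waking per free */
	if (++pending_count >= BATCH) {
		wakeups++;		/* one wakeup covers the whole batch */
		refill_freelist();
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 256; i++)
		release_request();
	printf("wakeup calls: %d (vs %d unbatched)\n", wakeups, 256);
	return 0;
}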
@@ -392,7 +392,6 @@ static inline int pd_new_segment(request_queue_t *q, struct request *req, int ma
 	if (req->nr_segments < max_segments) {
 		req->nr_segments++;
-		q->elevator.nr_segments++;
 		return 1;
 	}
 	return 0;
@@ -432,7 +431,6 @@ static int pd_merge_requests_fn(request_queue_t *q, struct request *req,
 	if (total_segments > max_segments)
 		return 0;

-	q->elevator.nr_segments -= same_segment;
 	req->nr_segments = total_segments;
 	return 1;
 }
......
@@ -346,7 +346,6 @@ static inline int pf_new_segment(request_queue_t *q, struct request *req, int ma
 	if (req->nr_segments < max_segments) {
 		req->nr_segments++;
-		q->elevator.nr_segments++;
 		return 1;
 	}
 	return 0;
@@ -386,7 +385,6 @@ static int pf_merge_requests_fn(request_queue_t *q, struct request *req,
 	if (total_segments > max_segments)
 		return 0;

-	q->elevator.nr_segments -= same_segment;
 	req->nr_segments = total_segments;
 	return 1;
 }
......
@@ -392,7 +392,6 @@ static inline int i2ob_new_segment(request_queue_t *q, struct request *req,
 	if (req->nr_segments < max_segments) {
 		req->nr_segments++;
-		q->elevator.nr_segments++;
 		return 1;
 	}
 	return 0;
@@ -436,7 +435,6 @@ static int i2ob_merge_requests(request_queue_t *q,
 	if (total_segments > max_segments)
 		return 0;

-	q->elevator.nr_segments -= same_segment;
 	req->nr_segments = total_segments;
 	return 1;
 }
......
@@ -226,6 +226,9 @@ static int ide_build_sglist (ide_hwif_t *hwif, struct request *rq)
 		unsigned char *virt_addr = bh->b_data;
 		unsigned int size = bh->b_size;

+		if (nents >= PRD_ENTRIES)
+			return 0;
+
 		while ((bh = bh->b_reqnext) != NULL) {
 			if ((virt_addr + size) != (unsigned char *) bh->b_data)
 				break;
@@ -259,6 +262,9 @@ int ide_build_dmatable (ide_drive_t *drive, ide_dma_action_t func)
 	HWIF(drive)->sg_nents = i = ide_build_sglist(HWIF(drive), HWGROUP(drive)->rq);

+	if (!i)
+		return 0;
+
 	sg = HWIF(drive)->sg_table;
 	while (i && sg_dma_len(sg)) {
 		u32 cur_addr;
@@ -274,7 +280,7 @@ int ide_build_dmatable (ide_drive_t *drive, ide_dma_action_t func)
 		 */
 		while (cur_len) {
-			if (++count >= PRD_ENTRIES) {
+			if (count++ >= PRD_ENTRIES) {
 				printk("%s: DMA table too small\n", drive->name);
 				pci_unmap_sg(HWIF(drive)->pci_dev,
 					     HWIF(drive)->sg_table,
......
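The last hunk above fixes an off-by-one: with pre-increment, the overflow check fires while the PRD table still has a free slot, so only PRD_ENTRIES-1 entries were ever usable; post-increment allows the full table. A tiny demonstration of the two bounds:

/* Demonstrates the pre- vs post-increment bound fixed above. With a 4-slot
 * table, the old test rejects on the 4th insertion; the new test accepts
 * exactly 4 and rejects the 5th. */
#include <stdio.h>

#define PRD_ENTRIES 4

static int fill(int pre_increment)
{
	int count = 0, filled = 0, i;

	for (i = 0; i < PRD_ENTRIES + 1; i++) {
		if (pre_increment ? (++count >= PRD_ENTRIES)
				  : (count++ >= PRD_ENTRIES))
			break;		/* the "DMA table too small" path */
		filled++;
	}
	return filled;
}

int main(void)
{
	printf("old (++count): %d of %d slots used\n", fill(1), PRD_ENTRIES);
	printf("new (count++): %d of %d slots used\n", fill(0), PRD_ENTRIES);
	return 0;
}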
@@ -134,7 +134,7 @@ static inline void do_identify (ide_drive_t *drive, byte cmd)
 			break;
 		}
 #endif
-		printk ("CDROM");
+		printk ("CD/DVD-ROM");
 		break;
 	case ide_tape:
 		printk ("TAPE");
@@ -761,9 +761,10 @@ static void init_gendisk (ide_hwif_t *hwif)
 	for (unit = 0; unit < minors; ++unit) {
 		*bs++ = BLOCK_SIZE;
 #ifdef CONFIG_BLK_DEV_PDC4030
-		*max_sect++ = ((hwif->chipset == ide_pdc4030) ? 127 : MAX_SECTORS);
+		*max_sect++ = ((hwif->chipset == ide_pdc4030) ? 127 : 256);
 #else
-		*max_sect++ = MAX_SECTORS;
+		/* IDE can do up to 128K per request. */
+		*max_sect++ = 256;
 #endif
 		*max_ra++ = MAX_READAHEAD;
 	}
......
@@ -102,7 +102,7 @@ isdn_v110_open(unsigned char key, int hdrlen, int maxsize)
 	int i;
 	isdn_v110_stream *v;

-	if ((v = kmalloc(sizeof(isdn_v110_stream), GFP_KERNEL)) == NULL)
+	if ((v = kmalloc(sizeof(isdn_v110_stream), GFP_ATOMIC)) == NULL)
 		return NULL;
 	memset(v, 0, sizeof(isdn_v110_stream));
 	v->key = key;
@@ -134,7 +134,7 @@ isdn_v110_open(unsigned char key, int hdrlen, int maxsize)
 	v->b = 0;
 	v->skbres = hdrlen;
 	v->maxsize = maxsize - hdrlen;
-	if ((v->encodebuf = kmalloc(maxsize, GFP_KERNEL)) == NULL) {
+	if ((v->encodebuf = kmalloc(maxsize, GFP_ATOMIC)) == NULL) {
 		kfree(v);
 		return NULL;
 	}
......
@@ -951,7 +951,6 @@ do_dasd_request (request_queue_t *queue)
 	dasd_debug ((unsigned long) __builtin_return_address(0));
 	go = 1;
 	while (go && !list_empty(&queue->queue_head)) {
-		req = blkdev_entry_next_request(&queue->queue_head);
 		req = blkdev_entry_next_request(&queue->queue_head);
 		di = DEVICE_NR (req->rq_dev);
 		dasd_debug ((unsigned long) req); /* req */
......
@@ -776,7 +776,7 @@ void print_sense_internal(const char * devclass,
 		printk("%s%s: sns = %2x %2x\n", devclass,
 		       kdevname(dev), sense_buffer[0], sense_buffer[2]);

-	printk("Non-extended sense class %d code 0x%0x ", sense_class, code);
+	printk("Non-extended sense class %d code 0x%0x\n", sense_class, code);
 	s = 4;
 	}
......
@@ -50,45 +50,32 @@
  * This entire source file deals with the new queueing code.
  */

 /*
- * Function:	scsi_insert_special_cmd()
+ * Function:	__scsi_insert_special()
  *
- * Purpose:	Insert pre-formed command into request queue.
+ * Purpose:	worker for scsi_insert_special_*()
  *
- * Arguments:	SCpnt	- command that is ready to be queued.
- *		at_head	- boolean.  True if we should insert at head
- *		of queue, false if we should insert at tail.
+ * Arguments:	q	- request queue where request should be inserted
+ *		rq	- request to be inserted
+ *		data	- private data
+ *		at_head	- insert request at head or tail of queue
  *
- * Lock status:	Assumed that lock is not held upon entry.
+ * Lock status:	Assumed that io_request_lock is not held upon entry.
  *
  * Returns:	Nothing
- *
- * Notes:	This function is called from character device and from
- *		ioctl types of functions where the caller knows exactly
- *		what SCSI command needs to be issued.  The idea is that
- *		we merely inject the command into the queue (at the head
- *		for now), and then call the queue request function to actually
- *		process it.
  */
-int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
+static void __scsi_insert_special(request_queue_t *q, struct request *rq,
+				  void *data, int at_head)
 {
 	unsigned long flags;
-	request_queue_t *q;

 	ASSERT_LOCK(&io_request_lock, 0);

-	/*
-	 * The SCpnt already contains a request structure - we will doctor the
-	 * thing up with the appropriate values and use that in the actual
-	 * request queue.
-	 */
-	q = &SCpnt->device->request_queue;
-	SCpnt->request.cmd = SPECIAL;
-	SCpnt->request.special = (void *) SCpnt;
-	SCpnt->request.q = NULL;
-	SCpnt->request.free_list = NULL;
-	SCpnt->request.nr_segments = 0;
+	rq->cmd = SPECIAL;
+	rq->special = data;
+	rq->q = NULL;
+	rq->nr_segments = 0;
+	rq->elevator_sequence = 0;

 	/*
 	 * We have the option of inserting the head or the tail of the queue.
@@ -98,27 +85,41 @@ int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
 	 */
 	spin_lock_irqsave(&io_request_lock, flags);

-	if (at_head) {
-		list_add(&SCpnt->request.queue, &q->queue_head);
-	} else {
-		/*
-		 * FIXME(eric) - we always insert at the tail of the
-		 * list.  Otherwise ioctl commands would always take
-		 * precedence over normal I/O.  An ioctl on a busy
-		 * disk might be delayed indefinitely because the
-		 * request might not float high enough in the queue
-		 * to be scheduled.
-		 */
-		list_add_tail(&SCpnt->request.queue, &q->queue_head);
-	}
+	if (at_head)
+		list_add(&rq->queue, &q->queue_head);
+	else
+		list_add_tail(&rq->queue, &q->queue_head);

-	/*
-	 * Now hit the requeue function for the queue.  If the host is
-	 * already busy, so be it - we have nothing special to do.  If
-	 * the host can queue it, then send it off.
-	 */
 	q->request_fn(q);
 	spin_unlock_irqrestore(&io_request_lock, flags);
+}
+
+/*
+ * Function:	scsi_insert_special_cmd()
+ *
+ * Purpose:	Insert pre-formed command into request queue.
+ *
+ * Arguments:	SCpnt	- command that is ready to be queued.
+ *		at_head	- boolean.  True if we should insert at head
+ *		of queue, false if we should insert at tail.
+ *
+ * Lock status:	Assumed that lock is not held upon entry.
+ *
+ * Returns:	Nothing
+ *
+ * Notes:	This function is called from character device and from
+ *		ioctl types of functions where the caller knows exactly
+ *		what SCSI command needs to be issued.  The idea is that
+ *		we merely inject the command into the queue (at the head
+ *		for now), and then call the queue request function to actually
+ *		process it.
+ */
+int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
+{
+	request_queue_t *q = &SCpnt->device->request_queue;
+
+	__scsi_insert_special(q, &SCpnt->request, SCpnt, at_head);
 	return 0;
 }
@@ -144,51 +145,9 @@ int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
  */
 int scsi_insert_special_req(Scsi_Request * SRpnt, int at_head)
 {
-	unsigned long flags;
-	request_queue_t *q;
-
-	ASSERT_LOCK(&io_request_lock, 0);
-
-	/*
-	 * The SCpnt already contains a request structure - we will doctor the
-	 * thing up with the appropriate values and use that in the actual
-	 * request queue.
-	 */
-	q = &SRpnt->sr_device->request_queue;
-	SRpnt->sr_request.cmd = SPECIAL;
-	SRpnt->sr_request.special = (void *) SRpnt;
-	SRpnt->sr_request.q = NULL;
-	SRpnt->sr_request.nr_segments = 0;
-
-	/*
-	 * We have the option of inserting the head or the tail of the queue.
-	 * Typically we use the tail for new ioctls and so forth.  We use the
-	 * head of the queue for things like a QUEUE_FULL message from a
-	 * device, or a host that is unable to accept a particular command.
-	 */
-	spin_lock_irqsave(&io_request_lock, flags);
-
-	if (at_head) {
-		list_add(&SRpnt->sr_request.queue, &q->queue_head);
-	} else {
-		/*
-		 * FIXME(eric) - we always insert at the tail of the
-		 * list.  Otherwise ioctl commands would always take
-		 * precedence over normal I/O.  An ioctl on a busy
-		 * disk might be delayed indefinitely because the
-		 * request might not float high enough in the queue
-		 * to be scheduled.
-		 */
-		list_add_tail(&SRpnt->sr_request.queue, &q->queue_head);
-	}
-
-	/*
-	 * Now hit the requeue function for the queue.  If the host is
-	 * already busy, so be it - we have nothing special to do.  If
-	 * the host can queue it, then send it off.
-	 */
-	q->request_fn(q);
-	spin_unlock_irqrestore(&io_request_lock, flags);
+	request_queue_t *q = &SRpnt->sr_device->request_queue;

+	__scsi_insert_special(q, &SRpnt->sr_request, SRpnt, at_head);
 	return 0;
 }
@@ -862,17 +821,6 @@ void scsi_request_fn(request_queue_t * q)
 		}
 		SHpnt = SDpnt->host;

-		/*
-		 * If the host for this device is in error recovery mode, don't
-		 * do anything at all here.  When the host leaves error recovery
-		 * mode, it will automatically restart things and start queueing
-		 * commands again.  Same goes if the queue is actually plugged,
-		 * if the device itself is blocked, or if the host is fully
-		 * occupied.
-		 */
-		if (SHpnt->in_recovery || q->plugged)
-			return;
-
 		/*
 		 * To start with, we keep looping until the queue is empty, or until
 		 * the host is no longer able to accept any more requests.
@@ -896,10 +844,11 @@ void scsi_request_fn(request_queue_t * q)
 		    || (SHpnt->host_blocked)
 		    || (SHpnt->host_self_blocked)) {
 			/*
-			 * If we are unable to process any commands at all for this
-			 * device, then we consider it to be starved.  What this means
-			 * is that there are no outstanding commands for this device
-			 * and hence we need a little help getting it started again
+			 * If we are unable to process any commands at all for
+			 * this device, then we consider it to be starved.
+			 * What this means is that there are no outstanding
+			 * commands for this device and hence we need a
+			 * little help getting it started again
 			 * once the host isn't quite so busy.
 			 */
 			if (SDpnt->device_busy == 0) {
@@ -1000,8 +949,8 @@ void scsi_request_fn(request_queue_t * q)
 			}
 			/*
 			 * If so, we are ready to do something.  Bump the count
-			 * while the queue is locked and then break out of the loop.
-			 * Otherwise loop around and try another request.
+			 * while the queue is locked and then break out of the
+			 * loop. Otherwise loop around and try another request.
 			 */
 			if (!SCpnt) {
 				break;
@@ -1029,8 +978,9 @@ void scsi_request_fn(request_queue_t * q)
 			memcpy(&SCpnt->request, req, sizeof(struct request));

 			/*
-			 * We have copied the data out of the request block - it is now in
-			 * a field in SCpnt.  Release the request block.
+			 * We have copied the data out of the request block -
+			 * it is now in a field in SCpnt.  Release the request
+			 * block.
 			 */
 			blkdev_release_request(req);
 		}
@@ -1047,12 +997,14 @@ void scsi_request_fn(request_queue_t * q)
 		/*
 		 * This will do a couple of things:
 		 *  1) Fill in the actual SCSI command.
-		 *  2) Fill in any other upper-level specific fields (timeout).
+		 *  2) Fill in any other upper-level specific fields
+		 *     (timeout).
 		 *
-		 * If this returns 0, it means that the request failed (reading
-		 * past end of disk, reading offline device, etc).   This won't
-		 * actually talk to the device, but some kinds of consistency
-		 * checking may cause the request to be rejected immediately.
+		 * If this returns 0, it means that the request failed
+		 * (reading past end of disk, reading offline device,
+		 * etc).   This won't actually talk to the device, but
+		 * some kinds of consistency checking may cause the
+		 * request to be rejected immediately.
 		 */
 		if (STpnt == NULL) {
 			STpnt = scsi_get_request_dev(req);
@@ -1103,8 +1055,8 @@ void scsi_request_fn(request_queue_t * q)
 		scsi_dispatch_cmd(SCpnt);

 		/*
-		 * Now we need to grab the lock again.  We are about to mess with
-		 * the request queue and try to find another command.
+		 * Now we need to grab the lock again.  We are about to mess
+		 * with the request queue and try to find another command.
 		 */
 		spin_lock_irq(&io_request_lock);
 	}
......
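The __scsi_insert_special() change above is a classic refactor: two near-identical insertion paths collapse into one worker that takes the queue, the request, and a head/tail flag, leaving the public entry points as thin wrappers. A minimal user-space sketch of the same shape, with stand-in types rather than the SCSI layer's:

/* Illustrative only: one worker owns the insertion logic, wrappers mirror
 * scsi_insert_special_cmd()/_req(). */
#include <stdio.h>

struct rq { const char *name; struct rq *next; };
struct queue { struct rq *head; };

/* worker: the only place that knows how insertion works */
static void insert_special(struct queue *q, struct rq *rq, int at_head)
{
	if (at_head || !q->head) {
		rq->next = q->head;
		q->head = rq;
	} else {
		struct rq *t = q->head;
		while (t->next)
			t = t->next;
		t->next = rq;
		rq->next = NULL;
	}
}

/* thin wrappers, analogous to the two public entry points */
static void insert_cmd(struct queue *q, struct rq *rq) { insert_special(q, rq, 1); }
static void insert_req(struct queue *q, struct rq *rq) { insert_special(q, rq, 0); }

int main(void)
{
	struct queue q = { 0 };
	struct rq a = { "ioctl", 0 }, b = { "requeue", 0 };

	insert_req(&q, &a);	/* tail: ordinary ioctl */
	insert_cmd(&q, &b);	/* head: e.g. a QUEUE_FULL retry */
	for (struct rq *t = q.head; t; t = t->next)
		printf("%s\n", t->name);
	return 0;
}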
@@ -324,7 +324,6 @@ static inline int scsi_new_mergeable(request_queue_t * q,
 	    req->nr_segments >= SHpnt->sg_tablesize)
 		return 0;
 	req->nr_segments++;
-	q->elevator.nr_segments++;
 	return 1;
 }
@@ -341,11 +340,8 @@ static inline int scsi_new_segment(request_queue_t * q,
 	if (req->nr_hw_segments >= SHpnt->sg_tablesize ||
 	    req->nr_segments >= SHpnt->sg_tablesize)
 		return 0;
-	if (req->nr_segments >= max_segments)
-		return 0;
 	req->nr_hw_segments++;
 	req->nr_segments++;
-	q->elevator.nr_segments++;
 	return 1;
 }
 #else
@@ -361,7 +357,6 @@ static inline int scsi_new_segment(request_queue_t * q,
 		 * counter.
 		 */
 		req->nr_segments++;
-		q->elevator.nr_segments++;
 		return 1;
 	} else {
 		return 0;
@@ -417,8 +412,10 @@ __inline static int __scsi_back_merge_fn(request_queue_t * q,
 	SDpnt = (Scsi_Device *) q->queuedata;
 	SHpnt = SDpnt->host;

+#ifdef DMA_CHUNK_SIZE
 	if (max_segments > 64)
 		max_segments = 64;
+#endif

 	if (use_clustering) {
 		/*
@@ -471,8 +468,10 @@ __inline static int __scsi_front_merge_fn(request_queue_t * q,
 	SDpnt = (Scsi_Device *) q->queuedata;
 	SHpnt = SDpnt->host;

+#ifdef DMA_CHUNK_SIZE
 	if (max_segments > 64)
 		max_segments = 64;
+#endif

 	if (use_clustering) {
 		/*
@@ -601,10 +600,10 @@ __inline static int __scsi_merge_requests_fn(request_queue_t * q,
 	SDpnt = (Scsi_Device *) q->queuedata;
 	SHpnt = SDpnt->host;

+#ifdef DMA_CHUNK_SIZE
 	if (max_segments > 64)
 		max_segments = 64;

-#ifdef DMA_CHUNK_SIZE
 	/* If it would not fit into prepared memory space for sg chain,
 	 * then don't allow the merge.
 	 */
@@ -664,7 +663,6 @@ __inline static int __scsi_merge_requests_fn(request_queue_t * q,
 	 * This one is OK.  Let it go.
 	 */
 	req->nr_segments += next->nr_segments - 1;
-	q->elevator.nr_segments--;
 #ifdef DMA_CHUNK_SIZE
 	req->nr_hw_segments += next->nr_hw_segments - 1;
 #endif
......
@@ -694,6 +694,7 @@ static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
 			    (void *)SRpnt->sr_buffer, hp->dxfer_len,
 			    sg_cmd_done_bh, timeout, SG_DEFAULT_RETRIES);
 	/* dxfer_len overwrites SRpnt->sr_bufflen, hence need for b_malloc_len */
+	generic_unplug_device(&SRpnt->sr_device->request_queue);
 	return 0;
 }
......
@@ -671,12 +671,14 @@ void get_capabilities(int i)
 	cmd[3] = cmd[5] = 0;
 	rc = sr_do_ioctl(i, cmd, buffer, 128, 1, SCSI_DATA_READ, NULL);

-	if (-EINVAL == rc) {
-		/* failed, drive has'nt this mode page */
+	if (rc) {
+		/* failed, drive doesn't have capabilities mode page */
 		scsi_CDs[i].cdi.speed = 1;
-		/* disable speed select, drive probably can't do this either */
-		scsi_CDs[i].cdi.mask |= CDC_SELECT_SPEED;
+		scsi_CDs[i].cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
+					 CDC_DVD | CDC_DVD_RAM |
+					 CDC_SELECT_DISC | CDC_SELECT_SPEED);
 		scsi_free(buffer, 512);
+		printk("sr%i: scsi-1 drive\n");
 		return;
 	}
 	n = buffer[3] + 4;
......
@@ -3,6 +3,7 @@
  */

 #ifdef __KERNEL__
+#include <linux/config.h>
 #include <linux/sched.h>
 #include <linux/reiserfs_fs.h>
 #include <linux/locks.h>
......
@@ -12,6 +12,7 @@
  */

 #ifdef __KERNEL__
+#include <linux/config.h>
 #include <linux/sched.h>
 #include <linux/locks.h>
 #include <linux/reiserfs_fs.h>
......
@@ -4,6 +4,7 @@

 #ifdef __KERNEL__
+#include <linux/config.h>
 #include <linux/string.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
@@ -142,6 +143,10 @@ static int reiserfs_readdir (struct file * filp, void * dirent, filldir_t filldi
 		if (!d_name[d_reclen - 1])
 		    d_reclen = strlen (d_name);

+		if (d_reclen > REISERFS_MAX_NAME_LEN(inode->i_sb->s_blocksize)){
+		    /* too big to send back to VFS */
+		    continue ;
+		}
 		d_off = deh_offset (deh);
 		filp->f_pos = d_off ;
 		d_ino = deh_objectid (deh);
......
@@ -18,6 +18,7 @@

 #ifdef __KERNEL__
+#include <linux/config.h>
 #include <asm/uaccess.h>
 #include <linux/sched.h>
 #include <linux/reiserfs_fs.h>
......
@@ -64,7 +64,7 @@ static int reiserfs_file_release (struct inode * inode, struct file * filp)
        item(s) had to be converted, then it may have to be
        indirect2direct converted */
     windex = push_journal_writer("file_release") ;
-    reiserfs_truncate_file(inode) ;
+    reiserfs_truncate_file(inode, 0) ;
     pop_journal_writer(windex) ;
   }
   up (&inode->i_sem);
@@ -72,6 +72,9 @@ static int reiserfs_file_release (struct inode * inode, struct file * filp)
   return 0;
 }

+static void reiserfs_vfs_truncate_file(struct inode *inode) {
+    reiserfs_truncate_file(inode, 1) ;
+}
+
 /* Sync a reiserfs file. */
 static int reiserfs_sync_file(
@@ -115,7 +118,7 @@ struct file_operations reiserfs_file_operations = {

 struct inode_operations reiserfs_file_inode_operations = {
-    truncate:	reiserfs_truncate_file,
+    truncate:	reiserfs_vfs_truncate_file,
 };
@@ -37,6 +37,7 @@

 #ifdef __KERNEL__
+#include <linux/config.h>
 #include <linux/sched.h>
 #include <linux/string.h>
 #include <linux/locks.h>
......
@@ -4,6 +4,7 @@

 #ifdef __KERNEL__
+#include <linux/config.h>
 #include <asm/uaccess.h>
 #include <linux/string.h>
 #include <linux/sched.h>
......
@@ -3,6 +3,7 @@
  */

 #ifdef __KERNEL__
+#include <linux/config.h>
 #include <linux/sched.h>
 #include <linux/reiserfs_fs.h>
 #include <linux/locks.h>
@@ -1538,7 +1539,7 @@ static int grab_tail_page(struct inode *p_s_inode,
 **
 ** some code taken from block_truncate_page
 */
-void reiserfs_truncate_file(struct inode *p_s_inode) {
+void reiserfs_truncate_file(struct inode *p_s_inode, int update_timestamps) {
     struct reiserfs_transaction_handle th ;
     int windex ;
@@ -1571,7 +1572,7 @@ void reiserfs_truncate_file(struct inode *p_s_inode) {
     prevent_flush_page_lock(page, p_s_inode) ;
     journal_begin(&th, p_s_inode->i_sb, JOURNAL_PER_BALANCE_CNT * 2 ) ;
     windex = push_journal_writer("reiserfs_vfs_truncate_file") ;
-    reiserfs_do_truncate (&th, p_s_inode, page, 1/*update timestamps*/) ;
+    reiserfs_do_truncate (&th, p_s_inode, page, update_timestamps) ;
     pop_journal_writer(windex) ;
     journal_end(&th, p_s_inode->i_sb, JOURNAL_PER_BALANCE_CNT * 2 ) ;
     allow_flush_page_lock(page, p_s_inode) ;
......
@@ -43,6 +43,7 @@

 #ifdef __KERNEL__
+#include <linux/config.h>
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -415,7 +416,7 @@ inline int mark_buffer_not_journaled(struct buffer_head *bh) {
 ** kernel lock held.  caller is the string printed just before calling BUG()
 */
 void reiserfs_check_lock_depth(char *caller) {
-#ifdef __SMP__
+#ifdef CONFIG_SMP
   if (current->lock_depth < 0) {
     printk("%s called without kernel lock held\n", caller) ;
     show_reiserfs_locks() ;
@@ -865,14 +866,20 @@ static int flush_older_journal_lists(struct super_block *p_s_sb, struct reiserfs
   return 0 ;
 }

-static void submit_logged_buffer(struct buffer_head *bh) {
-  mark_buffer_notjournal_new(bh) ;
+static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate) {
   if (buffer_journaled(bh)) {
     reiserfs_warning("clm-2084: pinned buffer %u:%s sent to disk\n",
                      bh->b_blocknr, kdevname(bh->b_dev)) ;
   }
-  set_bit(BH_Dirty, &bh->b_state) ;
-  ll_rw_block(WRITE, 1, &bh) ;
+  mark_buffer_uptodate(bh, uptodate) ;
+  unlock_buffer(bh) ;
+}
+
+static void submit_logged_buffer(struct buffer_head *bh) {
+  lock_buffer(bh) ;
+  bh->b_end_io = reiserfs_end_buffer_io_sync ;
+  mark_buffer_notjournal_new(bh) ;
+  clear_bit(BH_Dirty, &bh->b_state) ;
+  submit_bh(WRITE, bh) ;
 }

 /* flush a journal list, both commit and real blocks
......
@@ -4,6 +4,7 @@

 #ifdef __KERNEL__
+#include <linux/config.h>
 #include <asm/uaccess.h>
 #include <linux/string.h>
 #include <linux/sched.h>
......
@@ -4,6 +4,7 @@

 #ifdef __KERNEL__
+#include <linux/config.h>
 #include <linux/sched.h>
 #include <linux/reiserfs_fs.h>
 #include <linux/smp_lock.h>
......
@@ -3,6 +3,7 @@
  */

 #ifdef __KERNEL__
+#include <linux/config.h>
 #include <linux/string.h>
 #include <linux/locks.h>
 #include <linux/sched.h>
......
@@ -3,7 +3,7 @@
  */

 #ifdef __KERNEL__
-#include <stdarg.h>
+#include <linux/config.h>
 #include <linux/sched.h>
 #include <linux/fs.h>
 #include <linux/reiserfs_fs.h>
@@ -12,11 +12,11 @@
 #else

 #include "nokernel.h"
-#include <stdarg.h>
 #include <limits.h>

 #endif

+#include <stdarg.h>

 static char error_buf[1024];
 static char fmt_buf[1024];
......
...@@ -54,6 +54,7 @@ ...@@ -54,6 +54,7 @@
*/ */
#ifdef __KERNEL__ #ifdef __KERNEL__
#include <linux/config.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/locks.h> #include <linux/locks.h>
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
#ifdef __KERNEL__ #ifdef __KERNEL__
#include <linux/config.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
#ifdef __KERNEL__ #ifdef __KERNEL__
#include <linux/config.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/pagemap.h> #include <linux/pagemap.h>
#include <linux/reiserfs_fs.h> #include <linux/reiserfs_fs.h>
......
...@@ -87,10 +87,6 @@ void initrd_init(void); ...@@ -87,10 +87,6 @@ void initrd_init(void);
static inline void blkdev_dequeue_request(struct request * req) static inline void blkdev_dequeue_request(struct request * req)
{ {
if (req->e) {
req->e->dequeue_fn(req);
req->e = NULL;
}
list_del(&req->queue); list_del(&req->queue);
} }
......
...@@ -23,8 +23,6 @@ struct request { ...@@ -23,8 +23,6 @@ struct request {
int elevator_sequence; int elevator_sequence;
struct list_head table; struct list_head table;
struct list_head *free_list;
volatile int rq_status; /* should split this into a few status bits */ volatile int rq_status; /* should split this into a few status bits */
#define RQ_INACTIVE (-1) #define RQ_INACTIVE (-1)
#define RQ_ACTIVE 1 #define RQ_ACTIVE 1
...@@ -47,7 +45,6 @@ struct request { ...@@ -47,7 +45,6 @@ struct request {
struct buffer_head * bh; struct buffer_head * bh;
struct buffer_head * bhtail; struct buffer_head * bhtail;
request_queue_t *q; request_queue_t *q;
elevator_t *e;
}; };
#include <linux/elevator.h> #include <linux/elevator.h>
...@@ -69,7 +66,7 @@ typedef void (unplug_device_fn) (void *q); ...@@ -69,7 +66,7 @@ typedef void (unplug_device_fn) (void *q);
/* /*
* Default nr free requests per queue * Default nr free requests per queue
*/ */
#define QUEUE_NR_REQUESTS 256 #define QUEUE_NR_REQUESTS 512
struct request_queue struct request_queue
{ {
...@@ -77,6 +74,8 @@ struct request_queue ...@@ -77,6 +74,8 @@ struct request_queue
* the queue request freelist, one for reads and one for writes * the queue request freelist, one for reads and one for writes
*/ */
struct list_head request_freelist[2]; struct list_head request_freelist[2];
struct list_head pending_freelist[2];
int pending_free[2];
/* /*
* Together with queue_head for cacheline sharing * Together with queue_head for cacheline sharing
...@@ -116,7 +115,7 @@ struct request_queue ...@@ -116,7 +115,7 @@ struct request_queue
* Is meant to protect the queue in the future instead of * Is meant to protect the queue in the future instead of
* io_request_lock * io_request_lock
*/ */
spinlock_t request_lock; spinlock_t queue_lock;
/* /*
* Tasks wait here for free request * Tasks wait here for free request
...@@ -152,6 +151,7 @@ extern void grok_partitions(struct gendisk *dev, int drive, unsigned minors, lon ...@@ -152,6 +151,7 @@ extern void grok_partitions(struct gendisk *dev, int drive, unsigned minors, lon
extern void register_disk(struct gendisk *dev, kdev_t first, unsigned minors, struct block_device_operations *ops, long size); extern void register_disk(struct gendisk *dev, kdev_t first, unsigned minors, struct block_device_operations *ops, long size);
extern void generic_make_request(int rw, struct buffer_head * bh); extern void generic_make_request(int rw, struct buffer_head * bh);
extern request_queue_t *blk_get_queue(kdev_t dev); extern request_queue_t *blk_get_queue(kdev_t dev);
extern inline request_queue_t *__blk_get_queue(kdev_t dev);
extern void blkdev_release_request(struct request *); extern void blkdev_release_request(struct request *);
/* /*
...@@ -162,6 +162,7 @@ extern void blk_cleanup_queue(request_queue_t *); ...@@ -162,6 +162,7 @@ extern void blk_cleanup_queue(request_queue_t *);
extern void blk_queue_headactive(request_queue_t *, int); extern void blk_queue_headactive(request_queue_t *, int);
extern void blk_queue_pluggable(request_queue_t *, plug_device_fn *); extern void blk_queue_pluggable(request_queue_t *, plug_device_fn *);
extern void blk_queue_make_request(request_queue_t *, make_request_fn *); extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
extern void generic_unplug_device(void *);
extern int * blk_size[MAX_BLKDEV]; extern int * blk_size[MAX_BLKDEV];
...@@ -175,9 +176,8 @@ extern int * max_sectors[MAX_BLKDEV]; ...@@ -175,9 +176,8 @@ extern int * max_sectors[MAX_BLKDEV];
extern int * max_segments[MAX_BLKDEV]; extern int * max_segments[MAX_BLKDEV];
#define MAX_SECTORS 254 #define MAX_SEGMENTS 128
#define MAX_SECTORS (MAX_SEGMENTS*8)
#define MAX_SEGMENTS MAX_SECTORS
#define PageAlignSize(size) (((size) + PAGE_SIZE -1) & PAGE_MASK) #define PageAlignSize(size) (((size) + PAGE_SIZE -1) & PAGE_MASK)
......
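The limit rework above now derives the sector cap from the segment cap instead of the reverse. The arithmetic, assuming 512-byte sectors:

    /* MAX_SECTORS = MAX_SEGMENTS * 8 = 128 * 8 = 1024 sectors,
     * i.e. at most 1024 * 512 bytes = 512 KB per request, while the
     * per-queue request free list doubles from 256 to 512 entries
     * (QUEUE_NR_REQUESTS above). */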
...@@ -7,34 +7,32 @@ typedef void (elevator_fn) (struct request *, elevator_t *, ...@@ -7,34 +7,32 @@ typedef void (elevator_fn) (struct request *, elevator_t *,
struct list_head *, struct list_head *,
struct list_head *, int); struct list_head *, int);
typedef int (elevator_merge_fn) (request_queue_t *, struct request **, typedef int (elevator_merge_fn) (request_queue_t *, struct request **, struct list_head *,
struct buffer_head *, int, int *, int *); struct buffer_head *, int, int, int);
typedef void (elevator_dequeue_fn) (struct request *); typedef void (elevator_merge_cleanup_fn) (request_queue_t *, struct request *, int);
typedef void (elevator_merge_req_fn) (struct request *, struct request *);
struct elevator_s struct elevator_s
{ {
int sequence;
int read_latency; int read_latency;
int write_latency; int write_latency;
int max_bomb_segments;
unsigned int nr_segments;
int read_pendings;
elevator_fn * elevator_fn;
elevator_merge_fn *elevator_merge_fn; elevator_merge_fn *elevator_merge_fn;
elevator_dequeue_fn *dequeue_fn; elevator_merge_cleanup_fn *elevator_merge_cleanup_fn;
elevator_merge_req_fn *elevator_merge_req_fn;
unsigned int queue_ID; unsigned int queue_ID;
}; };
void elevator_noop(struct request *, elevator_t *, struct list_head *, struct list_head *, int); int elevator_noop_merge(request_queue_t *, struct request **, struct list_head *, struct buffer_head *, int, int, int);
int elevator_noop_merge(request_queue_t *, struct request **, struct buffer_head *, int, int *, int *); void elevator_noop_merge_cleanup(request_queue_t *, struct request *, int);
void elevator_noop_dequeue(struct request *); void elevator_noop_merge_req(struct request *, struct request *);
void elevator_linus(struct request *, elevator_t *, struct list_head *, struct list_head *, int);
int elevator_linus_merge(request_queue_t *, struct request **, struct buffer_head *, int, int *, int *); int elevator_linus_merge(request_queue_t *, struct request **, struct list_head *, struct buffer_head *, int, int, int);
void elevator_linus_merge_cleanup(request_queue_t *, struct request *, int);
void elevator_linus_merge_req(struct request *, struct request *);
typedef struct blkelv_ioctl_arg_s { typedef struct blkelv_ioctl_arg_s {
int queue_ID; int queue_ID;
...@@ -69,6 +67,10 @@ extern void elevator_init(elevator_t *, elevator_t); ...@@ -69,6 +67,10 @@ extern void elevator_init(elevator_t *, elevator_t);
(s1)->sector < (s2)->sector)) || \ (s1)->sector < (s2)->sector)) || \
(s1)->rq_dev < (s2)->rq_dev) (s1)->rq_dev < (s2)->rq_dev)
#define BHRQ_IN_ORDER(bh, rq) \
(((bh)->b_rdev == (rq)->rq_dev && \
(bh)->b_rsector < (rq)->sector))
static inline int elevator_request_latency(elevator_t * elevator, int rw) static inline int elevator_request_latency(elevator_t * elevator, int rw)
{ {
int latency; int latency;
...@@ -82,34 +84,22 @@ static inline int elevator_request_latency(elevator_t * elevator, int rw) ...@@ -82,34 +84,22 @@ static inline int elevator_request_latency(elevator_t * elevator, int rw)
#define ELEVATOR_NOOP \ #define ELEVATOR_NOOP \
((elevator_t) { \ ((elevator_t) { \
0, /* sequence */ \
\
0, /* read_latency */ \ 0, /* read_latency */ \
0, /* write_latency */ \ 0, /* write_latency */ \
0, /* max_bomb_segments */ \
\
0, /* nr_segments */ \
0, /* read_pendings */ \
\ \
elevator_noop, /* elevator_fn */ \
elevator_noop_merge, /* elevator_merge_fn */ \ elevator_noop_merge, /* elevator_merge_fn */ \
elevator_noop_dequeue, /* dequeue_fn */ \ elevator_noop_merge_cleanup, /* elevator_merge_cleanup_fn */ \
elevator_noop_merge_req, /* elevator_merge_req_fn */ \
}) })
#define ELEVATOR_LINUS \ #define ELEVATOR_LINUS \
((elevator_t) { \ ((elevator_t) { \
0, /* not used */ \ 8192, /* read passovers */ \
\ 16384, /* write passovers */ \
1000000, /* read passovers */ \
2000000, /* write passovers */ \
0, /* max_bomb_segments */ \
\
0, /* not used */ \
0, /* not used */ \
\ \
elevator_linus, /* elevator_fn */ \
elevator_linus_merge, /* elevator_merge_fn */ \ elevator_linus_merge, /* elevator_merge_fn */ \
elevator_noop_dequeue, /* dequeue_fn */ \ elevator_linus_merge_cleanup, /* elevator_merge_cleanup_fn */ \
elevator_linus_merge_req, /* elevator_merge_req_fn */ \
}) })
#endif #endif
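With elevator_fn and dequeue_fn gone, an elevator is now just the three merge callbacks plus the latency knobs, selected by copying a template into the queue. A hypothetical setup sketch using only declarations visible in this patch (dev is assumed to be a valid kdev_t):

    /* Sketch: switch a queue to the noop elevator, e.g. for a device
     * that does its own internal ordering. elevator_init() copies the
     * ELEVATOR_NOOP template into the queue's embedded elevator_t. */
    request_queue_t *q = blk_get_queue(dev);
    elevator_init(&q->elevator, ELEVATOR_NOOP);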
...@@ -926,8 +926,7 @@ extern inline int entry_length (struct buffer_head * bh, struct item_head * ih, ...@@ -926,8 +926,7 @@ extern inline int entry_length (struct buffer_head * bh, struct item_head * ih,
//((block_size - BLKH_SIZE - IH_SIZE - DEH_SIZE * 2) / 2) //((block_size - BLKH_SIZE - IH_SIZE - DEH_SIZE * 2) / 2)
// two entries per block (at least) // two entries per block (at least)
#define REISERFS_MAX_NAME_LEN(block_size) \ #define REISERFS_MAX_NAME_LEN(block_size) 255
((block_size - BLKH_SIZE - IH_SIZE - DEH_SIZE))
...@@ -1753,7 +1752,6 @@ void reiserfs_do_truncate (struct reiserfs_transaction_handle *th, ...@@ -1753,7 +1752,6 @@ void reiserfs_do_truncate (struct reiserfs_transaction_handle *th,
struct inode * p_s_inode, struct page *, struct inode * p_s_inode, struct page *,
int update_timestamps); int update_timestamps);
// //
void reiserfs_vfs_truncate_file (struct inode * p_s_inode);
//void lock_inode_to_convert (struct inode * p_s_inode); //void lock_inode_to_convert (struct inode * p_s_inode);
//void unlock_inode_after_convert (struct inode * p_s_inode); //void unlock_inode_after_convert (struct inode * p_s_inode);
//void increment_i_read_sync_counter (struct inode * p_s_inode); //void increment_i_read_sync_counter (struct inode * p_s_inode);
...@@ -1792,7 +1790,7 @@ void padd_item (char * item, int total_length, int length); ...@@ -1792,7 +1790,7 @@ void padd_item (char * item, int total_length, int length);
/* inode.c */ /* inode.c */
int reiserfs_prepare_write(struct file *, struct page *, unsigned, unsigned) ; int reiserfs_prepare_write(struct file *, struct page *, unsigned, unsigned) ;
void reiserfs_truncate_file(struct inode *) ; void reiserfs_truncate_file(struct inode *, int update_timestamps) ;
void make_cpu_key (struct cpu_key * cpu_key, const struct inode * inode, loff_t offset, void make_cpu_key (struct cpu_key * cpu_key, const struct inode * inode, loff_t offset,
int type, int key_length); int type, int key_length);
void make_le_item_head (struct item_head * ih, struct cpu_key * key, int version, void make_le_item_head (struct item_head * ih, struct cpu_key * key, int version,
......
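The REISERFS_MAX_NAME_LEN change above trades the block-size-dependent formula for a constant. A brief comparison, assuming a 4 KB block:

    /* Old: REISERFS_MAX_NAME_LEN(4096) = 4096 - BLKH_SIZE - IH_SIZE - DEH_SIZE,
     *      on the order of ~4000 bytes -- far beyond what the VFS accepts.
     * New: a flat 255, matching the kernel-wide NAME_MAX limit. */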
...@@ -543,8 +543,8 @@ extern unsigned long prof_shift; ...@@ -543,8 +543,8 @@ extern unsigned long prof_shift;
#define CURRENT_TIME (xtime.tv_sec) #define CURRENT_TIME (xtime.tv_sec)
extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, unsigned int wq_mode)); extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr));
extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, unsigned int wq_mode)); extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
extern void FASTCALL(sleep_on(wait_queue_head_t *q)); extern void FASTCALL(sleep_on(wait_queue_head_t *q));
extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q, extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
signed long timeout)); signed long timeout));
...@@ -553,12 +553,16 @@ extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q, ...@@ -553,12 +553,16 @@ extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
signed long timeout)); signed long timeout));
extern void FASTCALL(wake_up_process(struct task_struct * tsk)); extern void FASTCALL(wake_up_process(struct task_struct * tsk));
#define wake_up(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,WQ_FLAG_EXCLUSIVE) #define wake_up(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
#define wake_up_all(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,0) #define wake_up_nr(x, nr) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
#define wake_up_sync(x) __wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,WQ_FLAG_EXCLUSIVE) #define wake_up_all(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0)
#define wake_up_interruptible(x) __wake_up((x),TASK_INTERRUPTIBLE,WQ_FLAG_EXCLUSIVE) #define wake_up_sync(x) __wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
#define wake_up_interruptible_all(x) __wake_up((x),TASK_INTERRUPTIBLE,0) #define wake_up_sync_nr(x, nr) __wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
#define wake_up_interruptible_sync(x) __wake_up_sync((x),TASK_INTERRUPTIBLE,WQ_FLAG_EXCLUSIVE) #define wake_up_interruptible(x) __wake_up((x),TASK_INTERRUPTIBLE, 1)
#define wake_up_interruptible_nr(x, nr) __wake_up((x),TASK_INTERRUPTIBLE, nr)
#define wake_up_interruptible_all(x) __wake_up((x),TASK_INTERRUPTIBLE, 0)
#define wake_up_interruptible_sync(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
#define wake_up_interruptible_sync_nr(x, nr) __wake_up_sync((x),TASK_INTERRUPTIBLE, nr)
extern int in_group_p(gid_t); extern int in_group_p(gid_t);
extern int in_egroup_p(gid_t); extern int in_egroup_p(gid_t);
......
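The wq_mode flag argument is replaced throughout by a wake count. A usage sketch, assuming the new macros above (the wait-queue name is illustrative):

    /* Wake at most four exclusive waiters, e.g. after returning a batch
     * of four requests to a free list; non-exclusive waiters are always
     * woken regardless of the count. */
    wake_up_nr(&wait_for_request, 4);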
...@@ -690,19 +690,15 @@ asmlinkage void schedule(void) ...@@ -690,19 +690,15 @@ asmlinkage void schedule(void)
} }
static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode, static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode,
unsigned int wq_mode, const int sync) int nr_exclusive, const int sync)
{ {
struct list_head *tmp, *head; struct list_head *tmp, *head;
struct task_struct *p, *best_exclusive; struct task_struct *p;
unsigned long flags; unsigned long flags;
int best_cpu, irq;
if (!q) if (!q)
goto out; goto out;
best_cpu = smp_processor_id();
irq = in_interrupt();
best_exclusive = NULL;
wq_write_lock_irqsave(&q->lock, flags); wq_write_lock_irqsave(&q->lock, flags);
#if WAITQUEUE_DEBUG #if WAITQUEUE_DEBUG
...@@ -730,47 +726,27 @@ static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode, ...@@ -730,47 +726,27 @@ static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode,
#if WAITQUEUE_DEBUG #if WAITQUEUE_DEBUG
curr->__waker = (long)__builtin_return_address(0); curr->__waker = (long)__builtin_return_address(0);
#endif #endif
/*
* If waking up from an interrupt context then
* prefer processes which are affine to this
* CPU.
*/
if (irq && (curr->flags & wq_mode & WQ_FLAG_EXCLUSIVE)) {
if (!best_exclusive)
best_exclusive = p;
if (p->processor == best_cpu) {
best_exclusive = p;
break;
}
} else {
if (sync) if (sync)
wake_up_process_synchronous(p); wake_up_process_synchronous(p);
else else
wake_up_process(p); wake_up_process(p);
if (curr->flags & wq_mode & WQ_FLAG_EXCLUSIVE) if ((curr->flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
break; break;
} }
} }
}
if (best_exclusive) {
if (sync)
wake_up_process_synchronous(best_exclusive);
else
wake_up_process(best_exclusive);
}
wq_write_unlock_irqrestore(&q->lock, flags); wq_write_unlock_irqrestore(&q->lock, flags);
out: out:
return; return;
} }
void __wake_up(wait_queue_head_t *q, unsigned int mode, unsigned int wq_mode) void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr)
{ {
__wake_up_common(q, mode, wq_mode, 0); __wake_up_common(q, mode, nr, 0);
} }
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, unsigned int wq_mode) void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr)
{ {
__wake_up_common(q, mode, wq_mode, 1); __wake_up_common(q, mode, nr, 1);
} }
#define SLEEP_ON_VAR \ #define SLEEP_ON_VAR \
......
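The loop body above encodes the count semantics compactly; spelled out, based on the patched __wake_up_common():

    /* if ((curr->flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive) break;
     *   nr = 1 -> stop after the first exclusive task is woken
     *   nr = n -> wake up to n exclusive tasks
     *   nr = 0 -> the predecrement yields -1 and never hits 0, so all
     *             exclusive tasks are woken
     * Tasks without WQ_FLAG_EXCLUSIVE never decrement the count. */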
...@@ -974,10 +974,6 @@ static void generic_file_readahead(int reada_ok, ...@@ -974,10 +974,6 @@ static void generic_file_readahead(int reada_ok,
* accessed sequentially. * accessed sequentially.
*/ */
if (ahead) { if (ahead) {
if (reada_ok == 2) {
run_task_queue(&tq_disk);
}
filp->f_ralen += ahead; filp->f_ralen += ahead;
filp->f_rawin += filp->f_ralen; filp->f_rawin += filp->f_ralen;
filp->f_raend = raend + ahead + 1; filp->f_raend = raend + ahead + 1;
......
...@@ -14,6 +14,7 @@ foreach $file (@ARGV) ...@@ -14,6 +14,7 @@ foreach $file (@ARGV)
# Initialize variables. # Initialize variables.
my $fInComment = 0; my $fInComment = 0;
my $fInString = 0;
my $fUseConfig = 0; my $fUseConfig = 0;
my $iLinuxConfig = 0; my $iLinuxConfig = 0;
my %configList = (); my %configList = ();
...@@ -24,6 +25,10 @@ foreach $file (@ARGV) ...@@ -24,6 +25,10 @@ foreach $file (@ARGV)
$fInComment && (s+^.*?\*/+ +o ? ($fInComment = 0) : next); $fInComment && (s+^.*?\*/+ +o ? ($fInComment = 0) : next);
m+/\*+o && (s+/\*.*?\*/+ +go, (s+/\*.*$+ +o && ($fInComment = 1))); m+/\*+o && (s+/\*.*?\*/+ +go, (s+/\*.*$+ +o && ($fInComment = 1)));
# Strip strings.
$fInString && (s+^.*?"+ +o ? ($fInString = 0) : next);
m+"+o && (s+".*?"+ +go, (s+".*$+ +o && ($fInString = 1)));
# Pick up definitions. # Pick up definitions.
if ( m/^\s*#/o ) if ( m/^\s*#/o )
{ {
......
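The new $fInString logic mirrors the comment stripping one line up, so CONFIG_ tokens inside string literals no longer count as configuration uses when the script checks for a missing <linux/config.h>. An illustrative C line (CONFIG_FOO is hypothetical) of the kind that previously produced a false positive:

    /* checkconfig.pl now ignores the CONFIG_FOO inside the string, so a
     * file containing only this use no longer needs <linux/config.h>: */
    printk("enable CONFIG_FOO to use this feature\n");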