Commit 950a2b0b authored by Linus Torvalds

v2.4.0.4 -> v2.4.0.5

  - ppp UP deadlock attack fix
parent 6aea1666
VERSION = 2
PATCHLEVEL = 4
SUBLEVEL = 1
EXTRAVERSION =-pre4
EXTRAVERSION =-pre5
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
......@@ -457,9 +457,8 @@ export MODVERFILE
depend dep: dep-files
# make checkconfig: Prune 'scripts' directory to avoid "false positives".
checkconfig:
find * -name '*.[hcS]' -type f -print | grep -v scripts/ | sort | xargs $(PERL) -w scripts/checkconfig.pl
find * -name '*.[hcS]' -type f -print | sort | xargs $(PERL) -w scripts/checkconfig.pl
checkhelp:
find * -name [cC]onfig.in -print | sort | xargs $(PERL) -w scripts/checkhelp.pl
......
......@@ -537,6 +537,8 @@ CONFIG_PCMCIA_SERIAL=y
# CONFIG_QUOTA is not set
# CONFIG_AUTOFS_FS is not set
CONFIG_AUTOFS4_FS=y
# CONFIG_REISERFS_FS is not set
# CONFIG_REISERFS_CHECK is not set
# CONFIG_ADFS_FS is not set
# CONFIG_ADFS_FS_RW is not set
# CONFIG_AFFS_FS is not set
......
......@@ -1820,7 +1820,6 @@ static int DAC960_BackMergeFunction(RequestQueue_T *RequestQueue,
Request->nr_segments < Controller->DriverScatterGatherLimit)
{
Request->nr_segments++;
RequestQueue->elevator.nr_segments++;
return true;
}
return false;
......@@ -1844,7 +1843,6 @@ static int DAC960_FrontMergeFunction(RequestQueue_T *RequestQueue,
Request->nr_segments < Controller->DriverScatterGatherLimit)
{
Request->nr_segments++;
RequestQueue->elevator.nr_segments++;
return true;
}
return false;
......@@ -1874,7 +1872,6 @@ static int DAC960_MergeRequestsFunction(RequestQueue_T *RequestQueue,
if (TotalSegments > MaxSegments ||
TotalSegments > Controller->DriverScatterGatherLimit)
return false;
RequestQueue->elevator.nr_segments -= SameSegment;
Request->nr_segments = TotalSegments;
return true;
}
......
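The same simplification recurs for the paride (pd, pf), i2o_block and SCSI merge hooks further down in this diff: with the elevator no longer keeping a global nr_segments count, a driver's back/front-merge function only has to police the request's own segment count. A minimal sketch of the resulting pattern, with hypothetical names (my_back_merge_fn and MY_SG_LIMIT are not from this patch):

static int my_back_merge_fn(request_queue_t *q, struct request *req,
			    struct buffer_head *bh, int max_segments)
{
	/* refuse once either the caller's or the driver's segment limit is hit */
	if (req->nr_segments < max_segments &&
	    req->nr_segments < MY_SG_LIMIT) {
		req->nr_segments++;	/* only per-request accounting remains */
		return 1;
	}
	return 0;
}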
......@@ -24,125 +24,115 @@
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/blk.h>
#include <linux/module.h>
#include <asm/uaccess.h>
/*
* Order ascending, but only allow a request to be skipped a certain
* number of times
*/
void elevator_linus(struct request *req, elevator_t *elevator,
struct list_head *real_head,
struct list_head *head, int orig_latency)
{
struct list_head *entry = real_head;
struct request *tmp;
req->elevator_sequence = orig_latency;
while ((entry = entry->prev) != head) {
tmp = blkdev_entry_to_request(entry);
if (IN_ORDER(tmp, req))
break;
if (!tmp->elevator_sequence)
break;
tmp->elevator_sequence--;
}
list_add(&req->queue, entry);
}
int elevator_linus_merge(request_queue_t *q, struct request **req,
struct list_head * head,
struct buffer_head *bh, int rw,
int *max_sectors, int *max_segments)
int max_sectors, int max_segments)
{
struct list_head *entry, *head = &q->queue_head;
struct list_head *entry = &q->queue_head;
unsigned int count = bh->b_size >> 9, ret = ELEVATOR_NO_MERGE;
entry = head;
if (q->head_active && !q->plugged)
head = head->next;
while ((entry = entry->prev) != head) {
struct request *__rq = *req = blkdev_entry_to_request(entry);
struct request *__rq = blkdev_entry_to_request(entry);
/*
* simply "aging" of requests in queue
*/
if (__rq->elevator_sequence-- <= 0) {
*req = __rq;
break;
}
if (__rq->sem)
continue;
if (__rq->cmd != rw)
continue;
if (__rq->nr_sectors + count > *max_sectors)
continue;
if (__rq->rq_dev != bh->b_rdev)
continue;
if (__rq->nr_sectors + count > max_sectors)
continue;
if (__rq->elevator_sequence < count)
break;
if (__rq->sector + __rq->nr_sectors == bh->b_rsector) {
ret = ELEVATOR_BACK_MERGE;
*req = __rq;
break;
}
if (!__rq->elevator_sequence)
break;
if (__rq->sector - count == bh->b_rsector) {
__rq->elevator_sequence--;
} else if (__rq->sector - count == bh->b_rsector) {
ret = ELEVATOR_FRONT_MERGE;
__rq->elevator_sequence -= count;
*req = __rq;
break;
}
} else if (!*req && BHRQ_IN_ORDER(bh, __rq))
*req = __rq;
}
return ret;
}
void elevator_linus_merge_cleanup(request_queue_t *q, struct request *req, int count)
{
struct list_head *entry = &req->queue, *head = &q->queue_head;
/*
* second pass scan of requests that got passed over, if any
*/
if (ret != ELEVATOR_NO_MERGE && *req) {
while ((entry = entry->next) != &q->queue_head) {
struct request *tmp = blkdev_entry_to_request(entry);
tmp->elevator_sequence--;
}
while ((entry = entry->next) != head) {
struct request *tmp = blkdev_entry_to_request(entry);
tmp->elevator_sequence -= count;
}
return ret;
}
/*
* No request sorting, just add it to the back of the list
*/
void elevator_noop(struct request *req, elevator_t *elevator,
struct list_head *real_head, struct list_head *head,
int orig_latency)
void elevator_linus_merge_req(struct request *req, struct request *next)
{
list_add_tail(&req->queue, real_head);
if (next->elevator_sequence < req->elevator_sequence)
req->elevator_sequence = next->elevator_sequence;
}
/*
* See if we can find a request that is buffer can be coalesced with.
* See if we can find a request that this buffer can be coalesced with.
*/
int elevator_noop_merge(request_queue_t *q, struct request **req,
struct list_head * head,
struct buffer_head *bh, int rw,
int *max_sectors, int *max_segments)
int max_sectors, int max_segments)
{
struct list_head *entry, *head = &q->queue_head;
struct list_head *entry;
unsigned int count = bh->b_size >> 9;
if (q->head_active && !q->plugged)
head = head->next;
if (list_empty(&q->queue_head))
return ELEVATOR_NO_MERGE;
entry = head;
entry = &q->queue_head;
while ((entry = entry->prev) != head) {
struct request *__rq = *req = blkdev_entry_to_request(entry);
if (__rq->sem)
continue;
struct request *__rq = blkdev_entry_to_request(entry);
if (__rq->cmd != rw)
continue;
if (__rq->nr_sectors + count > *max_sectors)
continue;
if (__rq->rq_dev != bh->b_rdev)
continue;
if (__rq->sector + __rq->nr_sectors == bh->b_rsector)
if (__rq->nr_sectors + count > max_sectors)
continue;
if (__rq->sem)
continue;
if (__rq->sector + __rq->nr_sectors == bh->b_rsector) {
*req = __rq;
return ELEVATOR_BACK_MERGE;
if (__rq->sector - count == bh->b_rsector)
} else if (__rq->sector - count == bh->b_rsector) {
*req = __rq;
return ELEVATOR_FRONT_MERGE;
}
}
*req = blkdev_entry_to_request(q->queue_head.prev);
return ELEVATOR_NO_MERGE;
}
/*
* The noop "elevator" does not do any accounting
*/
void elevator_noop_dequeue(struct request *req) {}
void elevator_noop_merge_cleanup(request_queue_t *q, struct request *req, int count) {}
void elevator_noop_merge_req(struct request *req, struct request *next) {}
int blkelvget_ioctl(elevator_t * elevator, blkelv_ioctl_arg_t * arg)
{
......
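Summarising the interface change in this file: the merge hook now receives the list head to scan plus max_sectors/max_segments by value and returns ELEVATOR_NO_MERGE, ELEVATOR_BACK_MERGE or ELEVATOR_FRONT_MERGE, while separate merge_cleanup and merge_req hooks replace the old insertion and dequeue callbacks. A rough sketch of how a caller could drive the new hooks — control flow and variable names here are illustrative, not lifted from ll_rw_blk.c:

elevator_t *elevator = &q->elevator;
struct request *req = NULL;
int count = bh->b_size >> 9;
int el_ret;

el_ret = elevator->elevator_merge_fn(q, &req, &q->queue_head, bh, rw,
				     max_sectors, max_segments);
if (el_ret == ELEVATOR_BACK_MERGE || el_ret == ELEVATOR_FRONT_MERGE) {
	/* bh was folded into req; age the requests it jumped over */
	elevator->elevator_merge_cleanup_fn(q, req, count);
} else {
	/* no merge: *req (if set) marks where a new request would be inserted */
}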
......@@ -392,7 +392,6 @@ static inline int pd_new_segment(request_queue_t *q, struct request *req, int ma
if (req->nr_segments < max_segments) {
req->nr_segments++;
q->elevator.nr_segments++;
return 1;
}
return 0;
......@@ -432,7 +431,6 @@ static int pd_merge_requests_fn(request_queue_t *q, struct request *req,
if (total_segments > max_segments)
return 0;
q->elevator.nr_segments -= same_segment;
req->nr_segments = total_segments;
return 1;
}
......
......@@ -346,7 +346,6 @@ static inline int pf_new_segment(request_queue_t *q, struct request *req, int ma
if (req->nr_segments < max_segments) {
req->nr_segments++;
q->elevator.nr_segments++;
return 1;
}
return 0;
......@@ -386,7 +385,6 @@ static int pf_merge_requests_fn(request_queue_t *q, struct request *req,
if (total_segments > max_segments)
return 0;
q->elevator.nr_segments -= same_segment;
req->nr_segments = total_segments;
return 1;
}
......
......@@ -392,7 +392,6 @@ static inline int i2ob_new_segment(request_queue_t *q, struct request *req,
if (req->nr_segments < max_segments) {
req->nr_segments++;
q->elevator.nr_segments++;
return 1;
}
return 0;
......@@ -436,7 +435,6 @@ static int i2ob_merge_requests(request_queue_t *q,
if (total_segments > max_segments)
return 0;
q->elevator.nr_segments -= same_segment;
req->nr_segments = total_segments;
return 1;
}
......
......@@ -226,6 +226,9 @@ static int ide_build_sglist (ide_hwif_t *hwif, struct request *rq)
unsigned char *virt_addr = bh->b_data;
unsigned int size = bh->b_size;
if (nents >= PRD_ENTRIES)
return 0;
while ((bh = bh->b_reqnext) != NULL) {
if ((virt_addr + size) != (unsigned char *) bh->b_data)
break;
......@@ -259,6 +262,9 @@ int ide_build_dmatable (ide_drive_t *drive, ide_dma_action_t func)
HWIF(drive)->sg_nents = i = ide_build_sglist(HWIF(drive), HWGROUP(drive)->rq);
if (!i)
return 0;
sg = HWIF(drive)->sg_table;
while (i && sg_dma_len(sg)) {
u32 cur_addr;
......@@ -274,7 +280,7 @@ int ide_build_dmatable (ide_drive_t *drive, ide_dma_action_t func)
*/
while (cur_len) {
if (++count >= PRD_ENTRIES) {
if (count++ >= PRD_ENTRIES) {
printk("%s: DMA table too small\n", drive->name);
pci_unmap_sg(HWIF(drive)->pci_dev,
HWIF(drive)->sg_table,
......
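The ++count to count++ change above is an off-by-one fix: the pre-increment test rejected the last PRD table slot. A worked illustration, assuming PRD_ENTRIES is the table size (the value 256 below is only an assumption for the example):

/* count = number of PRD entries already built; assume PRD_ENTRIES == 256 */
int count = 255;
if (++count >= PRD_ENTRIES)	/* old: 256 >= 256 -> bail, only 255 entries usable */
	;
count = 255;
if (count++ >= PRD_ENTRIES)	/* new: 255 >= 256 -> false, 256th entry is built  */
	;			/* the following pass (256 >= 256) then stops       */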
......@@ -134,7 +134,7 @@ static inline void do_identify (ide_drive_t *drive, byte cmd)
break;
}
#endif
printk ("CDROM");
printk ("CD/DVD-ROM");
break;
case ide_tape:
printk ("TAPE");
......@@ -761,9 +761,10 @@ static void init_gendisk (ide_hwif_t *hwif)
for (unit = 0; unit < minors; ++unit) {
*bs++ = BLOCK_SIZE;
#ifdef CONFIG_BLK_DEV_PDC4030
*max_sect++ = ((hwif->chipset == ide_pdc4030) ? 127 : MAX_SECTORS);
*max_sect++ = ((hwif->chipset == ide_pdc4030) ? 127 : 256);
#else
*max_sect++ = MAX_SECTORS;
/* IDE can do up to 128K per request. */
*max_sect++ = 256;
#endif
*max_ra++ = MAX_READAHEAD;
}
......
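The comment's 128K figure follows directly from the sector size: 256 sectors * 512 bytes/sector = 131072 bytes = 128 KiB per request, versus the old MAX_SECTORS limit of 254 sectors (roughly 127 KiB).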
......@@ -102,7 +102,7 @@ isdn_v110_open(unsigned char key, int hdrlen, int maxsize)
int i;
isdn_v110_stream *v;
if ((v = kmalloc(sizeof(isdn_v110_stream), GFP_KERNEL)) == NULL)
if ((v = kmalloc(sizeof(isdn_v110_stream), GFP_ATOMIC)) == NULL)
return NULL;
memset(v, 0, sizeof(isdn_v110_stream));
v->key = key;
......@@ -134,7 +134,7 @@ isdn_v110_open(unsigned char key, int hdrlen, int maxsize)
v->b = 0;
v->skbres = hdrlen;
v->maxsize = maxsize - hdrlen;
if ((v->encodebuf = kmalloc(maxsize, GFP_KERNEL)) == NULL) {
if ((v->encodebuf = kmalloc(maxsize, GFP_ATOMIC)) == NULL) {
kfree(v);
return NULL;
}
......
......@@ -951,7 +951,6 @@ do_dasd_request (request_queue_t *queue)
dasd_debug ((unsigned long) __builtin_return_address(0));
go = 1;
while (go && !list_empty(&queue->queue_head)) {
req = blkdev_entry_next_request(&queue->queue_head);
req = blkdev_entry_next_request(&queue->queue_head);
di = DEVICE_NR (req->rq_dev);
dasd_debug ((unsigned long) req); /* req */
......
......@@ -776,7 +776,7 @@ void print_sense_internal(const char * devclass,
printk("%s%s: sns = %2x %2x\n", devclass,
kdevname(dev), sense_buffer[0], sense_buffer[2]);
printk("Non-extended sense class %d code 0x%0x ", sense_class, code);
printk("Non-extended sense class %d code 0x%0x\n", sense_class, code);
s = 4;
}
......
......@@ -50,6 +50,50 @@
* This entire source file deals with the new queueing code.
*/
/*
* Function: __scsi_insert_special()
*
* Purpose: worker for scsi_insert_special_*()
*
* Arguments: q - request queue where request should be inserted
* rq - request to be inserted
* data - private data
* at_head - insert request at head or tail of queue
*
* Lock status: Assumed that io_request_lock is not held upon entry.
*
* Returns: Nothing
*/
static void __scsi_insert_special(request_queue_t *q, struct request *rq,
void *data, int at_head)
{
unsigned long flags;
ASSERT_LOCK(&io_request_lock, 0);
rq->cmd = SPECIAL;
rq->special = data;
rq->q = NULL;
rq->nr_segments = 0;
rq->elevator_sequence = 0;
/*
* We have the option of inserting the head or the tail of the queue.
* Typically we use the tail for new ioctls and so forth. We use the
* head of the queue for things like a QUEUE_FULL message from a
* device, or a host that is unable to accept a particular command.
*/
spin_lock_irqsave(&io_request_lock, flags);
if (at_head)
list_add(&rq->queue, &q->queue_head);
else
list_add_tail(&rq->queue, &q->queue_head);
q->request_fn(q);
spin_unlock_irqrestore(&io_request_lock, flags);
}
/*
* Function: scsi_insert_special_cmd()
......@@ -73,52 +117,9 @@
*/
int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
{
unsigned long flags;
request_queue_t *q;
ASSERT_LOCK(&io_request_lock, 0);
/*
* The SCpnt already contains a request structure - we will doctor the
* thing up with the appropriate values and use that in the actual
* request queue.
*/
q = &SCpnt->device->request_queue;
SCpnt->request.cmd = SPECIAL;
SCpnt->request.special = (void *) SCpnt;
SCpnt->request.q = NULL;
SCpnt->request.free_list = NULL;
SCpnt->request.nr_segments = 0;
/*
* We have the option of inserting the head or the tail of the queue.
* Typically we use the tail for new ioctls and so forth. We use the
* head of the queue for things like a QUEUE_FULL message from a
* device, or a host that is unable to accept a particular command.
*/
spin_lock_irqsave(&io_request_lock, flags);
if (at_head) {
list_add(&SCpnt->request.queue, &q->queue_head);
} else {
/*
* FIXME(eric) - we always insert at the tail of the
* list. Otherwise ioctl commands would always take
* precedence over normal I/O. An ioctl on a busy
* disk might be delayed indefinitely because the
* request might not float high enough in the queue
* to be scheduled.
*/
list_add_tail(&SCpnt->request.queue, &q->queue_head);
}
request_queue_t *q = &SCpnt->device->request_queue;
/*
* Now hit the requeue function for the queue. If the host is
* already busy, so be it - we have nothing special to do. If
* the host can queue it, then send it off.
*/
q->request_fn(q);
spin_unlock_irqrestore(&io_request_lock, flags);
__scsi_insert_special(q, &SCpnt->request, SCpnt, at_head);
return 0;
}
......@@ -144,51 +145,9 @@ int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
*/
int scsi_insert_special_req(Scsi_Request * SRpnt, int at_head)
{
unsigned long flags;
request_queue_t *q;
ASSERT_LOCK(&io_request_lock, 0);
/*
* The SCpnt already contains a request structure - we will doctor the
* thing up with the appropriate values and use that in the actual
* request queue.
*/
q = &SRpnt->sr_device->request_queue;
SRpnt->sr_request.cmd = SPECIAL;
SRpnt->sr_request.special = (void *) SRpnt;
SRpnt->sr_request.q = NULL;
SRpnt->sr_request.nr_segments = 0;
/*
* We have the option of inserting the head or the tail of the queue.
* Typically we use the tail for new ioctls and so forth. We use the
* head of the queue for things like a QUEUE_FULL message from a
* device, or a host that is unable to accept a particular command.
*/
spin_lock_irqsave(&io_request_lock, flags);
request_queue_t *q = &SRpnt->sr_device->request_queue;
if (at_head) {
list_add(&SRpnt->sr_request.queue, &q->queue_head);
} else {
/*
* FIXME(eric) - we always insert at the tail of the
* list. Otherwise ioctl commands would always take
* precedence over normal I/O. An ioctl on a busy
* disk might be delayed indefinitely because the
* request might not float high enough in the queue
* to be scheduled.
*/
list_add_tail(&SRpnt->sr_request.queue, &q->queue_head);
}
/*
* Now hit the requeue function for the queue. If the host is
* already busy, so be it - we have nothing special to do. If
* the host can queue it, then send it off.
*/
q->request_fn(q);
spin_unlock_irqrestore(&io_request_lock, flags);
__scsi_insert_special(q, &SRpnt->sr_request, SRpnt, at_head);
return 0;
}
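With both wrappers above reduced to a call into __scsi_insert_special(), any future special-request path inside scsi_lib.c would take the same shape (the helper is static, so this only works within that file). my_requeue_command below is hypothetical, not part of this patch:

static void my_requeue_command(Scsi_Cmnd *SCpnt)
{
	request_queue_t *q = &SCpnt->device->request_queue;

	/* at_head = 1: jump ahead of normal I/O, e.g. for a QUEUE_FULL retry */
	__scsi_insert_special(q, &SCpnt->request, SCpnt, 1);
}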
......@@ -862,17 +821,6 @@ void scsi_request_fn(request_queue_t * q)
}
SHpnt = SDpnt->host;
/*
* If the host for this device is in error recovery mode, don't
* do anything at all here. When the host leaves error recovery
* mode, it will automatically restart things and start queueing
* commands again. Same goes if the queue is actually plugged,
* if the device itself is blocked, or if the host is fully
* occupied.
*/
if (SHpnt->in_recovery || q->plugged)
return;
/*
* To start with, we keep looping until the queue is empty, or until
* the host is no longer able to accept any more requests.
......@@ -896,10 +844,11 @@ void scsi_request_fn(request_queue_t * q)
|| (SHpnt->host_blocked)
|| (SHpnt->host_self_blocked)) {
/*
* If we are unable to process any commands at all for this
* device, then we consider it to be starved. What this means
* is that there are no outstanding commands for this device
* and hence we need a little help getting it started again
* If we are unable to process any commands at all for
* this device, then we consider it to be starved.
* What this means is that there are no outstanding
* commands for this device and hence we need a
* little help getting it started again
* once the host isn't quite so busy.
*/
if (SDpnt->device_busy == 0) {
......@@ -1000,8 +949,8 @@ void scsi_request_fn(request_queue_t * q)
}
/*
* If so, we are ready to do something. Bump the count
* while the queue is locked and then break out of the loop.
* Otherwise loop around and try another request.
* while the queue is locked and then break out of the
* loop. Otherwise loop around and try another request.
*/
if (!SCpnt) {
break;
......@@ -1029,8 +978,9 @@ void scsi_request_fn(request_queue_t * q)
memcpy(&SCpnt->request, req, sizeof(struct request));
/*
* We have copied the data out of the request block - it is now in
* a field in SCpnt. Release the request block.
* We have copied the data out of the request block -
* it is now in a field in SCpnt. Release the request
* block.
*/
blkdev_release_request(req);
}
......@@ -1047,12 +997,14 @@ void scsi_request_fn(request_queue_t * q)
/*
* This will do a couple of things:
* 1) Fill in the actual SCSI command.
* 2) Fill in any other upper-level specific fields (timeout).
* 2) Fill in any other upper-level specific fields
* (timeout).
*
* If this returns 0, it means that the request failed (reading
* past end of disk, reading offline device, etc). This won't
* actually talk to the device, but some kinds of consistency
* checking may cause the request to be rejected immediately.
* If this returns 0, it means that the request failed
* (reading past end of disk, reading offline device,
* etc). This won't actually talk to the device, but
* some kinds of consistency checking may cause the
* request to be rejected immediately.
*/
if (STpnt == NULL) {
STpnt = scsi_get_request_dev(req);
......@@ -1103,8 +1055,8 @@ void scsi_request_fn(request_queue_t * q)
scsi_dispatch_cmd(SCpnt);
/*
* Now we need to grab the lock again. We are about to mess with
* the request queue and try to find another command.
* Now we need to grab the lock again. We are about to mess
* with the request queue and try to find another command.
*/
spin_lock_irq(&io_request_lock);
}
......
......@@ -324,7 +324,6 @@ static inline int scsi_new_mergeable(request_queue_t * q,
req->nr_segments >= SHpnt->sg_tablesize)
return 0;
req->nr_segments++;
q->elevator.nr_segments++;
return 1;
}
......@@ -341,11 +340,8 @@ static inline int scsi_new_segment(request_queue_t * q,
if (req->nr_hw_segments >= SHpnt->sg_tablesize ||
req->nr_segments >= SHpnt->sg_tablesize)
return 0;
if (req->nr_segments >= max_segments)
return 0;
req->nr_hw_segments++;
req->nr_segments++;
q->elevator.nr_segments++;
return 1;
}
#else
......@@ -361,7 +357,6 @@ static inline int scsi_new_segment(request_queue_t * q,
* counter.
*/
req->nr_segments++;
q->elevator.nr_segments++;
return 1;
} else {
return 0;
......@@ -417,8 +412,10 @@ __inline static int __scsi_back_merge_fn(request_queue_t * q,
SDpnt = (Scsi_Device *) q->queuedata;
SHpnt = SDpnt->host;
#ifdef DMA_CHUNK_SIZE
if (max_segments > 64)
max_segments = 64;
#endif
if (use_clustering) {
/*
......@@ -471,8 +468,10 @@ __inline static int __scsi_front_merge_fn(request_queue_t * q,
SDpnt = (Scsi_Device *) q->queuedata;
SHpnt = SDpnt->host;
#ifdef DMA_CHUNK_SIZE
if (max_segments > 64)
max_segments = 64;
#endif
if (use_clustering) {
/*
......@@ -601,10 +600,10 @@ __inline static int __scsi_merge_requests_fn(request_queue_t * q,
SDpnt = (Scsi_Device *) q->queuedata;
SHpnt = SDpnt->host;
#ifdef DMA_CHUNK_SIZE
if (max_segments > 64)
max_segments = 64;
#ifdef DMA_CHUNK_SIZE
/* If it would not fit into prepared memory space for sg chain,
* then don't allow the merge.
*/
......@@ -664,7 +663,6 @@ __inline static int __scsi_merge_requests_fn(request_queue_t * q,
* This one is OK. Let it go.
*/
req->nr_segments += next->nr_segments - 1;
q->elevator.nr_segments--;
#ifdef DMA_CHUNK_SIZE
req->nr_hw_segments += next->nr_hw_segments - 1;
#endif
......
......@@ -694,6 +694,7 @@ static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
(void *)SRpnt->sr_buffer, hp->dxfer_len,
sg_cmd_done_bh, timeout, SG_DEFAULT_RETRIES);
/* dxfer_len overwrites SRpnt->sr_bufflen, hence need for b_malloc_len */
generic_unplug_device(&SRpnt->sr_device->request_queue);
return 0;
}
......
......@@ -671,12 +671,14 @@ void get_capabilities(int i)
cmd[3] = cmd[5] = 0;
rc = sr_do_ioctl(i, cmd, buffer, 128, 1, SCSI_DATA_READ, NULL);
if (-EINVAL == rc) {
/* failed, drive has'nt this mode page */
if (rc) {
/* failed, drive doesn't have capabilities mode page */
scsi_CDs[i].cdi.speed = 1;
/* disable speed select, drive probably can't do this either */
scsi_CDs[i].cdi.mask |= CDC_SELECT_SPEED;
scsi_CDs[i].cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
CDC_DVD | CDC_DVD_RAM |
CDC_SELECT_DISC | CDC_SELECT_SPEED);
scsi_free(buffer, 512);
printk("sr%i: scsi-1 drive\n");
return;
}
n = buffer[3] + 4;
......
......@@ -3,6 +3,7 @@
*/
#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/reiserfs_fs.h>
#include <linux/locks.h>
......
......@@ -12,6 +12,7 @@
*/
#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/locks.h>
#include <linux/reiserfs_fs.h>
......
......@@ -4,6 +4,7 @@
#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/fs.h>
......@@ -142,6 +143,10 @@ static int reiserfs_readdir (struct file * filp, void * dirent, filldir_t filldi
if (!d_name[d_reclen - 1])
d_reclen = strlen (d_name);
if (d_reclen > REISERFS_MAX_NAME_LEN(inode->i_sb->s_blocksize)){
/* too big to send back to VFS */
continue ;
}
d_off = deh_offset (deh);
filp->f_pos = d_off ;
d_ino = deh_objectid (deh);
......
......@@ -18,6 +18,7 @@
#ifdef __KERNEL__
#include <linux/config.h>
#include <asm/uaccess.h>
#include <linux/sched.h>
#include <linux/reiserfs_fs.h>
......
......@@ -64,7 +64,7 @@ static int reiserfs_file_release (struct inode * inode, struct file * filp)
item(s) had to be converted, then it may have to be
indirect2direct converted */
windex = push_journal_writer("file_release") ;
reiserfs_truncate_file(inode) ;
reiserfs_truncate_file(inode, 0) ;
pop_journal_writer(windex) ;
}
up (&inode->i_sem);
......@@ -72,6 +72,9 @@ static int reiserfs_file_release (struct inode * inode, struct file * filp)
return 0;
}
static void reiserfs_vfs_truncate_file(struct inode *inode) {
reiserfs_truncate_file(inode, 1) ;
}
/* Sync a reiserfs file. */
static int reiserfs_sync_file(
......@@ -115,7 +118,7 @@ struct file_operations reiserfs_file_operations = {
struct inode_operations reiserfs_file_inode_operations = {
truncate: reiserfs_truncate_file,
truncate: reiserfs_vfs_truncate_file,
};
......@@ -37,6 +37,7 @@
#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/locks.h>
......
......@@ -4,6 +4,7 @@
#ifdef __KERNEL__
#include <linux/config.h>
#include <asm/uaccess.h>
#include <linux/string.h>
#include <linux/sched.h>
......
......@@ -3,6 +3,7 @@
*/
#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/reiserfs_fs.h>
#include <linux/locks.h>
......@@ -1538,7 +1539,7 @@ static int grab_tail_page(struct inode *p_s_inode,
**
** some code taken from block_truncate_page
*/
void reiserfs_truncate_file(struct inode *p_s_inode) {
void reiserfs_truncate_file(struct inode *p_s_inode, int update_timestamps) {
struct reiserfs_transaction_handle th ;
int windex ;
......@@ -1571,7 +1572,7 @@ void reiserfs_truncate_file(struct inode *p_s_inode) {
prevent_flush_page_lock(page, p_s_inode) ;
journal_begin(&th, p_s_inode->i_sb, JOURNAL_PER_BALANCE_CNT * 2 ) ;
windex = push_journal_writer("reiserfs_vfs_truncate_file") ;
reiserfs_do_truncate (&th, p_s_inode, page, 1/*update timestamps*/) ;
reiserfs_do_truncate (&th, p_s_inode, page, update_timestamps) ;
pop_journal_writer(windex) ;
journal_end(&th, p_s_inode->i_sb, JOURNAL_PER_BALANCE_CNT * 2 ) ;
allow_flush_page_lock(page, p_s_inode) ;
......
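To summarise the new reiserfs_truncate_file() flag as used by the hunks above: the VFS ->truncate path keeps updating timestamps through the reiserfs_vfs_truncate_file() wrapper, while the file-release path now truncates without touching them.

/* the two call sites from the hunks above */
reiserfs_truncate_file(inode, 0);	/* reiserfs_file_release: leave timestamps alone  */
reiserfs_truncate_file(inode, 1);	/* reiserfs_vfs_truncate_file (VFS ->truncate)    */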
......@@ -43,6 +43,7 @@
#ifdef __KERNEL__
#include <linux/config.h>
#include <asm/uaccess.h>
#include <asm/system.h>
......@@ -415,7 +416,7 @@ inline int mark_buffer_not_journaled(struct buffer_head *bh) {
** kernel lock held. caller is the string printed just before calling BUG()
*/
void reiserfs_check_lock_depth(char *caller) {
#ifdef __SMP__
#ifdef CONFIG_SMP
if (current->lock_depth < 0) {
printk("%s called without kernel lock held\n", caller) ;
show_reiserfs_locks() ;
......@@ -865,14 +866,20 @@ static int flush_older_journal_lists(struct super_block *p_s_sb, struct reiserfs
return 0 ;
}
static void submit_logged_buffer(struct buffer_head *bh) {
mark_buffer_notjournal_new(bh) ;
static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate) {
if (buffer_journaled(bh)) {
reiserfs_warning("clm-2084: pinned buffer %u:%s sent to disk\n",
bh->b_blocknr, kdevname(bh->b_dev)) ;
}
set_bit(BH_Dirty, &bh->b_state) ;
ll_rw_block(WRITE, 1, &bh) ;
mark_buffer_uptodate(bh, uptodate) ;
unlock_buffer(bh) ;
}
static void submit_logged_buffer(struct buffer_head *bh) {
lock_buffer(bh) ;
bh->b_end_io = reiserfs_end_buffer_io_sync ;
mark_buffer_notjournal_new(bh) ;
clear_bit(BH_Dirty, &bh->b_state) ;
submit_bh(WRITE, bh) ;
}
/* flush a journal list, both commit and real blocks
......
......@@ -4,6 +4,7 @@
#ifdef __KERNEL__
#include <linux/config.h>
#include <asm/uaccess.h>
#include <linux/string.h>
#include <linux/sched.h>
......
......@@ -4,6 +4,7 @@
#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/reiserfs_fs.h>
#include <linux/smp_lock.h>
......
......@@ -3,6 +3,7 @@
*/
#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/string.h>
#include <linux/locks.h>
#include <linux/sched.h>
......
......@@ -3,7 +3,7 @@
*/
#ifdef __KERNEL__
#include <stdarg.h>
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/reiserfs_fs.h>
......@@ -12,11 +12,11 @@
#else
#include "nokernel.h"
#include <stdarg.h>
#include <limits.h>
#endif
#include <stdarg.h>
static char error_buf[1024];
static char fmt_buf[1024];
......
......@@ -54,6 +54,7 @@
*/
#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/locks.h>
......
......@@ -4,6 +4,7 @@
#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/uaccess.h>
......
......@@ -4,6 +4,7 @@
#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/reiserfs_fs.h>
......
......@@ -87,10 +87,6 @@ void initrd_init(void);
static inline void blkdev_dequeue_request(struct request * req)
{
if (req->e) {
req->e->dequeue_fn(req);
req->e = NULL;
}
list_del(&req->queue);
}
......
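Since struct request loses its elevator back-pointer in the next hunk, the dequeue helper above collapses to a plain list removal. Reconstructed from the lines shown, the resulting inline should read:

static inline void blkdev_dequeue_request(struct request *req)
{
	list_del(&req->queue);
}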
......@@ -23,8 +23,6 @@ struct request {
int elevator_sequence;
struct list_head table;
struct list_head *free_list;
volatile int rq_status; /* should split this into a few status bits */
#define RQ_INACTIVE (-1)
#define RQ_ACTIVE 1
......@@ -47,7 +45,6 @@ struct request {
struct buffer_head * bh;
struct buffer_head * bhtail;
request_queue_t *q;
elevator_t *e;
};
#include <linux/elevator.h>
......@@ -69,7 +66,7 @@ typedef void (unplug_device_fn) (void *q);
/*
* Default nr free requests per queue
*/
#define QUEUE_NR_REQUESTS 256
#define QUEUE_NR_REQUESTS 512
struct request_queue
{
......@@ -77,6 +74,8 @@ struct request_queue
* the queue request freelist, one for reads and one for writes
*/
struct list_head request_freelist[2];
struct list_head pending_freelist[2];
int pending_free[2];
/*
* Together with queue_head for cacheline sharing
......@@ -116,7 +115,7 @@ struct request_queue
* Is meant to protect the queue in the future instead of
* io_request_lock
*/
spinlock_t request_lock;
spinlock_t queue_lock;
/*
* Tasks wait here for free request
......@@ -152,6 +151,7 @@ extern void grok_partitions(struct gendisk *dev, int drive, unsigned minors, lon
extern void register_disk(struct gendisk *dev, kdev_t first, unsigned minors, struct block_device_operations *ops, long size);
extern void generic_make_request(int rw, struct buffer_head * bh);
extern request_queue_t *blk_get_queue(kdev_t dev);
extern inline request_queue_t *__blk_get_queue(kdev_t dev);
extern void blkdev_release_request(struct request *);
/*
......@@ -162,6 +162,7 @@ extern void blk_cleanup_queue(request_queue_t *);
extern void blk_queue_headactive(request_queue_t *, int);
extern void blk_queue_pluggable(request_queue_t *, plug_device_fn *);
extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
extern void generic_unplug_device(void *);
extern int * blk_size[MAX_BLKDEV];
......@@ -175,9 +176,8 @@ extern int * max_sectors[MAX_BLKDEV];
extern int * max_segments[MAX_BLKDEV];
#define MAX_SECTORS 254
#define MAX_SEGMENTS MAX_SECTORS
#define MAX_SEGMENTS 128
#define MAX_SECTORS (MAX_SEGMENTS*8)
#define PageAlignSize(size) (((size) + PAGE_SIZE -1) & PAGE_MASK)
......
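Two of the constants above are worth working out. QUEUE_NR_REQUESTS doubles from 256 to 512 free requests per queue, and the sector limit is now derived from the segment limit instead of being fixed: MAX_SEGMENTS = 128 and MAX_SECTORS = MAX_SEGMENTS*8 = 1024 sectors, i.e. 1024 * 512 bytes = 512 KiB per request, up from the old flat 254 sectors (~127 KiB).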
......@@ -7,34 +7,32 @@ typedef void (elevator_fn) (struct request *, elevator_t *,
struct list_head *,
struct list_head *, int);
typedef int (elevator_merge_fn) (request_queue_t *, struct request **,
struct buffer_head *, int, int *, int *);
typedef int (elevator_merge_fn) (request_queue_t *, struct request **, struct list_head *,
struct buffer_head *, int, int, int);
typedef void (elevator_dequeue_fn) (struct request *);
typedef void (elevator_merge_cleanup_fn) (request_queue_t *, struct request *, int);
typedef void (elevator_merge_req_fn) (struct request *, struct request *);
struct elevator_s
{
int sequence;
int read_latency;
int write_latency;
int max_bomb_segments;
unsigned int nr_segments;
int read_pendings;
elevator_fn * elevator_fn;
elevator_merge_fn *elevator_merge_fn;
elevator_dequeue_fn *dequeue_fn;
elevator_merge_cleanup_fn *elevator_merge_cleanup_fn;
elevator_merge_req_fn *elevator_merge_req_fn;
unsigned int queue_ID;
};
void elevator_noop(struct request *, elevator_t *, struct list_head *, struct list_head *, int);
int elevator_noop_merge(request_queue_t *, struct request **, struct buffer_head *, int, int *, int *);
void elevator_noop_dequeue(struct request *);
void elevator_linus(struct request *, elevator_t *, struct list_head *, struct list_head *, int);
int elevator_linus_merge(request_queue_t *, struct request **, struct buffer_head *, int, int *, int *);
int elevator_noop_merge(request_queue_t *, struct request **, struct list_head *, struct buffer_head *, int, int, int);
void elevator_noop_merge_cleanup(request_queue_t *, struct request *, int);
void elevator_noop_merge_req(struct request *, struct request *);
int elevator_linus_merge(request_queue_t *, struct request **, struct list_head *, struct buffer_head *, int, int, int);
void elevator_linus_merge_cleanup(request_queue_t *, struct request *, int);
void elevator_linus_merge_req(struct request *, struct request *);
typedef struct blkelv_ioctl_arg_s {
int queue_ID;
......@@ -69,6 +67,10 @@ extern void elevator_init(elevator_t *, elevator_t);
(s1)->sector < (s2)->sector)) || \
(s1)->rq_dev < (s2)->rq_dev)
#define BHRQ_IN_ORDER(bh, rq) \
(((bh)->b_rdev == (rq)->rq_dev && \
(bh)->b_rsector < (rq)->sector))
static inline int elevator_request_latency(elevator_t * elevator, int rw)
{
int latency;
......@@ -80,36 +82,24 @@ static inline int elevator_request_latency(elevator_t * elevator, int rw)
return latency;
}
#define ELEVATOR_NOOP \
((elevator_t) { \
0, /* sequence */ \
\
0, /* read_latency */ \
0, /* write_latency */ \
0, /* max_bomb_segments */ \
\
0, /* nr_segments */ \
0, /* read_pendings */ \
\
elevator_noop, /* elevator_fn */ \
elevator_noop_merge, /* elevator_merge_fn */ \
elevator_noop_dequeue, /* dequeue_fn */ \
#define ELEVATOR_NOOP \
((elevator_t) { \
0, /* read_latency */ \
0, /* write_latency */ \
\
elevator_noop_merge, /* elevator_merge_fn */ \
elevator_noop_merge_cleanup, /* elevator_merge_cleanup_fn */ \
elevator_noop_merge_req, /* elevator_merge_req_fn */ \
})
#define ELEVATOR_LINUS \
((elevator_t) { \
0, /* not used */ \
\
1000000, /* read passovers */ \
2000000, /* write passovers */ \
0, /* max_bomb_segments */ \
\
0, /* not used */ \
0, /* not used */ \
\
elevator_linus, /* elevator_fn */ \
elevator_linus_merge, /* elevator_merge_fn */ \
elevator_noop_dequeue, /* dequeue_fn */ \
#define ELEVATOR_LINUS \
((elevator_t) { \
8192, /* read passovers */ \
16384, /* write passovers */ \
\
elevator_linus_merge, /* elevator_merge_fn */ \
elevator_linus_merge_cleanup, /* elevator_merge_cleanup_fn */ \
elevator_linus_merge_req, /* elevator_merge_req_fn */ \
})
#endif
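Note the apparent units change in ELEVATOR_LINUS: the read/write passover budgets drop from 1000000/2000000 to 8192/16384, and elevator_linus_merge_cleanup above now decrements a request's elevator_sequence by the merged sector count rather than by a fixed amount. If the budget is therefore consumed in sectors, 8192 sectors * 512 bytes = 4 MiB of I/O may pass a queued read before it must be serviced, and 16384 sectors = 8 MiB for a write — an interpretation inferred from the hunks in this commit, not stated in them.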
......@@ -926,8 +926,7 @@ extern inline int entry_length (struct buffer_head * bh, struct item_head * ih,
//((block_size - BLKH_SIZE - IH_SIZE - DEH_SIZE * 2) / 2)
// two entries per block (at least)
#define REISERFS_MAX_NAME_LEN(block_size) \
((block_size - BLKH_SIZE - IH_SIZE - DEH_SIZE))
#define REISERFS_MAX_NAME_LEN(block_size) 255
......@@ -1753,7 +1752,6 @@ void reiserfs_do_truncate (struct reiserfs_transaction_handle *th,
struct inode * p_s_inode, struct page *,
int update_timestamps);
//
void reiserfs_vfs_truncate_file (struct inode * p_s_inode);
//void lock_inode_to_convert (struct inode * p_s_inode);
//void unlock_inode_after_convert (struct inode * p_s_inode);
//void increment_i_read_sync_counter (struct inode * p_s_inode);
......@@ -1792,7 +1790,7 @@ void padd_item (char * item, int total_length, int length);
/* inode.c */
int reiserfs_prepare_write(struct file *, struct page *, unsigned, unsigned) ;
void reiserfs_truncate_file(struct inode *) ;
void reiserfs_truncate_file(struct inode *, int update_timestamps) ;
void make_cpu_key (struct cpu_key * cpu_key, const struct inode * inode, loff_t offset,
int type, int key_length);
void make_le_item_head (struct item_head * ih, struct cpu_key * key, int version,
......
......@@ -543,8 +543,8 @@ extern unsigned long prof_shift;
#define CURRENT_TIME (xtime.tv_sec)
extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, unsigned int wq_mode));
extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, unsigned int wq_mode));
extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr));
extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
extern void FASTCALL(sleep_on(wait_queue_head_t *q));
extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
signed long timeout));
......@@ -553,12 +553,16 @@ extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
signed long timeout));
extern void FASTCALL(wake_up_process(struct task_struct * tsk));
#define wake_up(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,WQ_FLAG_EXCLUSIVE)
#define wake_up_all(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,0)
#define wake_up_sync(x) __wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,WQ_FLAG_EXCLUSIVE)
#define wake_up_interruptible(x) __wake_up((x),TASK_INTERRUPTIBLE,WQ_FLAG_EXCLUSIVE)
#define wake_up_interruptible_all(x) __wake_up((x),TASK_INTERRUPTIBLE,0)
#define wake_up_interruptible_sync(x) __wake_up_sync((x),TASK_INTERRUPTIBLE,WQ_FLAG_EXCLUSIVE)
#define wake_up(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
#define wake_up_nr(x, nr) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
#define wake_up_all(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0)
#define wake_up_sync(x) __wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
#define wake_up_sync_nr(x, nr) __wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
#define wake_up_interruptible(x) __wake_up((x),TASK_INTERRUPTIBLE, 1)
#define wake_up_interruptible_nr(x, nr) __wake_up((x),TASK_INTERRUPTIBLE, nr)
#define wake_up_interruptible_all(x) __wake_up((x),TASK_INTERRUPTIBLE, 0)
#define wake_up_interruptible_sync(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
#define wake_up_interruptible_sync_nr(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, nr)
extern int in_group_p(gid_t);
extern int in_egroup_p(gid_t);
......
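The last argument to __wake_up changes meaning here: instead of a WQ_FLAG mask it is now a count of exclusive waiters to wake, and the loop in the sched.c hunk below breaks once that count is exhausted (a count of 0 never reaches zero by decrementing, so it wakes everyone). A small usage sketch under that reading — my_wq is a hypothetical wait queue, not from this patch:

DECLARE_WAIT_QUEUE_HEAD(my_wq);

wake_up(&my_wq);		/* all non-exclusive waiters + at most 1 exclusive */
wake_up_nr(&my_wq, 4);		/* all non-exclusive waiters + at most 4 exclusive */
wake_up_all(&my_wq);		/* nr == 0: no exclusive limit, wake everything    */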
......@@ -690,19 +690,15 @@ asmlinkage void schedule(void)
}
static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode,
unsigned int wq_mode, const int sync)
int nr_exclusive, const int sync)
{
struct list_head *tmp, *head;
struct task_struct *p, *best_exclusive;
struct task_struct *p;
unsigned long flags;
int best_cpu, irq;
if (!q)
goto out;
best_cpu = smp_processor_id();
irq = in_interrupt();
best_exclusive = NULL;
wq_write_lock_irqsave(&q->lock, flags);
#if WAITQUEUE_DEBUG
......@@ -730,47 +726,27 @@ static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode,
#if WAITQUEUE_DEBUG
curr->__waker = (long)__builtin_return_address(0);
#endif
/*
* If waking up from an interrupt context then
* prefer processes which are affine to this
* CPU.
*/
if (irq && (curr->flags & wq_mode & WQ_FLAG_EXCLUSIVE)) {
if (!best_exclusive)
best_exclusive = p;
if (p->processor == best_cpu) {
best_exclusive = p;
break;
}
} else {
if (sync)
wake_up_process_synchronous(p);
else
wake_up_process(p);
if (curr->flags & wq_mode & WQ_FLAG_EXCLUSIVE)
break;
}
if (sync)
wake_up_process_synchronous(p);
else
wake_up_process(p);
if ((curr->flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
break;
}
}
if (best_exclusive) {
if (sync)
wake_up_process_synchronous(best_exclusive);
else
wake_up_process(best_exclusive);
}
wq_write_unlock_irqrestore(&q->lock, flags);
out:
return;
}
void __wake_up(wait_queue_head_t *q, unsigned int mode, unsigned int wq_mode)
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr)
{
__wake_up_common(q, mode, wq_mode, 0);
__wake_up_common(q, mode, nr, 0);
}
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, unsigned int wq_mode)
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr)
{
__wake_up_common(q, mode, wq_mode, 1);
__wake_up_common(q, mode, nr, 1);
}
#define SLEEP_ON_VAR \
......
......@@ -974,10 +974,6 @@ static void generic_file_readahead(int reada_ok,
* accessed sequentially.
*/
if (ahead) {
if (reada_ok == 2) {
run_task_queue(&tq_disk);
}
filp->f_ralen += ahead;
filp->f_rawin += filp->f_ralen;
filp->f_raend = raend + ahead + 1;
......
......@@ -14,6 +14,7 @@ foreach $file (@ARGV)
# Initialize variables.
my $fInComment = 0;
my $fInString = 0;
my $fUseConfig = 0;
my $iLinuxConfig = 0;
my %configList = ();
......@@ -24,6 +25,10 @@ foreach $file (@ARGV)
$fInComment && (s+^.*?\*/+ +o ? ($fInComment = 0) : next);
m+/\*+o && (s+/\*.*?\*/+ +go, (s+/\*.*$+ +o && ($fInComment = 1)));
# Strip strings.
$fInString && (s+^.*?"+ +o ? ($fInString = 0) : next);
m+"+o && (s+".*?"+ +go, (s+".*$+ +o && ($fInString = 1)));
# Pick up definitions.
if ( m/^\s*#/o )
{
......