Commit e4cb6d89 authored by Linus Torvalds's avatar Linus Torvalds

Import 1.3.67

parent 343c5aed
......@@ -324,6 +324,11 @@ S: Australia
N: Ralf Flaxa
E: rfflaxa@immd4.informatik.uni-erlangen.de
D: The Linux Support Team Erlangen
D: Creator of LST distribution
D: Author of installation tool LISA
S: Pfitznerweg 6
S: 74523 Schwaebisch Hall
S: Germany
N: Lawrence Foard
E: entropy@world.std.com
......@@ -409,6 +414,14 @@ S: Muehlenweg 19
S: 34266 Niestetal
S: Germany
N: Sebastian Hetze
E: she@lunetix.de
D: German Linux Documentation,
D: Organization of German Linux Conferences
S: Danckelmannstr. 48
S: 14059 Berlin
S: Germany
N: Michael Hipp
E: mhipp@student.uni-tuebingen.de
D: drivers for the racal ni5210 & ni6510 ethernet-boards
......
......@@ -42,14 +42,14 @@ foo \kill}%
%
\title{{\bf Linux Allocated Devices}}
\author{Maintained by H. Peter Anvin $<$hpa@storm.net$>$}
\date{Last revised: February 3, 1996}
\date{Last revised: February 17, 1996}
\maketitle
%
\noindent
This list is the successor to Rick Miller's Linux Device List, which
he stopped maintaining when he lost network access in 1993. It is a
registry of allocated major device numbers, as well as the recommended
{\file /dev} directory nodes for these devices.
he stopped maintaining when he got busy with other things in 1993. It
is a registry of allocated major device numbers, as well as the
recommended {\file /dev} directory nodes for these devices.
The latest version of this list is included with the Linux kernel
sources in \LaTeX\ and ASCII form. In case of discrepancy, the
......@@ -145,6 +145,7 @@ an unreasonable effort.
\major{36}{}{char }{Netlink support}
\major{ }{}{block}{MCA ESDI hard disk}
\major{37}{}{char }{IDE tape}
\major{ }{}{block}{Zorro II ramdisk}
\major{38}{}{char }{Myricom PCI Myrinet board}
\major{39}{--40}{}{Unallocated}
\major{41}{}{char}{Yet Another Micro Monitor}
......@@ -183,9 +184,14 @@ an unreasonable effort.
\minor{9}{/dev/urandom}{Less secure, but faster random number generator}
\\
\major{}{}{block}{RAM disk}
\minor{1}{/dev/ramdisk}{RAM disk}
\minor{0}{/dev/ram0}{First RAM disk}
\minordots
\minor{7}{/dev/ram7}{Eighth RAM disk}
\end{devicelist}
\noindent
Earlier kernels had /dev/ramdisk (1, 1) here.
\begin{devicelist}
\major{2}{}{char}{Pseudo-TTY masters}
\minor{0}{/dev/ptyp0}{First PTY master}
......@@ -307,7 +313,7 @@ partitioning schemes appropriate to their respective architectures.
\noindent
For compatibility with previous versions of Linux, the first 64 PTYs
are replicated under this device number. This use will be obsolescent
with the release of Linux 1.4 and may be removed in a future version
with the release of Linux 2.0 and may be removed in a future version
of Linux.
\begin{devicelist}
......@@ -815,12 +821,13 @@ major number 3).
\minor{131}{/dev/smpte3}{Fourth MIDI port, SMPTE timed}
\\
\major{ }{}{block}{Modular RAM disk}
\minor{0}{/dev/ram0}{First modular RAM disk}
\minor{1}{/dev/ram1}{Second modular RAM disk}
\minordots
\minor{255}{/dev/ram255}{256th modular RAM disk}
\end{devicelist}
\noindent
This device number is provided for older kernels which did not have
the modular RAM disk in the standard distribution. See major number
1. This assignment will be removed when the 2.0 kernel is released.
\begin{devicelist}
\major{36}{}{char }{Netlink support}
\minor{0}{/dev/route}{Routing, device updates (kernel to user)}
......@@ -845,6 +852,11 @@ Partitions are handled the same way as for IDE disks (see major number
\noindent
Currently, only one IDE tape drive is supported.
\begin{devicelist}
\major{ }{}{block}{Zorro II ramdisk}
\minor{0}{/dev/z2ram}{Zorro II ramdisk}
\end{devicelist}
\begin{devicelist}
\major{38}{}{char }{Myricom PCI Myrinet board}
\minor{0}{/dev/mlanai0}{First Myrinet board}
......@@ -961,16 +973,20 @@ These links should exist on all systems:
\link{/dev/stdin}{fd/0}{symbolic}{Standard input file descriptor}
\link{/dev/stdout}{fd/1}{symbolic}{Standard output file descriptor}
\link{/dev/stderr}{fd/2}{symbolic}{Standard error file descriptor}
\link{/dev/nfsd}{socksys}{symbolic}{Required by iBCS-2}
\link{/dev/X0R}{null}{symbolic}{Required by iBCS-2}
\end{nodelist}
\noindent
Note: The last device is: letter {\tt X}-digit {\tt 0}-letter {\tt R}.
\subsection{Recommended links}
It is recommended that these links exist on all systems:
\begin{nodelist}
\link{/dev/X0R}{null}{symbolic}{Used by iBCS-2}
\link{/dev/nfsd}{socksys}{symbolic}{Used by iBCS-2}
\link{/dev/core}{/proc/kcore}{symbolic}{Backward compatibility}
\link{/dev/ramdisk}{ram1}{symbolic}{Backward compatibility}
\link{/dev/scd?}{sr?}{hard}{Alternate name for CD-ROMs}
%\link{/dev/fd?H*}{fd?D*}{hard}{Compatible floppy formats}
%\link{/dev/fd?E*}{fd?D*}{hard}{Compatible floppy formats}
......
......@@ -2,12 +2,12 @@
Maintained by H. Peter Anvin <hpa@storm.net>
Last revised: February 3, 1996
Last revised: February 17, 1996
This list is the successor to Rick Miller's Linux Device List, which
he stopped maintaining when he lost network access in 1993. It is a
registry of allocated major device numbers, as well as the recommended
/dev directory nodes for these devices.
he stopped maintaining when he got busy with other things in 1993. It
is a registry of allocated major device numbers, as well as the
recommended /dev directory nodes for these devices.
The latest version of this list is included with the Linux kernel
sources in LaTeX and ASCII form. In case of discrepancy, the LaTeX
......@@ -45,7 +45,11 @@ an unreasonable effort.
8 = /dev/random Nondeterministic random number gen.
9 = /dev/urandom Faster, less secure random number gen.
block RAM disk
1 = /dev/ramdisk RAM disk
0 = /dev/ram0 First RAM disk
...
7 = /dev/ram7 Eighth RAM disk
Older kernels had /dev/ramdisk (1, 1) here.
2 char Pseudo-TTY masters
0 = /dev/ptyp0 First PTY master
......@@ -550,10 +554,11 @@ an unreasonable effort.
130 = /dev/smpte2 Third MIDI port, SMPTE timed
131 = /dev/smpte3 Fourth MIDI port, SMPTE timed
block Modular RAM disk device
0 = /dev/ram0 First modular RAM disk
1 = /dev/ram1 Second modular RAM disk
...
255 = /dev/ram255 256th modular RAM disk
This device number is provided for older kernels which
did not have the modular RAM disk in the standard
distribution. See major number 1. This assignment
will be removed when the 2.0 kernel is released.
36 char Netlink support
0 = /dev/route Routing, device updates, kernel to user
......@@ -572,6 +577,9 @@ an unreasonable effort.
Currently, only one IDE tape drive is supported.
block Zorro II ramdisk
0 = /dev/z2ram Zorro II ramdisk
38 char Myricom PCI Myrinet board
0 = /dev/mlanai0 First Myrinet board
1 = /dev/mlanai1 Second Myrinet board
......@@ -657,16 +665,18 @@ These links should exist on all systems:
/dev/stdin fd/0 symbolic stdin file descriptor
/dev/stdout fd/1 symbolic stdout file descriptor
/dev/stderr fd/2 symbolic stderr file descriptor
/dev/nfsd socksys symbolic Required by iBCS-2
/dev/X0R null symbolic Required by iBCS-2
Note: the last device is letter X-digit 0-letter R.
Recommended links
It is recommended that these links exist on all systems:
/dev/X0R null symbolic Used by iBCS-2
/dev/nfsd socksys symbolic Used by iBCS-2
/dev/core /proc/kcore symbolic Backward compatibility
/dev/scd? /dev/sr? hard Alternate SCSI CD-ROM name
/dev/ramdisk ram1 symbolic Backward compatibility
/dev/scd? sr? hard Alternate SCSI CD-ROM name
Locally defined links
......
VERSION = 1
PATCHLEVEL = 3
SUBLEVEL = 66
SUBLEVEL = 67
ARCH = i386
......
......@@ -8,6 +8,7 @@
* This file initializes the trap entry points
*/
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/tty.h>
......
......@@ -684,4 +684,5 @@ ENTRY(sys_call_table)
.long SYMBOL_NAME(sys_sched_get_priority_max)
.long SYMBOL_NAME(sys_sched_get_priority_min) /* 160 */
.long SYMBOL_NAME(sys_sched_rr_get_interval)
.space (NR_syscalls-162)*4
.long SYMBOL_NAME(sys_nanosleep)
.space (NR_syscalls-163)*4
......@@ -943,13 +943,16 @@ depca_rx(struct device *dev)
}
}
if (buf[0] & 0x01) { /* Multicast/Broadcast */
if ((*(s16 *)&buf[0] == -1) && (*(s32 *)&buf[2] == -1)) {
if ((*(s16 *)&buf[0] == -1) &&
(*(s16 *)&buf[2] == -1) &&
(*(s16 *)&buf[4] == -1)) {
lp->pktStats.broadcast++;
} else {
lp->pktStats.multicast++;
}
} else if ((*(s16 *)&buf[0] == *(s16 *)&dev->dev_addr[0]) &&
(*(s32 *)&buf[2] == *(s32 *)&dev->dev_addr[2])) {
(*(s16 *)&buf[2] == *(s16 *)&dev->dev_addr[2]) &&
(*(s16 *)&buf[4] == *(s16 *)&dev->dev_addr[4])) {
lp->pktStats.unicast++;
}
......
......@@ -14,6 +14,8 @@
* Dmitry Gorodchanin : Added CSLIP statistics.
* Stanislav Voronyi : Make line checking as created by
* Igor Chechik, RELCOM Corp.
* Craig Schlenter : Fixed #define bug that caused
* CSLIP telnets to hang in 1.3.61-6
*
* Author: Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
*/
......@@ -89,8 +91,8 @@ struct slip {
#define SLF_INUSE 0 /* Channel in use */
#define SLF_ESCAPE 1 /* ESC received */
#define SLF_ERROR 2 /* Parity, etc. error */
#define SLF_KEEPTEST 4 /* Keepalive test flag */
#define SLF_OUTWAIT 8 /* is outpacket was flag */
#define SLF_KEEPTEST 3 /* Keepalive test flag */
#define SLF_OUTWAIT 4 /* is outpacket was flag */
unsigned char mode; /* SLIP mode */
#define SL_MODE_SLIP 0
......
......@@ -2958,8 +2958,13 @@ int aha152x_proc_info(
scd = scd->next;
}
*start=buffer;
return (pos-buffer < length ? pos-buffer : length);
*start=buffer+offset;
if (pos - buffer < offset)
return 0;
else if (pos - buffer - offset < length)
return pos - buffer - offset;
else
return length;
}
#ifdef MODULE
......
......@@ -564,7 +564,7 @@ int scan_scsis_single (int channel, int dev, int lun, int *max_dev_lun,
((SCpnt->sense_buffer[0] & 0x70) >> 4) == 7) {
if (((SCpnt->sense_buffer[2] & 0xf) != NOT_READY) &&
((SCpnt->sense_buffer[2] & 0xf) != UNIT_ATTENTION) &&
((SCpnt->sense_buffer[2] & 0xf) != ILLEGAL_REQUEST))
((SCpnt->sense_buffer[2] & 0xf) != ILLEGAL_REQUEST || lun > 0))
return 1;
}
else
......
......@@ -56,6 +56,7 @@ static struct buffer_head * lru_list[NR_LIST] = {NULL, };
buffers to discard when more memory is needed */
static struct buffer_head * next_to_age[NR_LIST] = {NULL, };
static struct buffer_head * free_list[NR_SIZES] = {NULL, };
static struct buffer_head * unused_list = NULL;
struct buffer_head * reuse_list = NULL;
static struct wait_queue * buffer_wait = NULL;
......@@ -846,8 +847,7 @@ void __brelse(struct buffer_head * buf)
refile_buffer(buf);
if (buf->b_count) {
if (!--buf->b_count)
wake_up(&buffer_wait);
buf->b_count--;
return;
}
printk("VFS: brelse: Trying to free free buffer\n");
......@@ -867,7 +867,6 @@ void __bforget(struct buffer_head * buf)
remove_from_hash_queue(buf);
buf->b_dev = NODEV;
refile_buffer(buf);
wake_up(&buffer_wait);
}
/*
......@@ -966,6 +965,7 @@ static void put_unused_buffer_head(struct buffer_head * bh)
((volatile struct buffer_head *) bh)->b_wait = wait;
bh->b_next_free = unused_list;
unused_list = bh;
wake_up(&buffer_wait);
}
static void get_more_buffer_heads(void)
......@@ -973,11 +973,26 @@ static void get_more_buffer_heads(void)
int i;
struct buffer_head * bh;
if (unused_list)
return;
for (;;) {
if (unused_list)
return;
if (!(bh = (struct buffer_head*) get_free_page(GFP_KERNEL)))
return;
/*
* This is critical. We can't swap out pages to get
* more buffer heads, because the swap-out may need
* more buffer-heads itself. Thus GFP_ATOMIC.
*/
bh = (struct buffer_head *) get_free_page(GFP_ATOMIC);
if (bh)
break;
/*
* Uhhuh. We're _really_ low on memory. Now we just
* wait for old buffer heads to become free due to
* finishing IO..
*/
sleep_on(&buffer_wait);
}
for (nr_buffer_heads+=i=PAGE_SIZE/sizeof*bh ; i>0; i--) {
bh->b_next_free = unused_list; /* only make link */
......@@ -1217,6 +1232,7 @@ void unlock_buffer(struct buffer_head * bh)
page->free_after = 0;
free_page(page_address(page));
}
wake_up(&buffer_wait);
}
/*
......@@ -1307,7 +1323,6 @@ static int grow_buffers(int pri, int size)
free_list[isize] = bh;
buffer_pages[MAP_NR(page)] = bh;
tmp->b_this_page = bh;
wake_up(&buffer_wait);
buffermem += PAGE_SIZE;
return 1;
}
......
......@@ -167,7 +167,7 @@ asmlinkage int sys_uselib(const char * library)
for (fmt = formats ; fmt ; fmt = fmt->next) {
int (*fn)(int) = fmt->load_shlib;
if (!fn)
break;
continue;
retval = fn(fd);
if (retval != -ENOEXEC)
break;
......@@ -658,7 +658,7 @@ int do_execve(char * filename, char ** argv, char ** envp, struct pt_regs * regs
for (fmt = formats ; fmt ; fmt = fmt->next) {
int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
if (!fn)
break;
continue;
retval = fn(&bprm, regs);
if (retval >= 0) {
iput(bprm.inode);
......
......@@ -45,7 +45,7 @@ static struct file_operations ext_file_operations = {
NULL, /* readdir - bad */
NULL, /* select - default */
NULL, /* ioctl - default */
generic_mmap, /* mmap */
generic_file_mmap, /* mmap */
NULL, /* no special open is needed */
NULL, /* release */
ext_sync_file /* fsync */
......
......@@ -50,7 +50,7 @@ static struct file_operations ext2_file_operations = {
NULL, /* readdir - bad */
NULL, /* select - default */
ext2_ioctl, /* ioctl */
generic_mmap, /* mmap */
generic_file_mmap, /* mmap */
NULL, /* no special open is needed */
ext2_release_file, /* release */
ext2_sync_file, /* fsync */
......
......@@ -33,7 +33,7 @@ static struct file_operations fat_file_operations = {
NULL, /* readdir - bad */
NULL, /* select - default */
NULL, /* ioctl - default */
generic_mmap, /* mmap */
generic_file_mmap, /* mmap */
NULL, /* no special open is needed */
NULL, /* release */
file_fsync /* fsync */
......@@ -61,8 +61,8 @@ struct inode_operations fat_file_inode_operations = {
};
/* #Specification: msdos / special devices / mmap
Mmapping does work because a special mmap is provided in that case.
Note that it is much less efficient than the generic_mmap normally
used since it allocate extra buffer. generic_mmap is used for
Note that it is much less efficient than the generic_file_mmap normally
used since it allocates an extra buffer. generic_file_mmap is used for
normal device (512 bytes hardware sectors).
*/
static struct file_operations fat_file_operations_1024 = {
......
......@@ -358,6 +358,15 @@ int fat_bmap(struct inode *inode,int block)
return (cluster-2)*sb->cluster_size+sb->data_start+offset;
}
/*
 * Tell whether a 3-character MS-DOS file extension denotes an
 * executable file type.  Returns 1 for "EXE", "COM" or "BAT",
 * 0 otherwise.  Only the first three characters of `extension'
 * are examined, so the raw (non NUL-terminated) 3-byte extension
 * field of a directory entry may be passed in directly.
 */
static int is_exec(char *extension)
{
	static const char *exec_types[] = { "EXE", "COM", "BAT", NULL };
	const char **type;

	for (type = exec_types; *type != NULL; type++) {
		if (strncmp(extension, *type, 3) == 0)
			return 1;
	}
	return 0;
}
void fat_read_inode(struct inode *inode, struct inode_operations *fs_dir_inode_ops)
{
......@@ -424,7 +433,8 @@ void fat_read_inode(struct inode *inode, struct inode_operations *fs_dir_inode_o
}
} else { /* not a directory */
inode->i_mode = MSDOS_MKMODE(raw_entry->attr,
(IS_NOEXEC(inode) ? S_IRUGO|S_IWUGO : S_IRWXUGO)
((IS_NOEXEC(inode) || !is_exec(raw_entry->ext))
? S_IRUGO|S_IWUGO : S_IRWXUGO)
& ~MSDOS_SB(inode->i_sb)->fs_umask) | S_IFREG;
inode->i_op = (sb->s_blocksize == 1024)
? &fat_file_inode_operations_1024
......
......@@ -155,7 +155,7 @@ static const struct file_operations hpfs_file_ops =
NULL, /* readdir - bad */
NULL, /* select - default */
NULL, /* ioctl - default */
generic_mmap, /* mmap */
generic_file_mmap, /* mmap */
NULL, /* no special open is needed */
NULL, /* release */
file_fsync, /* fsync */
......
......@@ -29,7 +29,7 @@ static struct file_operations isofs_file_operations = {
NULL, /* readdir - bad */
NULL, /* select - default */
NULL, /* ioctl - default */
generic_mmap, /* mmap */
generic_file_mmap, /* mmap */
NULL, /* no special open is needed */
NULL, /* release */
NULL /* fsync */
......
......@@ -40,7 +40,7 @@ static struct file_operations minix_file_operations = {
NULL, /* readdir - bad */
NULL, /* select - default */
NULL, /* ioctl - default */
generic_mmap, /* mmap */
generic_file_mmap, /* mmap */
NULL, /* no special open is needed */
NULL, /* release */
minix_sync_file /* fsync */
......
......@@ -8,7 +8,7 @@
# Note 2! The CFLAGS definitions are now in the main makefile...
O_TARGET := nfs.o
O_OBJS := proc.o sock.o rpcsock.o inode.o file.o dir.o symlink.o mmap.o
O_OBJS := proc.o sock.o rpcsock.o inode.o file.o dir.o symlink.o
ifdef CONFIG_ROOT_NFS
O_OBJS += nfsroot.o
......
......@@ -11,6 +11,8 @@
*
* Expire cache on write to a file by Wai S Kok (Oct 1994).
*
* Total rewrite of read side for new NFS buffer cache.. Linus.
*
* nfs regular file handling functions
*/
......@@ -22,13 +24,16 @@
#include <linux/mm.h>
#include <linux/nfs_fs.h>
#include <linux/malloc.h>
#include <linux/pagemap.h>
#include <asm/segment.h>
#include <asm/system.h>
static int nfs_file_mmap(struct inode *, struct file *, struct vm_area_struct *);
static int nfs_file_read(struct inode *, struct file *, char *, int);
static int nfs_file_write(struct inode *, struct file *, const char *, int);
static int nfs_fsync(struct inode *, struct file *);
static int nfs_readpage(struct inode * inode, struct page * page);
static struct file_operations nfs_file_operations = {
NULL, /* lseek - default */
......@@ -37,7 +42,7 @@ static struct file_operations nfs_file_operations = {
NULL, /* readdir - bad */
NULL, /* select - default */
NULL, /* ioctl - default */
nfs_mmap, /* mmap */
nfs_file_mmap, /* mmap */
NULL, /* no special open is needed */
NULL, /* release */
nfs_fsync, /* fsync */
......@@ -56,136 +61,98 @@ struct inode_operations nfs_file_inode_operations = {
NULL, /* rename */
NULL, /* readlink */
NULL, /* follow_link */
NULL, /* readpage */
nfs_readpage, /* readpage */
NULL, /* writepage */
NULL, /* bmap */
NULL /* truncate */
};
/* Once data is inserted, it can only be deleted, if (in_use==0). */
struct read_cache {
int in_use; /* currently in use? */
unsigned long inode_num; /* inode number */
off_t file_pos; /* file position */
int len; /* size of data */
unsigned long time; /* time, this entry was inserted */
char * buf; /* data */
int buf_size; /* size of buffer */
};
static inline void revalidate_inode(struct nfs_server * server, struct inode * inode)
{
struct nfs_fattr fattr;
if (jiffies - NFS_READTIME(inode) < server->acregmax)
return;
#define READ_CACHE_SIZE 5
#define EXPIRE_CACHE (HZ * 3) /* keep no longer than 3 seconds */
NFS_READTIME(inode) = jiffies;
if (nfs_proc_getattr(server, NFS_FH(inode), &fattr) == 0) {
nfs_refresh_inode(inode, &fattr);
if (fattr.mtime.seconds == NFS_OLDMTIME(inode))
return;
NFS_OLDMTIME(inode) = fattr.mtime.seconds;
}
invalidate_inode_pages(inode, 0);
}
unsigned long num_requests = 0;
unsigned long num_cache_hits = 0;
static int tail = 0; /* next cache slot to replace */
/*
 * Read from an NFS file: first revalidate the attribute cache (which
 * may invalidate stale cached pages if the server-side mtime changed),
 * then go through the generic page-cache read path.
 */
static int nfs_file_read(struct inode * inode, struct file * file,
	char * buf, int count)
{
	revalidate_inode(NFS_SERVER(inode), inode);
	return generic_file_read(inode, file, buf, count);
}
static struct read_cache cache[READ_CACHE_SIZE] = {
{ 0, 0, -1, 0, 0, NULL, 0 },
{ 0, 0, -1, 0, 0, NULL, 0 },
{ 0, 0, -1, 0, 0, NULL, 0 },
{ 0, 0, -1, 0, 0, NULL, 0 },
{ 0, 0, -1, 0, 0, NULL, 0 } };
/*
 * mmap an NFS file: revalidate the cached attributes, then use the
 * generic page-cache mmap implementation.
 */
static int nfs_file_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
{
	revalidate_inode(NFS_SERVER(inode), inode);
	return generic_file_mmap(inode, file, vma);
}
/*
 * fsync is a no-op here and unconditionally reports success --
 * presumably because nfs_file_write pushes data to the server as it
 * is written, leaving no dirty local state to flush.
 * NOTE(review): confirm no write-behind cache exists at this level.
 */
static int nfs_fsync(struct inode *inode, struct file *file)
{
	return 0;
}
static int nfs_file_read(struct inode *inode, struct file *file, char *buf,
int count)
static inline void do_read_nfs(struct inode * inode, char * buf, unsigned long pos)
{
int result, hunk, i, n, fs;
int refresh = 0;
int count = PAGE_SIZE;
int rsize = NFS_SERVER(inode)->rsize;
struct nfs_fattr fattr;
char *data;
off_t pos;
if (!inode) {
printk("nfs_file_read: inode = NULL\n");
return -EINVAL;
}
if (!S_ISREG(inode->i_mode)) {
printk("nfs_file_read: read from non-file, mode %07o\n",
inode->i_mode);
return -EINVAL;
}
pos = file->f_pos;
if (pos + count > inode->i_size)
count = inode->i_size - pos;
if (count <= 0)
return 0;
++num_requests;
cli();
for (i = 0; i < READ_CACHE_SIZE; i++)
if ((cache[i].inode_num == inode->i_ino)
&& (cache[i].file_pos <= pos)
&& (cache[i].file_pos + cache[i].len >= pos + count)
&& (jiffies - cache[i].time < EXPIRE_CACHE))
break;
if (i < READ_CACHE_SIZE) {
++cache[i].in_use;
sti();
++num_cache_hits;
memcpy_tofs(buf, cache[i].buf + pos - cache[i].file_pos, count);
--cache[i].in_use;
file->f_pos += count;
return count;
}
sti();
n = NFS_SERVER(inode)->rsize;
for (i = 0; i < count - n; i += n) {
do {
int result;
if (count < rsize)
rsize = count;
result = nfs_proc_read(NFS_SERVER(inode), NFS_FH(inode),
pos, n, buf, &fattr, 1);
pos, rsize, buf, &fattr);
if (result < 0)
return result;
pos += result;
buf += result;
if (result < n) {
file->f_pos = pos;
nfs_refresh_inode(inode, &fattr);
return i + result;
}
}
fs = 0;
if (!(data = (char *)kmalloc(n, GFP_KERNEL))) {
data = buf;
fs = 1;
}
result = nfs_proc_read(NFS_SERVER(inode), NFS_FH(inode),
pos, n, data, &fattr, fs);
if (result < 0) {
if (!fs)
kfree_s(data, n);
return result;
}
hunk = count - i;
if (result < hunk)
hunk = result;
if (fs) {
file->f_pos = pos + hunk;
goto partial;
refresh = 1;
count -= rsize;
pos += rsize;
buf += rsize;
if (result < rsize)
goto partial;
} while (count);
nfs_refresh_inode(inode, &fattr);
return;
partial:
memset(buf, 0, count);
if (refresh)
nfs_refresh_inode(inode, &fattr);
return i + hunk;
}
static int nfs_readpage(struct inode * inode, struct page * page)
{
unsigned long address;
address = page_address(page);
page->count++;
wait_on_page(page);
if (page->uptodate) {
free_page(address);
return 0;
}
memcpy_tofs(buf, data, hunk);
file->f_pos = pos + hunk;
nfs_refresh_inode(inode, &fattr);
cli();
if (cache[tail].in_use == 0) {
if (cache[tail].buf)
kfree_s(cache[tail].buf, cache[tail].buf_size);
cache[tail].buf = data;
cache[tail].buf_size = n;
cache[tail].inode_num = inode->i_ino;
cache[tail].file_pos = pos;
cache[tail].len = result;
cache[tail].time = jiffies;
if (++tail >= READ_CACHE_SIZE)
tail = 0;
} else
kfree_s(data, n);
sti();
return i + hunk;
page->locked = 1;
do_read_nfs(inode, (char *) address, page->offset);
page->locked = 0;
page->uptodate = 1;
wake_up(&page->wait);
free_page(address);
return 0;
}
static int nfs_file_write(struct inode *inode, struct file *file, const char *buf,
......@@ -206,13 +173,6 @@ static int nfs_file_write(struct inode *inode, struct file *file, const char *bu
if (count <= 0)
return 0;
cli();
/* If hit, cache is dirty and must be expired. */
for (i = 0; i < READ_CACHE_SIZE; i++)
if(cache[i].inode_num == inode->i_ino)
cache[i].time -= EXPIRE_CACHE;
sti();
pos = file->f_pos;
if (file->f_flags & O_APPEND)
pos = inode->i_size;
......@@ -221,7 +181,7 @@ static int nfs_file_write(struct inode *inode, struct file *file, const char *bu
hunk = count - i;
if (hunk >= n)
hunk = n;
result = nfs_proc_write(NFS_SERVER(inode), NFS_FH(inode),
result = nfs_proc_write(inode,
pos, hunk, buf, &fattr);
if (result < 0)
return result;
......
......@@ -32,10 +32,11 @@ extern int close_fp(struct file *filp);
static int nfs_notify_change(struct inode *, struct iattr *);
static void nfs_put_inode(struct inode *);
static void nfs_put_super(struct super_block *);
static void nfs_read_inode(struct inode *);
static void nfs_statfs(struct super_block *, struct statfs *, int bufsiz);
static struct super_operations nfs_sops = {
NULL, /* read inode */
nfs_read_inode, /* read inode */
nfs_notify_change, /* notify change */
NULL, /* write inode */
nfs_put_inode, /* put inode */
......@@ -45,9 +46,22 @@ static struct super_operations nfs_sops = {
NULL
};
/*
* The "read_inode" function doesn't actually do anything:
* the real data is filled in later in nfs_fhget. Here we
* just mark the cache times invalid.
*/
static void nfs_read_inode(struct inode * inode)
{
	/* Back-date the read-cache timestamp so the first access is
	 * forced to revalidate, and clear the cached mtime. */
	NFS_READTIME(inode) = jiffies - 1000000;
	NFS_OLDMTIME(inode) = 0;
}
static void nfs_put_inode(struct inode * inode)
{
	/* Deliberately compiled out: freeing the inode eagerly would also
	 * discard any page-cache data attached to it.
	 * NOTE(review): confirm that is why clear_inode() is disabled. */
#if 0
	clear_inode(inode);
#endif
}
void nfs_put_super(struct super_block *sb)
......
/*
* fs/nfs/mmap.c by Jon Tombs 15 Aug 1993
*
* This code is from
* linux/mm/mmap.c which was written by obz, Linus and Eric
* and
* linux/mm/memory.c by Linus Torvalds and others
*
* Copyright (C) 1993
*
*/
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>
#include <linux/nfs_fs.h>
#include <asm/segment.h>
#include <asm/system.h>
/*
* Return a page for mmap. We need to start using the page cache,
* because otherwise we can't share pages between processes..
*/
/*
 * Page-fault handler for NFS mmaps: allocate a fresh page and fill it
 * by issuing NFS READ requests, at most rsize bytes per request.
 * Bytes of the page that lie past the end of the mapping are zeroed.
 * Returns the page address, or 0 if no free page was available.
 * NOTE(review): a failed or short read breaks out of the loop and the
 * remainder of the page (before the cleared tail) is left with
 * whatever the freshly allocated page contained.
 */
static unsigned long nfs_file_mmap_nopage(struct vm_area_struct * area,
	unsigned long address, int no_share)
{
	struct inode * inode = area->vm_inode;
	unsigned long page;
	unsigned int clear;
	unsigned long tmp;
	int n;
	int i;
	int pos;
	struct nfs_fattr fattr;

	page = __get_free_page(GFP_KERNEL);
	if (!page)
		return page;
	address &= PAGE_MASK;
	/* file offset corresponding to the faulting page */
	pos = address - area->vm_start + area->vm_offset;

	/* number of trailing bytes beyond the end of the mapping */
	clear = 0;
	if (address + PAGE_SIZE > area->vm_end) {
		clear = address + PAGE_SIZE - area->vm_end;
	}

	n = NFS_SERVER(inode)->rsize; /* what we can read in one go */
	for (i = 0; i < (PAGE_SIZE - clear); i += n) {
		int hunk, result;

		hunk = PAGE_SIZE - i;
		if (hunk > n)
			hunk = n;
		result = nfs_proc_read(NFS_SERVER(inode), NFS_FH(inode),
			pos, hunk, (char *) (page + i), &fattr, 0);
		if (result < 0)
			break;
		pos += result;
		if (result < n) {
			i += result;
			break;
		}
	}

#ifdef doweneedthishere
	nfs_refresh_inode(inode, &fattr);
#endif

	/* zero the tail of the page that extends past the mapping end */
	tmp = page + PAGE_SIZE;
	if (clear > 0){
		memset ((char*)(tmp-clear),0,clear);
	}
	return page;
}
struct vm_operations_struct nfs_file_mmap = {
NULL, /* open */
NULL, /* close */
NULL, /* unmap */
NULL, /* protect */
NULL, /* sync */
NULL, /* advise */
nfs_file_mmap_nopage, /* nopage */
NULL, /* wppage */
NULL, /* swapout */
NULL, /* swapin */
};
/*
 * General mmap entry point for an NFS file.  Shared (writable)
 * mappings are rejected; only copy-on-write or read-only mappings of
 * regular files are supported.  Takes a reference on the inode, which
 * the VM layer releases when the mapping goes away.
 */
int nfs_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
{
	if (vma->vm_flags & VM_SHARED)	/* only PAGE_COW or read-only supported now */
		return -EINVAL;
	if (!inode->i_sb || !S_ISREG(inode->i_mode))
		return -EACCES;
	/* mapping the file counts as an access */
	if (!IS_RDONLY(inode)) {
		inode->i_atime = CURRENT_TIME;
		inode->i_dirt = 1;
	}
	vma->vm_inode = inode;
	inode->i_count++;	/* the vma holds a reference to the inode */
	vma->vm_ops = &nfs_file_mmap;
	return 0;
}
......@@ -42,6 +42,8 @@
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/pagemap.h>
#include <asm/segment.h>
#ifdef NFS_PROC_DEBUG
......@@ -150,16 +152,12 @@ static inline int *xdr_encode_data(int *p, const char *data, int len)
return p + quadlen;
}
static inline int *xdr_decode_data(int *p, char *data, int *lenp, int maxlen,
int fs)
static inline int *xdr_decode_data(int *p, char *data, int *lenp, int maxlen)
{
unsigned len = *lenp = ntohl(*p++);
if (len > maxlen)
return NULL;
if (fs)
memcpy_tofs(data, p, len);
else
memcpy(data, p, len);
memcpy(data, p, len);
return p + QUADLEN(len);
}
......@@ -373,7 +371,7 @@ int nfs_proc_readlink(struct nfs_server *server, struct nfs_fh *fhandle,
}
int nfs_proc_read(struct nfs_server *server, struct nfs_fh *fhandle,
int offset, int count, char *data, struct nfs_fattr *fattr, int fs)
int offset, int count, char *data, struct nfs_fattr *fattr)
{
int *p, *p0;
int status;
......@@ -397,7 +395,7 @@ int nfs_proc_read(struct nfs_server *server, struct nfs_fh *fhandle,
status = -errno_NFSERR_IO;
else if ((status = ntohl(*p++)) == NFS_OK) {
p = xdr_decode_fattr(p, fattr);
if (!(p = xdr_decode_data(p, data, &len, count, fs))) {
if (!(p = xdr_decode_data(p, data, &len, count))) {
printk("nfs_proc_read: giant data size\n");
status = -errno_NFSERR_IO;
}
......@@ -418,12 +416,15 @@ int nfs_proc_read(struct nfs_server *server, struct nfs_fh *fhandle,
return status;
}
int nfs_proc_write(struct nfs_server *server, struct nfs_fh *fhandle,
int offset, int count, const char *data, struct nfs_fattr *fattr)
int nfs_proc_write(struct inode * inode, int offset,
int count, const char *data, struct nfs_fattr *fattr)
{
int *p, *p0;
int status;
int ruid = 0;
void * kdata; /* address of kernel copy */
struct nfs_server * server = NFS_SERVER(inode);
struct nfs_fh *fhandle = NFS_FH(inode);
PRINTK("NFS call write %d @ %d\n", count, offset);
if (!(p0 = nfs_rpc_alloc(server->wsize)))
......@@ -434,6 +435,7 @@ int nfs_proc_write(struct nfs_server *server, struct nfs_fh *fhandle,
*p++ = htonl(offset); /* traditional, could be any value */
*p++ = htonl(offset);
*p++ = htonl(count); /* traditional, could be any value */
kdata = (void *) (p+1); /* start of data in RPC buffer */
p = xdr_encode_data(p, data, count);
if ((status = nfs_rpc_call(server, p0, p, server->wsize)) < 0) {
nfs_rpc_free(p0);
......@@ -442,6 +444,7 @@ int nfs_proc_write(struct nfs_server *server, struct nfs_fh *fhandle,
if (!(p = nfs_rpc_verify(p0)))
status = -errno_NFSERR_IO;
else if ((status = ntohl(*p++)) == NFS_OK) {
update_vm_cache(inode, offset, kdata, count);
p = xdr_decode_fattr(p, fattr);
PRINTK("NFS reply write\n");
/* status = 0; */
......
......@@ -254,8 +254,10 @@ static void pipe_write_release(struct inode * inode, struct file * filp)
static void pipe_rdwr_release(struct inode * inode, struct file * filp)
{
PIPE_READERS(*inode)--;
PIPE_WRITERS(*inode)--;
if (filp->f_mode & FMODE_READ)
PIPE_READERS(*inode)--;
if (filp->f_mode & FMODE_WRITE)
PIPE_WRITERS(*inode)--;
wake_up_interruptible(&PIPE_WAIT(*inode));
}
......@@ -273,8 +275,10 @@ static int pipe_write_open(struct inode * inode, struct file * filp)
static int pipe_rdwr_open(struct inode * inode, struct file * filp)
{
PIPE_READERS(*inode)++;
PIPE_WRITERS(*inode)++;
if (filp->f_mode & FMODE_READ)
PIPE_READERS(*inode)++;
if (filp->f_mode & FMODE_WRITE)
PIPE_WRITERS(*inode)++;
return 0;
}
......
......@@ -45,7 +45,7 @@ static struct file_operations sysv_file_operations = {
NULL, /* readdir - bad */
NULL, /* select - default */
NULL, /* ioctl - default */
generic_mmap, /* mmap */
generic_file_mmap, /* mmap */
NULL, /* no special open is needed */
NULL, /* release */
sysv_sync_file /* fsync */
......
......@@ -68,7 +68,7 @@ struct file_operations umsdos_file_operations = {
NULL, /* readdir - bad */
NULL, /* select - default */
NULL, /* ioctl - default */
generic_mmap, /* mmap */
generic_file_mmap, /* mmap */
NULL, /* no special open is needed */
NULL, /* release */
file_fsync /* fsync */
......
......@@ -42,7 +42,7 @@ static struct file_operations xiafs_file_operations = {
NULL, /* readdir - bad */
NULL, /* select - default */
NULL, /* ioctl - default */
generic_mmap, /* mmap */
generic_file_mmap, /* mmap */
NULL, /* no special open is needed */
NULL, /* release */
xiafs_sync_file /* fsync */
......
......@@ -94,8 +94,10 @@ extern void _outw (unsigned short w,unsigned long port);
extern void _outl (unsigned int l,unsigned long port);
extern unsigned long _readb(unsigned long addr);
extern unsigned long _readw(unsigned long addr);
extern unsigned long _readl(unsigned long addr);
extern void _writeb(unsigned char b, unsigned long addr);
extern void _writew(unsigned short b, unsigned long addr);
extern void _writel(unsigned int b, unsigned long addr);
/*
* The platform header files may define some of these macros to use
......
......@@ -268,6 +268,8 @@ struct termios {
#ifdef __KERNEL__
#include <linux/string.h>
/*
* Translate a "termio" structure into a "termios". Ugh.
*/
......
......@@ -155,7 +155,6 @@
#define __NR_getsid 147
#define __NR_fdatasync 148
#define __NR__sysctl 149
#define __NR_mlock 150
#define __NR_munlock 151
#define __NR_mlockall 152
......@@ -168,6 +167,7 @@
#define __NR_sched_get_priority_max 159
#define __NR_sched_get_priority_min 160
#define __NR_sched_rr_get_interval 161
#define __NR_nanosleep 162
/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
#define _syscall0(type,name) \
......
......@@ -597,7 +597,7 @@ extern struct buffer_head * breada(kdev_t dev,int block, int size,
extern int generic_readpage(struct inode *, struct page *);
extern int generic_file_read(struct inode *, struct file *, char *, int);
extern int generic_mmap(struct inode *, struct file *, struct vm_area_struct *);
extern int generic_file_mmap(struct inode *, struct file *, struct vm_area_struct *);
extern int brw_page(int, unsigned long, kdev_t, int [], int, int);
extern void put_super(kdev_t dev);
......
......@@ -43,6 +43,8 @@
#define NFS_SERVER(inode) (&(inode)->i_sb->u.nfs_sb.s_server)
#define NFS_FH(inode) (&(inode)->u.nfs_i.fhandle)
#define NFS_READTIME(inode) ((inode)->u.nfs_i.read_cache_jiffies)
#define NFS_OLDMTIME(inode) ((inode)->u.nfs_i.read_cache_mtime)
#ifdef __KERNEL__
......@@ -60,10 +62,9 @@ extern int nfs_proc_readlink(struct nfs_server *server, struct nfs_fh *fhandle,
unsigned int maxlen);
extern int nfs_proc_read(struct nfs_server *server, struct nfs_fh *fhandle,
int offset, int count, char *data,
struct nfs_fattr *fattr, int fs);
extern int nfs_proc_write(struct nfs_server *server, struct nfs_fh *fhandle,
int offset, int count, const char *data,
struct nfs_fattr *fattr);
struct nfs_fattr *fattr);
extern int nfs_proc_write(struct inode * inode, int offset,
int count, const char *data, struct nfs_fattr *fattr);
extern int nfs_proc_create(struct nfs_server *server, struct nfs_fh *dir,
const char *name, struct nfs_sattr *sattr,
struct nfs_fh *fhandle, struct nfs_fattr *fattr);
......
......@@ -8,6 +8,18 @@
*/
struct nfs_inode_info {
struct nfs_fh fhandle;
/*
* read_cache_jiffies is when we started read-caching this inode,
* and read_cache_mtime is the mtime of the inode at that time.
*
* We need to invalidate the cache for this inode if
*
* jiffies - read_cache_jiffies > 30*HZ
* AND
* mtime != read_cache_mtime
*/
unsigned long read_cache_jiffies;
unsigned long read_cache_mtime;
};
#endif
......@@ -277,7 +277,7 @@ struct sock
struct proto
{
void (*close)(struct sock *sk, int timeout);
void (*close)(struct sock *sk, unsigned long timeout);
int (*build_header)(struct sk_buff *skb,
__u32 saddr,
__u32 daddr,
......
......@@ -54,14 +54,14 @@
* close the socket, about 60 seconds */
#define TCP_FIN_TIMEOUT (3*60*HZ) /* BSD style FIN_WAIT2 deadlock breaker */
#define TCP_ACK_TIME (3*HZ) /* time to delay before sending an ACK */
#define TCP_DONE_TIME 250 /* maximum time to wait before actually
#define TCP_DONE_TIME (5*HZ/2)/* maximum time to wait before actually
* destroying a socket */
#define TCP_WRITE_TIME 3000 /* initial time to wait for an ACK,
#define TCP_WRITE_TIME (30*HZ) /* initial time to wait for an ACK,
* after last transmit */
#define TCP_TIMEOUT_INIT (3*HZ) /* RFC 1122 initial timeout value */
#define TCP_SYN_RETRIES 10 /* number of times to retry opening a
* connection (TCP_RETR2-....) */
#define TCP_PROBEWAIT_LEN 100 /* time to wait between probes when
#define TCP_PROBEWAIT_LEN (1*HZ)/* time to wait between probes when
* I've got something to write and
* there is no window */
......@@ -317,6 +317,8 @@ static __inline__ void tcp_set_state(struct sock *sk, int state)
case TCP_CLOSE:
tcp_cache_zap();
/* Should be about 2 rtt's */
reset_timer(sk, TIME_DONE, min(sk->rtt * 2, TCP_DONE_TIME));
/* fall through */
default:
if (oldstate==TCP_ESTABLISHED)
......
......@@ -184,7 +184,6 @@ struct symbol_table symbol_table = {
X(permission),
X(inode_setattr),
X(inode_change_ok),
X(generic_mmap),
X(set_blocksize),
X(getblk),
X(bread),
......@@ -199,6 +198,7 @@ struct symbol_table symbol_table = {
X(dcache_add),
X(add_blkdev_randomness),
X(generic_file_read),
X(generic_file_mmap),
X(generic_readpage),
/* device registration */
......
......@@ -337,11 +337,11 @@ asmlinkage void schedule(void)
#endif
#ifdef __SMP_PROF__
/* mark processor running an idle thread */
if (0==next->pid)
set_bit(this_cpu,&smp_idle_map);
else
clear_bit(this_cpu,&smp_idle_map);
/* mark processor running an idle thread */
if (0==next->pid)
set_bit(this_cpu,&smp_idle_map);
else
clear_bit(this_cpu,&smp_idle_map);
#endif
if (current != next) {
struct timer_list timer;
......@@ -763,7 +763,7 @@ void do_timer(struct pt_regs * regs)
struct timer_struct *tp;
long ltemp, psecs;
#ifdef __SMP_PROF__
int cpu,i;
int cpu,i;
#endif
/* Advance the phase, once it gets to one microsecond, then
......@@ -814,10 +814,10 @@ void do_timer(struct pt_regs * regs)
jiffies++;
calc_load();
#ifdef __SMP_PROF__
smp_idle_count[NR_CPUS]++; /* count timer ticks */
cpu = smp_processor_id();
for (i=0;i<(0==smp_num_cpus?1:smp_num_cpus);i++)
if (test_bit(i,&smp_idle_map)) smp_idle_count[i]++;
smp_idle_count[NR_CPUS]++; /* count timer ticks */
cpu = smp_processor_id();
for (i=0;i<(0==smp_num_cpus?1:smp_num_cpus);i++)
if (test_bit(i,&smp_idle_map)) smp_idle_count[i]++;
#endif
if (user_mode(regs)) {
current->utime++;
......@@ -859,7 +859,7 @@ void do_timer(struct pt_regs * regs)
send_sig(SIGXCPU, current, 1);
/* and every five seconds thereafter. */
else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
send_sig(SIGXCPU, current, 1);
}
......@@ -1017,7 +1017,7 @@ static int setscheduler(pid_t pid, int policy,
error = verify_area(VERIFY_READ, param, sizeof(struct sched_param));
if (error)
return error;
memcpy_fromfs(&lp, param, sizeof(struct sched_param));
memcpy_fromfs(&lp, param, sizeof(struct sched_param));
p = find_process_by_pid(pid);
if (!p)
......@@ -1031,7 +1031,7 @@ static int setscheduler(pid_t pid, int policy,
/*
* Valid priorities for SCHED_FIFO and SCHED_RR are 1..99, valid
* priority for SCHED_OTHER is 0.
* priority for SCHED_OTHER is 0.
*/
if (lp.sched_priority < 0 || lp.sched_priority > 99)
return -EINVAL;
......@@ -1046,6 +1046,8 @@ static int setscheduler(pid_t pid, int policy,
p->policy = policy;
p->rt_priority = lp.sched_priority;
if (p->next_run)
move_last_runqueue(p);
schedule();
return 0;
......@@ -1101,8 +1103,9 @@ asmlinkage int sys_sched_getparam(pid_t pid, struct sched_param *param)
asmlinkage int sys_sched_yield(void)
{
/* ... not yet implemented ... */
return -ENOSYS;
move_last_runqueue(current);
return 0;
}
asmlinkage int sys_sched_get_priority_max(int policy)
......@@ -1148,6 +1151,76 @@ asmlinkage int sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
return 0;
}
/*
* change timeval to jiffies, trying to avoid the
* most obvious overflows..
*/
static unsigned long timespectojiffies(struct timespec *value)
{
unsigned long sec = (unsigned) value->tv_sec;
long nsec = value->tv_nsec;
if (sec > (LONG_MAX / HZ))
return LONG_MAX;
nsec += 1000000000L / HZ - 1;
nsec /= 1000000000L / HZ;
return HZ * sec + nsec;
}
static void jiffiestotimespec(unsigned long jiffies, struct timespec *value)
{
value->tv_nsec = (jiffies % HZ) * (1000000000L / HZ);
value->tv_sec = jiffies / HZ;
return;
}
asmlinkage int sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp)
{
int error;
struct timespec t;
unsigned long expire;
error = verify_area(VERIFY_READ, rqtp, sizeof(struct timespec));
if (error)
return error;
memcpy_fromfs(&t, rqtp, sizeof(struct timespec));
if (rmtp) {
error = verify_area(VERIFY_WRITE, rmtp,
sizeof(struct timespec));
if (error)
return error;
}
if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 || t.tv_sec < 0)
return -EINVAL;
if (t.tv_sec == 0 && t.tv_nsec <= 2000000L &&
current->policy != SCHED_OTHER) {
/*
* Short delay requests up to 2 ms will be handled with
* high precision by a busy wait for all real-time processes.
*/
udelay((t.tv_nsec + 999) / 1000);
return 0;
}
expire = timespectojiffies(&t) + (t.tv_sec || t.tv_nsec) + jiffies;
current->timeout = expire;
current->state = TASK_INTERRUPTIBLE;
schedule();
if (expire > jiffies) {
if (rmtp) {
jiffiestotimespec(expire - jiffies -
(expire > jiffies + 1), &t);
memcpy_tofs(rmtp, &t, sizeof(struct timespec));
}
return -EINTR;
}
return 0;
}
static void show_task(int nr,struct task_struct * p)
{
unsigned long free;
......
......@@ -165,17 +165,31 @@ unsigned long page_unuse(unsigned long page)
*/
void update_vm_cache(struct inode * inode, unsigned long pos, const char * buf, int count)
{
struct page * page;
page = find_page(inode, pos & PAGE_MASK);
if (page) {
unsigned long addr;
unsigned long offset, len;
wait_on_page(page);
addr = page_address(page);
memcpy((void *) ((pos & ~PAGE_MASK) + addr), buf, count);
free_page(addr);
}
offset = (pos & ~PAGE_MASK);
pos = pos & PAGE_MASK;
len = PAGE_SIZE - offset;
do {
struct page * page;
if (len > count)
len = count;
page = find_page(inode, pos);
if (page) {
unsigned long addr;
wait_on_page(page);
addr = page_address(page);
memcpy((void *) (offset + addr), buf, len);
free_page(addr);
}
count -= len;
buf += len;
len = PAGE_SIZE;
offset = 0;
pos += PAGE_SIZE;
} while (count);
}
/*
......@@ -695,7 +709,7 @@ static struct vm_operations_struct file_private_mmap = {
};
/* This is used for a general mmap of a disk file */
int generic_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
int generic_file_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
{
struct vm_operations_struct * ops;
......
......@@ -174,18 +174,21 @@ unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
kfree(vma);
return error;
}
flags = vma->vm_flags;
insert_vm_struct(current, vma);
merge_segments(current, vma->vm_start, vma->vm_end);
/* merge_segments might have merged our vma, so we can't use it any more */
current->mm->total_vm += len >> PAGE_SHIFT;
if (vma->vm_flags & VM_LOCKED) {
unsigned long start = vma->vm_start;
unsigned long end = vma->vm_end;
if (flags & VM_LOCKED) {
unsigned long start = addr;
current->mm->locked_vm += len >> PAGE_SHIFT;
while (start < end) {
do {
char c = get_user((char *) start);
__asm__ __volatile__("": :"r" (c));
len -= PAGE_SIZE;
start += PAGE_SIZE;
}
__asm__ __volatile__("": :"r" (c));
} while (len > 0);
}
return addr;
}
......
......@@ -570,10 +570,5 @@ void __release_sock(struct sock *sk)
sk->users = 0;
barrier();
}
if (sk->dead && sk->state == TCP_CLOSE) {
/* Should be about 2 rtt's */
reset_timer(sk, TIME_DONE, min(sk->rtt * 2, TCP_DONE_TIME));
}
#endif
}
......@@ -742,22 +742,6 @@ static int inet_dup(struct socket *newsock, struct socket *oldsock)
return(inet_create(newsock,((struct sock *)(oldsock->data))->protocol));
}
/*
* Return 1 if we still have things to send in our buffers.
*/
static inline int closing(struct sock * sk)
{
switch (sk->state) {
case TCP_FIN_WAIT1:
case TCP_CLOSING:
case TCP_LAST_ACK:
return 1;
}
return 0;
}
/*
* The peer socket should always be NULL (or else). When we call this
* function we are destroying the object and from then on nobody
......@@ -766,7 +750,9 @@ static inline int closing(struct sock * sk)
static int inet_release(struct socket *sock, struct socket *peer)
{
unsigned long timeout;
struct sock *sk = (struct sock *) sock->data;
if (sk == NULL)
return(0);
......@@ -786,48 +772,19 @@ static int inet_release(struct socket *sock, struct socket *peer)
* If the close is due to the process exiting, we never
* linger..
*/
if (sk->linger == 0 || (current->flags & PF_EXITING))
{
sk->prot->close(sk,0);
sk->dead = 1;
}
else
{
sk->prot->close(sk, 0);
cli();
if (sk->lingertime)
current->timeout = jiffies + HZ*sk->lingertime;
while(closing(sk) && current->timeout>0)
{
interruptible_sleep_on(sk->sleep);
if (current->signal & ~current->blocked)
{
break;
#if 0
/* not working now - closes can't be restarted */
sti();
current->timeout=0;
return(-ERESTARTSYS);
#endif
}
}
current->timeout=0;
sti();
sk->dead = 1;
timeout = 0;
if (sk->linger) {
timeout = ~0UL;
if (!sk->lingertime)
timeout = jiffies + HZ*sk->lingertime;
}
lock_sock(sk);
if (current->flags & PF_EXITING)
timeout = 0;
/* This will destroy it. */
sock->data = NULL;
/*
* Nasty here. release_sock can cause more frames
* to be played through the socket. That can
* reinitialise the tcp cache after tcp_close();
*/
release_sock(sk);
tcp_cache_zap(); /* Kill the cache again. */
sk->socket = NULL;
sk->prot->close(sk, timeout);
return(0);
}
......
......@@ -905,7 +905,7 @@ void ip_fw_masquerade(struct sk_buff **skb_ptr, struct device *dev)
}
else ms->timer.expires = jiffies+MASQUERADE_EXPIRE_TCP;
skb->csum = csum_partial(th + 1, size - sizeof(*th), 0);
skb->csum = csum_partial((void *)(th + 1), size - sizeof(*th), 0);
tcp_send_check(th,iph->saddr,iph->daddr,size,skb);
}
add_timer(&ms->timer);
......@@ -1011,7 +1011,7 @@ int ip_fw_demasquerade(struct sk_buff *skb)
#endif
}
}
skb->csum = csum_partial(portptr + sizeof(struct tcphdr),
skb->csum = csum_partial((void *)(((struct tcphdr *)portptr) + 1),
size - sizeof(struct tcphdr), 0);
tcp_send_check((struct tcphdr *)portptr,iph->saddr,iph->daddr,size,skb);
}
......
......@@ -204,7 +204,7 @@ static int packet_sendmsg(struct sock *sk, struct msghdr *msg, int len,
* file side of the object.
*/
static void packet_close(struct sock *sk, int timeout)
static void packet_close(struct sock *sk, unsigned long timeout)
{
/*
* Stop more data and kill the socket off.
......@@ -236,6 +236,7 @@ static void packet_close(struct sock *sk, int timeout)
}
release_sock(sk);
destroy_sock(sk);
}
/*
......
......@@ -285,7 +285,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, int len, int noblock
}
}
static void raw_close(struct sock *sk, int timeout)
static void raw_close(struct sock *sk, unsigned long timeout)
{
sk->state = TCP_CLOSE;
#ifdef CONFIG_IP_MROUTE
......@@ -295,6 +295,7 @@ static void raw_close(struct sock *sk, int timeout)
mroute_socket=NULL;
}
#endif
destroy_sock(sk);
}
......
......@@ -422,7 +422,7 @@
unsigned long seq_offset;
struct tcp_mib tcp_statistics;
static void tcp_close(struct sock *sk, int timeout);
static void tcp_close(struct sock *sk, unsigned long timeout);
/*
* The less said about this the better, but it works and will do for 1.2 (and 1.4 ;))
......@@ -1782,8 +1782,27 @@ void tcp_shutdown(struct sock *sk, int how)
release_sock(sk);
}
static void tcp_close(struct sock *sk, int timeout)
/*
* Return 1 if we still have things to send in our buffers.
*/
static inline int closing(struct sock * sk)
{
switch (sk->state) {
case TCP_FIN_WAIT1:
case TCP_CLOSING:
case TCP_LAST_ACK:
return 1;
}
return 0;
}
static void tcp_close(struct sock *sk, unsigned long timeout)
{
struct sk_buff *skb;
/*
* We need to grab some memory, and put together a FIN,
* and then put it into the queue to be sent.
......@@ -1807,43 +1826,55 @@ static void tcp_close(struct sock *sk, int timeout)
if (!sk->dead)
sk->state_change(sk);
if (timeout == 0)
{
struct sk_buff *skb;
/*
* We need to flush the recv. buffs. We do this only on the
* descriptor close, not protocol-sourced closes, because the
* reader process may not have drained the data yet!
*/
/*
* We need to flush the recv. buffs. We do this only on the
* descriptor close, not protocol-sourced closes, because the
* reader process may not have drained the data yet!
*/
while((skb=skb_dequeue(&sk->receive_queue))!=NULL)
kfree_skb(skb, FREE_READ);
/*
* Get rid off any half-completed packets.
*/
while((skb=skb_dequeue(&sk->receive_queue))!=NULL)
kfree_skb(skb, FREE_READ);
if (sk->partial)
tcp_send_partial(sk);
}
/*
* Get rid off any half-completed packets.
*/
if (sk->partial)
tcp_send_partial(sk);
/*
* Timeout is not the same thing - however the code likes
* to send both the same way (sigh).
*/
if(timeout)
if (tcp_close_state(sk,1)==1)
{
tcp_set_state(sk, TCP_CLOSE); /* Dead */
tcp_send_fin(sk);
}
else
{
if(tcp_close_state(sk,1)==1)
if (timeout) {
cli();
release_sock(sk);
current->timeout = timeout;
while(closing(sk) && current->timeout)
{
tcp_send_fin(sk);
interruptible_sleep_on(sk->sleep);
if (current->signal & ~current->blocked)
{
break;
}
}
current->timeout=0;
lock_sock(sk);
sti();
}
/*
* This will destroy it. The timers will take care of actually
* free'ing up the memory.
*/
sk->dead = 1;
tcp_cache_zap(); /* Kill the cache again. */
release_sock(sk);
}
......
......@@ -1929,15 +1929,6 @@ int tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
if(tcp_data(skb,sk, saddr, len))
kfree_skb(skb, FREE_READ);
/*
* Finally, if we've moved to TCP_CLOSE, check if we should
* get rid of the socket
*/
if (sk->dead && sk->state == TCP_CLOSE) {
/* Should be about 2 rtt's */
reset_timer(sk, TIME_DONE, min(sk->rtt * 2, TCP_DONE_TIME));
}
/*
* And done
*/
......
......@@ -114,9 +114,15 @@ void net_timer (unsigned long data)
switch (why)
{
case TIME_DONE:
if (! sk->dead || sk->state != TCP_CLOSE)
/* If the socket hasn't been closed off, re-try a bit later */
if (!sk->dead) {
reset_timer(sk, TIME_DONE, TCP_DONE_TIME);
break;
}
if (sk->state != TCP_CLOSE)
{
printk ("non dead socket in time_done\n");
printk ("non CLOSE socket in time_done\n");
break;
}
destroy_sock (sk);
......@@ -127,6 +133,7 @@ void net_timer (unsigned long data)
* We've waited for a while for all the memory associated with
* the socket to be freed.
*/
if(sk->wmem_alloc!=0 || sk->rmem_alloc!=0)
{
sk->wmem_alloc++; /* So it DOESN'T go away */
......
......@@ -573,15 +573,14 @@ int udp_connect(struct sock *sk, struct sockaddr_in *usin, int addr_len)
}
static void udp_close(struct sock *sk, int timeout)
static void udp_close(struct sock *sk, unsigned long timeout)
{
lock_sock(sk);
sk->state = TCP_CLOSE;
if(uh_cache_sk==sk)
udp_cache_zap();
release_sock(sk);
if (sk->dead)
destroy_sock(sk);
destroy_sock(sk);
}
......
......@@ -305,7 +305,8 @@ main(int argc, char** argv)
cout << endl;
}
cout << endl;
} else if (strequ(buffer, "ode:") || strequ(buffer, "Code:")) {
}
if (strequ(buffer, "ode:") || strequ(buffer, "Code:")) {
// The 'C' might have been consumed as a hex number
unsigned char code[code_size];
unsigned char* cp = code;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment