Commit 4ff1c5b1 authored by Linus Torvalds's avatar Linus Torvalds

Import 1.3.83

parent 2ab298ef
......@@ -867,9 +867,8 @@ S: 55127 Mainz
S: Germany
N: David C. Niemi
E: niemidc@clark.net
D: FSSTND, The XFree86 Project
D: DMA memory support, floppy driver
E: niemi@erols.com
D: Mtools/VFAT/floppy work, benchmarking, random kernel dilettante
S: 2364 Old Trail Drive
S: Reston, Virginia 22091
S: USA
......
......@@ -937,13 +937,14 @@ CONFIG_IPX
turn your Linux box into a fully featured Netware file server and
IPX router, say Y here and fetch either lwared from
sunsite.unc.edu:/pub/Linux/system/Network/daemons/ or mars_nwe from
linux01.gwdg.de:/pub/ncpfs. For more information, read the IPX-HOWTO
in sunsite.unc.edu:/pub/Linux/docs/howto. The IPX driver would
enlarge your kernel by about 5 kB. This driver is also available as
a module ( = code which can be inserted in and removed from the
running kernel whenever you want). If you want to compile it as a
module, say M here and read Documentation/modules.txt. Unless you
want to integrate your Linux box with a local Novell network, say N.
ftp.gwdg.de:/pub/linux/misc/ncpfs. For more information, read the
IPX-HOWTO in sunsite.unc.edu:/pub/Linux/docs/howto. The IPX driver
would enlarge your kernel by about 5 kB. This driver is also
available as a module ( = code which can be inserted in and removed
from the running kernel whenever you want). If you want to compile
it as a module, say M here and read Documentation/modules.txt.
Unless you want to integrate your Linux box with a local Novell
network, say N.
Full internal IPX network
CONFIG_IPX_INTERN
......@@ -958,7 +959,7 @@ CONFIG_IPX_INTERN
packets targeted at 'special' sockets to sockets listening on the
primary network is disabled. This might break existing applications,
especially RIP/SAP daemons. A RIP/SAP daemon that works well with the
full internal net can be found on linux01.gwdg.de:/pub/ncpfs.
full internal net can be found on ftp.gwdg.de:/pub/linux/misc/ncpfs.
If you don't know what you are doing, say N.
Appletalk DDP
......
ncpfs is a filesystem which understands the NCP protocol, designed by the
Novell Corporation for their NetWare(tm) product. NCP is functionally
similar to the NFS used in the tcp/ip community.
To mount a Netware-Filesystem, you need a special mount program, which can
be found in ncpfs package. Homesite for ncpfs is linux01.gwdg.de/pub/ncpfs,
but sunsite and its many mirrors will have it as well.
To mount a Netware-Filesystem, you need a special mount program, which
can be found in ncpfs package. Homesite for ncpfs is
ftp.gwdg.de/pub/linux/misc/ncpfs, but sunsite and its many mirrors
will have it as well.
Related products are linware and mars_nwe, which will give Linux partial
Netware Server functionality.
NetWare Server functionality.
Linware's home site is: klokan.sh.cvut.cz/pub/linux/linware,
Mars_nwe can be found on linux01.gwdg.de/pub/ncpfs.
Mars_nwe can be found on ftp.gwdg.de/pub/linux/misc/ncpfs.
......@@ -8,5 +8,6 @@ NetBIOS over TCP/IP. There you also find explanation for concepts like
netbios name or share.
To use smbfs, you need a special mount program, which can be found in
the ksmbfs package, found on sunsite.unc.edu:/pub/Linux/ALPHA/smbfs.
the ksmbfs package, found on
sunsite.unc.edu:/pub/Linux/system/Filesystems/smbfs.
VERSION = 1
PATCHLEVEL = 3
SUBLEVEL = 82
SUBLEVEL = 83
ARCH = i386
......
......@@ -149,7 +149,7 @@ void pal_init(void)
printk("Ok (rev %lx)\n", rev);
/* remove the old virtual page-table mapping */
L1[1] = 0;
invalidate_all();
flush_tlb_all();
}
extern int _end;
......
......@@ -257,7 +257,7 @@ static void put_long(struct task_struct * tsk, struct vm_area_struct * vma,
/* we're bypassing pagetables, so we have to set the dirty bit ourselves */
/* this should also re-instate whatever read-only mode there was before */
set_pte(pgtable, pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
invalidate();
flush_tlb();
}
static struct vm_area_struct * find_extend_vma(struct task_struct * tsk,
......
......@@ -135,7 +135,7 @@ unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
init_task.kernel_stack_page = INIT_STACK;
load_PCB(&init_task.tss);
invalidate_all();
flush_tlb_all();
return start_mem;
}
......
......@@ -141,6 +141,7 @@ CONFIG_ISO9660_FS=y
# Character devices
#
CONFIG_SERIAL=y
# CONFIG_DIGI is not set
# CONFIG_CYCLADES is not set
# CONFIG_STALDRV is not set
# CONFIG_PRINTER is not set
......
......@@ -370,7 +370,7 @@ asmlinkage void do_fast_IRQ(int irq)
{
struct irqaction * action = *(irq + irq_action);
#ifdef __SMP__
/* IRQ 13 is allowed - thats an invalidate */
/* IRQ 13 is allowed - thats a flush tlb */
if(smp_threads_ready && active_kernel_processor!=smp_processor_id() && irq!=13)
panic("fast_IRQ %d: active processor set wrongly(%d not %d).\n", irq, active_kernel_processor, smp_processor_id());
#endif
......
......@@ -160,7 +160,7 @@ int cpu_idle(void *unused)
* Oops.. This is kind of important in some cases...
*/
if(clear_bit(smp_processor_id(), &smp_invalidate_needed))
local_invalidate();
local_flush_tlb();
}
if (0==(0x7fffffff & smp_process_available)){
clear_bit(31,&smp_process_available);
......
......@@ -179,7 +179,7 @@ static void put_long(struct task_struct * tsk, struct vm_area_struct * vma, unsi
/* we're bypassing pagetables, so we have to set the dirty bit ourselves */
/* this should also re-instate whatever read-only mode there was before */
set_pte(pgtable, pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
invalidate();
flush_tlb();
}
static struct vm_area_struct * find_extend_vma(struct task_struct * tsk, unsigned long addr)
......
......@@ -346,7 +346,7 @@ int smp_scan_config(unsigned long base, unsigned long length)
cfg=pg0[0];
pg0[0] = (apic_addr | 7);
local_invalidate();
local_flush_tlb();
boot_cpu_id = GET_APIC_ID(*((volatile unsigned long *) APIC_ID));
......@@ -355,7 +355,7 @@ int smp_scan_config(unsigned long base, unsigned long length)
*/
pg0[0]= cfg;
local_invalidate();
local_flush_tlb();
/*
*
......@@ -563,11 +563,11 @@ void smp_callin(void)
load_ldt(0);
/* printk("Testing faulting...\n");
*(long *)0=1; OOPS... */
local_invalidate();
local_flush_tlb();
while(!smp_commenced);
if (cpu_number_map[cpuid] == -1)
while(1);
local_invalidate();
local_flush_tlb();
SMP_PRINTK(("Commenced..\n"));
load_TR(cpu_number_map[cpuid]);
......@@ -716,7 +716,7 @@ void smp_boot_cpus(void)
CMOS_WRITE(0xa, 0xf);
pg0[0]=7;
local_invalidate();
local_flush_tlb();
*((volatile unsigned short *) 0x469) = ((unsigned long)stack)>>4;
*((volatile unsigned short *) 0x467) = 0;
......@@ -725,7 +725,7 @@ void smp_boot_cpus(void)
*/
pg0[0]= cfg;
local_invalidate();
local_flush_tlb();
/*
* Be paranoid about clearing APIC errors.
......@@ -874,7 +874,7 @@ void smp_boot_cpus(void)
cfg = pg0[0];
pg0[0] = 3; /* writeable, present, addr 0 */
local_invalidate();
local_flush_tlb();
/*
* Paranoid: Set warm reset code and vector here back
......@@ -890,7 +890,7 @@ void smp_boot_cpus(void)
*/
pg0[0] = cfg;
local_invalidate();
local_flush_tlb();
/*
* Allow the user to impress friends.
......@@ -927,7 +927,7 @@ void smp_boot_cpus(void)
* get stuck with irq's off waiting to send a message and thus not replying to the person
* spinning for a reply....
*
* In the end invalidate ought to be the NMI and a very very short function (to avoid the old
* In the end flush tlb ought to be the NMI and a very very short function (to avoid the old
* IDE disk problems), and other messages sent with IRQ's enabled in a civilised fashion. That
* will also boost performance.
*/
......@@ -966,7 +966,7 @@ void smp_message_pass(int target, int msg, unsigned long data, int wait)
* Sanity check we don't re-enter this across CPU's. Only the kernel
* lock holder may send messages. For a STOP_CPU we are bringing the
* entire box to the fastest halt we can.. A reschedule carries
* no data and can occur during an invalidate.. guess what panic
* no data and can occur during a flush.. guess what panic
* I got to notice this bug...
*/
......@@ -1081,22 +1081,22 @@ void smp_message_pass(int target, int msg, unsigned long data, int wait)
}
/*
* This is fraught with deadlocks. Linus does an invalidate at a whim
* even with IRQ's off. We have to avoid a pair of crossing invalidates
* This is fraught with deadlocks. Linus does a flush tlb at a whim
* even with IRQ's off. We have to avoid a pair of crossing flushes
* or we are doomed. See the notes about smp_message_pass.
*/
void smp_invalidate(void)
void smp_flush_tlb(void)
{
unsigned long flags;
if(smp_activated && smp_processor_id()!=active_kernel_processor)
panic("CPU #%d:Attempted invalidate IPI when not AKP(=%d)\n",smp_processor_id(),active_kernel_processor);
panic("CPU #%d:Attempted flush tlb IPI when not AKP(=%d)\n",smp_processor_id(),active_kernel_processor);
/* printk("SMI-");*/
/*
* The assignment is safe because its volatile so the compiler cannot reorder it,
* because the i586 has strict memory ordering and because only the kernel lock holder
* may issue an invalidate. If you break any one of those three change this to an atomic
* may issue a tlb flush. If you break any one of those three change this to an atomic
* bus locked or.
*/
......@@ -1104,7 +1104,7 @@ void smp_invalidate(void)
/*
* Processors spinning on the lock will see this IRQ late. The smp_invalidate_needed map will
* ensure they dont do a spurious invalidate or miss one.
* ensure they dont do a spurious flush tlb or miss one.
*/
save_flags(flags);
......@@ -1115,7 +1115,7 @@ void smp_invalidate(void)
* Flush the local TLB
*/
local_invalidate();
local_flush_tlb();
restore_flags(flags);
......@@ -1174,7 +1174,7 @@ void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs)
case MSG_INVALIDATE_TLB:
if(clear_bit(i,(unsigned long *)&smp_invalidate_needed))
local_invalidate();
local_flush_tlb();
set_bit(i, (unsigned long *)&cpu_callin_map[0]);
/* cpu_callin_map[0]|=1<<smp_processor_id();*/
break;
......
......@@ -193,7 +193,7 @@ asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
{
#ifdef CONFIG_SMP_NMI_INVAL
smp_invalidate_rcv();
smp_flush_tlb_rcv();
#else
#ifndef CONFIG_IGNORE_NMI
printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
......
......@@ -96,7 +96,7 @@ static void mark_screen_rdonly(struct task_struct * tsk)
set_pte(pte, pte_wrprotect(*pte));
pte++;
}
invalidate();
flush_tlb();
}
asmlinkage int sys_vm86(struct vm86_struct * v86)
......
......@@ -116,7 +116,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
if (wp_works_ok < 0 && address == TASK_SIZE && (error_code & 1)) {
wp_works_ok = 1;
pg0[0] = pte_val(mk_pte(0, PAGE_SHARED));
invalidate();
flush_tlb();
printk("This processor honours the WP bit even when in supervisor mode. Good.\n");
return;
}
......
......@@ -193,7 +193,7 @@ unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
address += PAGE_SIZE;
}
}
invalidate();
flush_tlb();
return free_area_init(start_mem, end_mem);
}
......@@ -267,10 +267,10 @@ void mem_init(unsigned long start_mem, unsigned long end_mem)
/* test if the WP bit is honoured in supervisor mode */
if (wp_works_ok < 0) {
pg0[0] = pte_val(mk_pte(0, PAGE_READONLY));
invalidate();
flush_tlb();
__asm__ __volatile__("movb 0,%%al ; movb %%al,0": : :"ax", "memory");
pg0[0] = 0;
invalidate();
flush_tlb();
if (wp_works_ok < 0)
wp_works_ok = 0;
}
......
......@@ -5,6 +5,7 @@ mainmenu_option next_comment
comment 'Character devices'
bool 'Standard/generic serial support' CONFIG_SERIAL
bool 'Digiboard PC/X Support' CONFIG_DIGI
tristate 'Cyclades async mux support' CONFIG_CYCLADES
bool 'Stallion multiport serial support' CONFIG_STALDRV
if [ "$CONFIG_STALDRV" = "y" ]; then
......
......@@ -23,11 +23,15 @@ M_OBJS :=
L_OBJS := tty_io.o n_tty.o console.o keyboard.o \
tty_ioctl.o pty.o vt.o mem.o vc_screen.o random.o \
defkeymap.o consolemap.o selection.o
ifeq ($(CONFIG_SERIAL),y)
L_OBJS += serial.o
endif
ifeq ($(CONFIG_DIGI),y)
L_OBJS += pcxx.o
endif
ifeq ($(CONFIG_CYCLADES),y)
L_OBJS += cyclades.o
else
......
/* Definitions for DigiBoard ditty(1) command. */

/* Fallback definitions for modem-control ioctls, used only when the
 * platform's termios headers do not already provide them. */
#if !defined(TIOCMODG)
#define TIOCMODG ('d'<<8) | 250 /* get modem ctrl state */
#define TIOCMODS ('d'<<8) | 251 /* set modem ctrl state */
#endif
#if !defined(TIOCMSET)
#define TIOCMSET ('d'<<8) | 252 /* set modem ctrl state */
#define TIOCMGET ('d'<<8) | 253 /* get modem ctrl state */
#endif
#if !defined(TIOCMBIC)
#define TIOCMBIC ('d'<<8) | 254 /* clear modem ctrl bits */
#define TIOCMBIS ('d'<<8) | 255 /* set modem ctrl bits */
#endif
#if !defined(TIOCSDTR)
#define TIOCSDTR ('e'<<8) | 0 /* set DTR */
#define TIOCCDTR ('e'<<8) | 1 /* clear DTR */
#endif
/************************************************************************
 * Ioctl command arguments for DIGI parameters.
 *
 * GETA/SETA operate on a struct digi_struct; the SETAW/SETAF variants
 * drain (and for SETAF also flush) pending output before applying the
 * new parameters.  The FLOW commands operate on a struct digiflow_struct.
 ************************************************************************/
#define DIGI_GETA ('e'<<8) | 94 /* Read params */
#define DIGI_SETA ('e'<<8) | 95 /* Set params */
#define DIGI_SETAW ('e'<<8) | 96 /* Drain & set params */
#define DIGI_SETAF ('e'<<8) | 97 /* Drain, flush & set params */
#define DIGI_GETFLOW ('e'<<8) | 99 /* Get startc/stopc flow */
/* control characters */
#define DIGI_SETFLOW ('e'<<8) | 100 /* Set startc/stopc flow */
/* control characters */
#define DIGI_GETAFLOW ('e'<<8) | 101 /* Get Aux. startc/stopc */
/* flow control chars */
#define DIGI_SETAFLOW ('e'<<8) | 102 /* Set Aux. startc/stopc */
/* flow control chars */
/* Argument for the DIGI_{GET,SET}FLOW and DIGI_{GET,SET}AFLOW ioctls:
 * the pair of software flow-control characters for a channel. */
struct digiflow_struct {
unsigned char startc; /* flow cntl start char */
unsigned char stopc; /* flow cntl stop char */
};
typedef struct digiflow_struct digiflow_t;
/************************************************************************
 * Values for digi_flags
 *
 * Bit flags selecting which flow-control and modem-handling features
 * the on-board FEP firmware performs for a channel.
 ************************************************************************/
#define DIGI_IXON 0x0001 /* Handle IXON in the FEP */
#define DIGI_FAST 0x0002 /* Fast baud rates */
#define RTSPACE 0x0004 /* RTS input flow control */
#define CTSPACE 0x0008 /* CTS output flow control */
#define DSRPACE 0x0010 /* DSR output flow control */
#define DCDPACE 0x0020 /* DCD output flow control */
#define DTRPACE 0x0040 /* DTR input flow control */
#define DIGI_FORCEDCD 0x0100 /* Force carrier */
#define DIGI_ALTPIN 0x0200 /* Alternate RJ-45 pin config */
#define DIGI_AIXON 0x0400 /* Aux flow control in fep */

/************************************************************************
 * Structure used with ioctl commands for DIGI parameters.
 * (Argument for DIGI_GETA / DIGI_SETA / DIGI_SETAW / DIGI_SETAF.)
 ************************************************************************/
struct digi_struct {
unsigned short digi_flags; /* Flags (see above) */
};
typedef struct digi_struct digi_t;
This diff is collapsed.
This diff is collapsed.
/* Offsets of the FEP firmware's buffers and tables within the board's
 * dual-ported memory window.  NOTE(review): CIN and GLOBAL share offset
 * 0xD10 — presumably the global_data struct starts at the command-in
 * pointer; confirm against the FEP firmware documentation. */
#define CSTART 0x400L
#define CMAX 0x800L
#define ISTART 0x800L
#define IMAX 0xC00L
#define CIN 0xD10L
#define GLOBAL 0xD10L
#define EIN 0xD18L
#define FEPSTAT 0xD20L
#define CHANSTRUCT 0x1000L
#define RXTXBUF 0x4000L
/* Image of the FEP's global mailbox area (at offset GLOBAL in board
 * memory): head/tail pointers and bounds for the command (c*) and
 * event (e*/i*) circular buffers shared with the host. */
struct global_data {
volatile ushort cin;    /* command buffer in (head) pointer */
volatile ushort cout;   /* command buffer out (tail) pointer */
volatile ushort cstart; /* command buffer start offset */
volatile ushort cmax;   /* command buffer end offset */
volatile ushort ein;    /* event buffer in pointer */
volatile ushort eout;   /* event buffer out pointer */
volatile ushort istart; /* event buffer start offset */
volatile ushort imax;   /* event buffer end offset */
};
/* Per-channel structure shared with the FEP firmware (array based at
 * offset CHANSTRUCT in board memory).  The field order and the filler
 * members define the on-board layout — do not reorder or repack.
 * All fields are volatile because the firmware updates them
 * asynchronously to the host. */
struct board_chan {
int filler1;
int filler2;
/* Transmit ring: segment, in/out pointers, size. */
volatile ushort tseg;
volatile ushort tin;
volatile ushort tout;
volatile ushort tmax;
/* Receive ring: segment, in/out pointers, size. */
volatile ushort rseg;
volatile ushort rin;
volatile ushort rout;
volatile ushort rmax;
/* Watermarks for low/high buffer events. */
volatile ushort tlow;
volatile ushort rlow;
volatile ushort rhigh;
volatile ushort incr;
volatile ushort etime;
volatile ushort edelay;
volatile unchar *dev;
/* termios-style flags as the FEP understands them (FEP_* bits). */
volatile ushort iflag;
volatile ushort oflag;
volatile ushort cflag;
volatile ushort gmask;
volatile ushort col;
volatile ushort delay;
volatile ushort imask;
volatile ushort tflush;
int filler3;
int filler4;
int filler5;
int filler6;
/* Channel status bytes maintained by the firmware. */
volatile unchar num;
volatile unchar ract;
volatile unchar bstat;
volatile unchar tbusy;
volatile unchar iempty;
volatile unchar ilow;
volatile unchar idata;
volatile unchar eflag;
volatile unchar tflag;
volatile unchar rflag;
volatile unchar xmask;
volatile unchar xval;
/* Modem status and change/interrupt masks. */
volatile unchar mstat;
volatile unchar mchange;
volatile unchar mint;
volatile unchar lstat;
volatile unchar mtran;
volatile unchar orun;
/* Flow-control characters (aux and primary start/stop). */
volatile unchar startca;
volatile unchar stopca;
volatile unchar startc;
volatile unchar stopc;
volatile unchar vnext;
volatile unchar hflow;
volatile unchar fillc;
volatile unchar ochar;
volatile unchar omask;
unchar filler7;
unchar filler8[28];
};
/* Command codes written into the FEP command ring. */
#define SRXLWATER 0xE0  /* set receive low watermark */
#define SRXHWATER 0xE1  /* set receive high watermark */
#define STOUT 0xE2
#define PAUSETX 0xE3    /* pause transmit */
#define RESUMETX 0xE4   /* resume transmit */
#define SAUXONOFFC 0xE6 /* set aux start/stop chars */
#define SENDBREAK 0xE8
#define SETMODEM 0xE9
#define SETIFLAGS 0xEA
#define SONOFFC 0xEB    /* set start/stop chars */
#define STXLWATER 0xEC  /* set transmit low watermark */
#define PAUSERX 0xEE    /* pause receive */
#define RESUMERX 0xEF   /* resume receive */
#define SETBUFFER 0xF2
#define SETCOOKED 0xF3
#define SETHFLOW 0xF4   /* set hardware flow control */
#define SETCTRLFLAGS 0xF5
#define SETVNEXT 0xF6

/* Event flags reported in the FEP event ring. */
#define BREAK_IND 0x01    /* break received */
#define LOWTX_IND 0x02    /* transmit buffer below low watermark */
#define EMPTYTX_IND 0x04  /* transmit buffer empty */
#define DATA_IND 0x08     /* receive data available */
#define MODEMCHG_IND 0x20 /* modem signal change */

/* Modem signal bits as reported/driven by the FEP. */
#define RTS 0x02
#define CD 0x08
#define DSR 0x10
#define CTS 0x20
#define RI 0x40
#define DTR 0x80
/* These are termios bits as the FEP understands them */
/* c_cflag bit meaning */
/* Values are octal, mirroring the classic termios c_cflag encoding. */
#define FEP_CBAUD 0000017 /* baud rate mask */
#define FEP_B0 0000000 /* hang up */
#define FEP_B50 0000001
#define FEP_B75 0000002
#define FEP_B110 0000003
#define FEP_B134 0000004
#define FEP_B150 0000005
#define FEP_B200 0000006
#define FEP_B300 0000007
#define FEP_B600 0000010
#define FEP_B1200 0000011
#define FEP_B1800 0000012
#define FEP_B2400 0000013
#define FEP_B4800 0000014
#define FEP_B9600 0000015
#define FEP_B19200 0000016
#define FEP_B38400 0000017
#define FEP_EXTA FEP_B19200
#define FEP_EXTB FEP_B38400
#define FEP_CSIZE 0000060 /* character size mask */
#define FEP_CS5 0000000
#define FEP_CS6 0000020
#define FEP_CS7 0000040
#define FEP_CS8 0000060
#define FEP_CSTOPB 0000100 /* two stop bits */
#define FEP_CREAD 0000200  /* enable receiver */
#define FEP_PARENB 0000400 /* enable parity */
#define FEP_PARODD 0001000 /* odd parity */
#define FEP_HUPCL 0002000  /* hang up on last close */
#define FEP_CLOCAL 0004000 /* ignore modem control lines */
#define FEP_CIBAUD 03600000 /* input baud rate (not used) */
#define FEP_CRTSCTS 020000000000 /* flow control */
This diff is collapsed.
/* Offsets of firmware images and mailbox areas in board memory. */
#define FEPCODESEG 0x0200L
#define FEPCODE 0x2000L
#define BIOSCODE 0xf800L
#define MISCGLOBAL 0x0C00L
#define NPORT 0x0C22L    /* firmware-reported port count */
#define MBOX 0x0C40L     /* host<->FEP mailbox */
#define PORTBASE 0x0C90L

/* Bits for the board control register. */
#define FEPCLR 0x00  /* clear all control bits */
#define FEPMEM 0x02  /* enable the memory window */
#define FEPRST 0x04  /* hold the FEP in reset */
#define FEPINT 0x08
#define FEPMASK 0x0e
#define FEPWIN 0x80  /* window-select enable */

/* Board model codes — indices into board_desc[] below. */
#define PCXI 0
#define PCXE 1
#define PCXEVE 2

/* Human-readable board names, indexed by the PCXI/PCXE/PCXEVE codes. */
static char *board_desc[] = {
"PC/Xi (64K)",
"PC/Xe (64K)",
"PC/Xe (8K) ",
};
/* Default software flow-control characters (octal): XON / XOFF. */
#define STARTC 021
#define STOPC 023
#define IAIXON 0x2000

/* Static configuration for one DigiBoard adapter (see boards[]). */
struct board_info {
unchar status;   /* ENABLED / DISABLED */
unchar type;     /* PCXI / PCXE / PCXEVE */
unchar altpin;   /* ON to use the alternate RJ-45 pin config */
ushort numports; /* number of serial ports on the board */
ushort port;     /* I/O port base */
ulong membase;   /* physical base of the shared-memory window */
};

/* Per-channel statusflags bits. */
#define TXSTOPPED 0x01
#define LOWWAIT 0x02
#define EMPTYWAIT 0x04
#define RXSTOPPED 0x08
#define TXBUSY 0x10

/* Values for board_info.status and on/off style fields. */
#define DISABLED 0
#define ENABLED 1
#define OFF 0
#define ON 1

#define FEPTIMEOUT 200000 /* spin-loop bound when waiting on the FEP */

#define SERIAL_TYPE_NORMAL 1
#define SERIAL_TYPE_CALLOUT 2
#define PCXE_EVENT_HANGUP 1

/* Magic value stored in channel.magic to validate channel pointers. */
#define PCXX_MAGIC 0x5c6df104L
/* Host-side state for one serial channel.  Unlike struct board_chan,
 * this lives in kernel memory, not on the board; magic must equal
 * PCXX_MAGIC for a valid channel. */
struct channel {
/* --------- Board/channel information ---------- */
long magic;        /* PCXX_MAGIC when valid */
unchar boardnum;   /* index into boards[] */
unchar channelnum; /* channel index on that board */
uint dev;
long session;
long pgrp;
struct tty_struct *tty;
struct board_info *board;
volatile struct board_chan *brdchan;      /* channel struct in board memory */
volatile struct global_data *mailbox;     /* global mailbox in board memory */
int asyncflags;
int count;         /* open count */
int blocked_open;  /* opens blocked waiting for carrier */
int close_delay;
int event;
struct wait_queue *open_wait;
struct wait_queue *close_wait;
struct tq_struct tqueue;
/* ------------ Async control data ------------- */
unchar modemfake; /* Modem values to be forced */
unchar modem; /* Force values */
ulong statusflags; /* TXSTOPPED/LOWWAIT/EMPTYWAIT/RXSTOPPED/TXBUSY */
unchar omodem; /* FEP output modem status */
unchar imodem; /* FEP input modem status */
unchar hflow;
unchar dsr;
unchar dcd;
/* Current and FEP-side copies of the flow-control characters. */
unchar stopc;
unchar startc;
unchar stopca;
unchar startca;
unchar fepstopc;
unchar fepstartc;
unchar fepstopca;
unchar fepstartca;
/* termios flags last pushed to the FEP (FEP_* encoding). */
ushort fepiflag;
ushort fepcflag;
ushort fepoflag;
/* ---------- Transmit/receive system ---------- */
unchar txwin; /* memory-window selectors for the tx/rx buffers */
unchar rxwin;
ushort txbufsize;
ushort rxbufsize;
unchar *txptr;
unchar *rxptr;
unchar *tmp_buf; /* Temp buffer */
/* ---- Termios data ---- */
ulong c_iflag;
ulong c_cflag;
ulong c_lflag;
ulong c_oflag;
struct termios normal_termios;
struct termios callout_termios;
struct digi_struct digiext; /* DIGI_SETA parameters (digi_flags) */
ulong dummy[8];
};
/* Compile-time board configuration table: one enabled PC/Xi-type board
 * (type 0), altpin ON, 16 ports, I/O base 0x200, memory window at
 * 0xd0000.  Edit NUMCARDS and this initializer to match the hardware. */
#define NUMCARDS 1
struct board_info boards[NUMCARDS]={
{ ENABLED, 0, ON, 16, 0x200, 0xd0000 }
};
......@@ -1837,6 +1837,9 @@ int tty_init(void)
#endif
#ifdef CONFIG_ISTALLION
stli_init();
#endif
#ifdef CONFIG_DIGI
pcxe_init();
#endif
pty_init();
vcs_init();
......
This diff is collapsed.
......@@ -16,17 +16,21 @@ ELP_NEED_HARD_RESET
to 1 may help. As of 3c505.c v0.8 the driver should be able to find
out whether or not this is needed, but I'm not completely sure.
ELP_DEBUG
The driver debug level. 1 is ok for most everything, 0 will provide
less verbose bootup messages, and 2 and 3 are usually too verbose
for anything.
The driver debug level. It's probably best to leave it at 0 most of the time.
If you are having trouble, setting it to 1 may give you more information.
Any higher setting is too verbose for most purposes.
Known problems:
During startup the driver shows the following two messages:
*** timeout at 3c505.c:elp_set_mc_list (line 1158) ***
*** timeout at 3c505.c:elp_set_mc_list (line 1183) ***
These are because upper parts of the networking code attempt
to load multicast address lists to the adapter before the
adapter is properly up and running.
The 3c505 is a slow card, mostly because of the way it talks to the host.
Don't expect any great performance from it.
I am seeing periodic "transmit timed out" and "timeout waiting for PCB
acknowledge" messages under high load. I'm not sure what's causing these -
it seems that the 3c505 occasionally just loses a command. They seem not to
be fatal, anyway.
There may be some initialisation problems still lurking, particularly when
warm-booting from DOS (ELP_NEED_HARD_RESET seems not to help).
Authors:
The driver is mainly written by Craig Southeren, email
......@@ -34,3 +38,4 @@ Authors:
Parts of the driver (adapting the driver to 1.1.4+ kernels,
IRQ/address detection, some changes) and this README by
Juha Laiho <jlaiho@ichaos.nullnet.fi>.
Philip Blundell <pjb27@cam.ac.uk> made some more changes.
......@@ -426,7 +426,7 @@ probe_pss_mpu (struct address_info *hw_config)
#if (defined(CONFIG_MPU401) || defined(CONFIG_MPU_EMU)) && defined(CONFIG_MIDI)
return probe_mpu401 (hw_config);
#else
return 0
return 0;
#endif
}
......@@ -775,15 +775,17 @@ static coproc_operations pss_coproc_operations =
long
attach_pss_mpu (long mem_start, struct address_info *hw_config)
{
int prev_devs;
long ret;
#if (defined(CONFIG_MPU401) || defined(CONFIG_MPU_EMU)) && defined(CONFIG_MIDI)
prev_devs = num_midis;
ret = attach_mpu401 (mem_start, hw_config);
{
int prev_devs;
prev_devs = num_midis;
ret = attach_mpu401 (mem_start, hw_config);
if (num_midis == (prev_devs + 1)) /* The MPU driver installed itself */
midi_devs[prev_devs]->coproc = &pss_coproc_operations;
if (num_midis == (prev_devs + 1)) /* The MPU driver installed itself */
midi_devs[prev_devs]->coproc = &pss_coproc_operations;
}
#endif
return ret;
}
......
......@@ -125,10 +125,10 @@ struct inode_operations ncp_dir_inode_operations = {
/* Here we encapsulate the inode number handling that depends upon the
* mount mode: When we mount a complete server, the memory address of
* the npc_inode_info is used as an inode. When only a single volume
* is mounted, then the DosDirNum is used as the inode number. As this
* is unique for the complete volume, this should enable the NFS
* exportability of a ncpfs-mounted volume.
* the ncp_inode_info is used as the inode number. When only a single
* volume is mounted, then the DosDirNum is used as the inode
* number. As this is unique for the complete volume, this should
* enable the NFS exportability of a ncpfs-mounted volume.
*/
static inline int
......@@ -183,10 +183,6 @@ ncp_dir_read(struct inode *inode, struct file *filp, char *buf, int count)
return -EISDIR;
}
/* In ncpfs, we have unique inodes across all mounted filesystems, for
all inodes that are in memory. That's why it's enough to index the
directory cache by the inode number. */
static kdev_t c_dev = 0;
static unsigned long c_ino = 0;
static int c_size;
......@@ -320,11 +316,6 @@ ncp_readdir(struct inode *inode, struct file *filp,
while (index < c_size)
{
/* We found it. For getwd(), we have to return the
correct inode in d_ino if the inode is currently in
use. Otherwise the inode number does not
matter. (You can argue a lot about this..) */
ino_t ino;
if (ncp_single_volume(server))
......@@ -333,6 +324,10 @@ ncp_readdir(struct inode *inode, struct file *filp,
}
else
{
/* For getwd() we have to return the correct
* inode in d_ino if the inode is currently in
* use. Otherwise the inode number does not
* matter. (You can argue a lot about this..) */
struct ncp_inode_info *ino_info;
ino_info = ncp_find_dir_inode(inode,
entry->i.entryName);
......
/*
* linux/fs/ncp/sock.c
* linux/fs/ncpfs/sock.c
*
* Copyright (C) 1992, 1993 Rick Sladkey
*
......
......@@ -412,10 +412,8 @@ rpc_recv(struct rpc_sock *rsock, struct rpc_wait *slot)
while (rsock->pending != slot) {
if (!slot->w_gotit)
interruptible_sleep_on(&slot->w_wait);
if (slot->w_gotit) {
result = slot->w_result; /* quite important */
return result;
}
if (slot->w_gotit)
return slot->w_result; /* quite important */
if (current->signal & ~current->blocked)
return -ERESTARTSYS;
if (rsock->shutdown)
......@@ -427,15 +425,15 @@ rpc_recv(struct rpc_sock *rsock, struct rpc_wait *slot)
/* Wait for data to arrive */
if ((result = rpc_select(rsock)) < 0) {
dprintk("RPC: select error = %d\n", result);
break;
return result;
}
/* Receive and dispatch */
if ((result = rpc_grok(rsock)) < 0)
break;
return result;
} while (current->timeout && !slot->w_gotit);
return slot->w_gotit? result : -ETIMEDOUT;
return slot->w_gotit? slot->w_result : -ETIMEDOUT;
}
/*
......
......@@ -267,6 +267,8 @@ int mem_mmap(struct inode * inode, struct file * file,
stmp = vma->vm_offset;
dtmp = vma->vm_start;
flush_cache_range(vma->vm_mm, vma->vm_start, vma->vm_end);
flush_cache_range(src_vma->vm_mm, src_vma->vm_start, src_vma->vm_end);
while (dtmp < vma->vm_end) {
while (src_vma && stmp > src_vma->vm_end)
src_vma = src_vma->vm_next;
......@@ -297,8 +299,8 @@ int mem_mmap(struct inode * inode, struct file * file,
dtmp += PAGE_SIZE;
}
invalidate_range(vma->vm_mm, vma->vm_start, vma->vm_end);
invalidate_range(src_vma->vm_mm, src_vma->vm_start, src_vma->vm_end);
flush_tlb_range(vma->vm_mm, vma->vm_start, vma->vm_end);
flush_tlb_range(src_vma->vm_mm, src_vma->vm_start, src_vma->vm_end);
return 0;
}
......
......@@ -37,7 +37,7 @@
#define ST_DEC_EB64P 20 /* EB64+ systype */
#define ST_DEC_EB66P -19 /* EB66 systype */
#define ST_DEC_EBPC64 -20 /* Cabriolet (AlphaPC64) systype */
#defien ST_DEC_EB164 26 /* EB164 systype */
#define ST_DEC_EB164 26 /* EB164 systype */
struct pcb_struct {
unsigned long ksp;
......
......@@ -11,27 +11,34 @@
#include <asm/system.h>
/* Caches aren't brain-dead on the alpha. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(mm, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
/*
* Invalidate current user mapping.
* Flush current user mapping.
*/
static inline void invalidate(void)
static inline void flush_tlb(void)
{
tbiap();
}
/*
* Invalidate everything (kernel mapping may also have
* Flush everything (kernel mapping may also have
* changed due to vmalloc/vfree)
*/
static inline void invalidate_all(void)
static inline void flush_tlb_all(void)
{
tbia();
}
/*
* Invalidate a specified user mapping
* Flush a specified user mapping
*/
static inline void invalidate_mm(struct mm_struct *mm)
static inline void flush_tlb_mm(struct mm_struct *mm)
{
if (mm != current->mm)
mm->context = 0;
......@@ -40,14 +47,14 @@ static inline void invalidate_mm(struct mm_struct *mm)
}
/*
* Page-granular invalidate.
* Page-granular tlb flush.
*
* do a tbisd (type = 2) normally, and a tbis (type = 3)
* if it is an executable mapping. We want to avoid the
* itlb invalidate, because that potentially also does a
* icache invalidate.
* itlb flush, because that potentially also does a
* icache flush.
*/
static inline void invalidate_page(struct vm_area_struct *vma,
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
struct mm_struct * mm = vma->vm_mm;
......@@ -59,10 +66,10 @@ static inline void invalidate_page(struct vm_area_struct *vma,
}
/*
* Invalidate a specified range of user mapping: on the
* alpha we invalidate the whole user tlb
* Flush a specified range of user mapping: on the
* alpha we flush the whole user tlb
*/
static inline void invalidate_range(struct mm_struct *mm,
static inline void flush_tlb_range(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
if (mm != current->mm)
......
......@@ -40,7 +40,7 @@ extern __inline__ void prim_spin_lock(struct spinlock *sp)
if(smp_invalidate_needed&(1<<processor));
while(lock_clear_bit(processor,&smp_invalidate_needed))
local_invalidate();
local_flush_tlb();
sp->spins++;
}
/*
......
......@@ -19,139 +19,146 @@
* the i386 page table tree.
*/
/* Caches aren't brain-dead on the intel. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(mm, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
/*
* TLB invalidation:
* TLB flushing:
*
* - invalidate() invalidates the current mm struct TLBs
* - invalidate_all() invalidates all processes TLBs
* - invalidate_mm(mm) invalidates the specified mm context TLB's
* - invalidate_page(mm, vmaddr) invalidates one page
* - invalidate_range(mm, start, end) invalidates a range of pages
* - flush_tlb() flushes the current mm struct TLBs
* - flush_tlb_all() flushes all processes TLBs
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(mm, start, end) flushes a range of pages
*
* ..but the i386 has somewhat limited invalidation capabilities,
* and page-granular invalidates are available only on i486 and up.
* ..but the i386 has somewhat limited tlb flushing capabilities,
* and page-granular flushes are available only on i486 and up.
*/
#define __invalidate() \
#define __flush_tlb() \
__asm__ __volatile__("movl %%cr3,%%eax\n\tmovl %%eax,%%cr3": : :"ax")
#ifdef CONFIG_M386
#define __invalidate_one(addr) invalidate()
#define __flush_tlb_one(addr) flush_tlb()
#else
#define __invalidate_one(addr) \
#define __flush_tlb_one(addr) \
__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
#endif
#ifndef __SMP__
#define invalidate() __invalidate()
#define invalidate_all() __invalidate()
#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb()
static inline void invalidate_mm(struct mm_struct *mm)
static inline void flush_tlb_mm(struct mm_struct *mm)
{
if (mm == current->mm)
__invalidate();
__flush_tlb();
}
static inline void invalidate_page(struct vm_area_struct *vma,
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
if (vma->vm_mm == current->mm)
__invalidate_one(addr);
__flush_tlb_one(addr);
}
static inline void invalidate_range(struct mm_struct *mm,
static inline void flush_tlb_range(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
if (mm == current->mm)
__invalidate();
__flush_tlb();
}
#else
/*
* We aren't very clever about this yet - SMP could certainly
* avoid some global invalidates..
* avoid some global flushes..
*/
#include <asm/smp.h>
#define local_invalidate() \
__invalidate()
#define local_flush_tlb() \
__flush_tlb()
#undef CLEVER_SMP_INVALIDATE
#ifdef CLEVER_SMP_INVALIDATE
/*
* Smarter SMP invalidation macros.
* Smarter SMP flushing macros.
* c/o Linus Torvalds.
*
* These mean you can really definitely utterly forget about
* writing to user space from interrupts. (Its not allowed anyway).
*
* Doesn't currently work as Linus makes invalidate calls before
* Doesn't currently work as Linus makes flush tlb calls before
* stuff like current/current->mm are setup properly
*/
static inline void invalidate_current_task(void)
static inline void flush_tlb_current_task(void)
{
if (current->mm->count == 1) /* just one copy of this mm */
local_invalidate(); /* and that's us, so.. */
local_flush_tlb(); /* and that's us, so.. */
else
smp_invalidate();
smp_flush_tlb();
}
#define invalidate() invalidate_current_task()
#define flush_tlb() flush_tlb_current_task()
#define invalidate_all() smp_invalidate()
#define flush_tlb_all() smp_flush_tlb()
static inline void invalidate_mm(struct mm_struct * mm)
static inline void flush_tlb_mm(struct mm_struct * mm)
{
if (mm == current->mm && mm->count == 1)
local_invalidate();
local_flush_tlb();
else
smp_invalidate();
smp_flush_tlb();
}
static inline void invalidate_page(struct vm_area_struct * vma,
static inline void flush_tlb_page(struct vm_area_struct * vma,
unsigned long va)
{
if (vma->vm_mm == current->mm && current->mm->count == 1)
__invalidate_one(va);
__flush_tlb_one(va);
else
smp_invalidate();
smp_flush_tlb();
}
static inline void invalidate_range(struct mm_struct * mm,
static inline void flush_tlb_range(struct mm_struct * mm,
unsigned long start, unsigned long end)
{
invalidate_mm(mm);
flush_tlb_mm(mm);
}
#else
#define invalidate() \
smp_invalidate()
#define flush_tlb() \
smp_flush_tlb()
#define invalidate_all() invalidate()
#define flush_tlb_all() flush_tlb()
static inline void invalidate_mm(struct mm_struct *mm)
static inline void flush_tlb_mm(struct mm_struct *mm)
{
invalidate();
flush_tlb();
}
static inline void invalidate_page(struct vm_area_struct *vma,
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
invalidate();
flush_tlb();
}
static inline void invalidate_range(struct mm_struct *mm,
static inline void flush_tlb_range(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
invalidate();
flush_tlb();
}
#endif
#endif
......
......@@ -184,7 +184,7 @@ extern unsigned long cpu_present_map;
extern volatile int cpu_number_map[NR_CPUS];
extern volatile int cpu_logical_map[NR_CPUS];
extern volatile unsigned long smp_invalidate_needed;
extern void smp_invalidate(void);
extern void smp_flush_tlb(void);
extern volatile unsigned long kernel_flag, kernel_counter;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern volatile unsigned char active_kernel_processor;
......
......@@ -32,7 +32,7 @@ extern __inline void lock_kernel(void)
*/
if (test_bit(proc, (void *)&smp_invalidate_needed))
if (clear_bit(proc, (void *)&smp_invalidate_needed))
local_invalidate();
local_flush_tlb();
}
while(test_bit(0, (void *)&kernel_flag));
}
......
......@@ -199,17 +199,17 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
switch (size) {
case 1:
__asm__("xchgb %b0,%1"
:"=q" (x), "=m" (*__xg(ptr))
:"=&q" (x), "=m" (*__xg(ptr))
:"0" (x), "m" (*__xg(ptr)));
break;
case 2:
__asm__("xchgw %w0,%1"
:"=r" (x), "=m" (*__xg(ptr))
:"=&r" (x), "=m" (*__xg(ptr))
:"0" (x), "m" (*__xg(ptr)));
break;
case 4:
__asm__("xchgl %0,%1"
:"=r" (x), "=m" (*__xg(ptr))
:"=&r" (x), "=m" (*__xg(ptr))
:"0" (x), "m" (*__xg(ptr)));
break;
}
......
......@@ -29,6 +29,7 @@ enum {
TIMER_BH = 0,
CONSOLE_BH,
TQUEUE_BH,
DIGI_BH,
SERIAL_BH,
NET_BH,
IMMEDIATE_BH,
......
......@@ -55,6 +55,8 @@
#define HD_TIMER2 24
#define GSCD_TIMER 25
#define DIGI_TIMER 29
struct timer_struct {
unsigned long expires;
void (*fn)(void);
......
......@@ -284,6 +284,7 @@ extern int rs_init(void);
extern int lp_init(void);
extern int pty_init(void);
extern int tty_init(void);
extern int pcxe_init(void);
extern int vcs_init(void);
extern int cy_init(void);
extern int stl_init(void);
......@@ -328,6 +329,7 @@ extern int rs_open(struct tty_struct * tty, struct file * filp);
/* pty.c */
extern int pty_open(struct tty_struct * tty, struct file * filp);
extern int pcxe_open(struct tty_struct *tty, struct file *filp);
/* console.c */
......
......@@ -433,6 +433,7 @@ static int shm_map (struct vm_area_struct *shmd)
/* map page range */
shm_sgn = shmd->vm_pte +
SWP_ENTRY(0, (shmd->vm_offset >> PAGE_SHIFT) << SHM_IDX_SHIFT);
flush_cache_range(shmd->vm_mm, shmd->vm_start, shmd->vm_end);
for (tmp = shmd->vm_start;
tmp < shmd->vm_end;
tmp += PAGE_SIZE, shm_sgn += SWP_ENTRY(0, 1 << SHM_IDX_SHIFT))
......@@ -446,7 +447,7 @@ static int shm_map (struct vm_area_struct *shmd)
return -ENOMEM;
set_pte(page_table, __pte(shm_sgn));
}
invalidate_range(shmd->vm_mm, shmd->vm_start, shmd->vm_end);
flush_tlb_range(shmd->vm_mm, shmd->vm_start, shmd->vm_end);
return 0;
}
......@@ -760,12 +761,13 @@ int shm_swap (int prio, int dma)
}
if (pte_page(pte) != pte_page(page))
printk("shm_swap_out: page and pte mismatch\n");
flush_cache_page(shmd, tmp);
set_pte(page_table,
__pte(shmd->vm_pte + SWP_ENTRY(0, idx << SHM_IDX_SHIFT)));
mem_map[MAP_NR(pte_page(pte))].count--;
if (shmd->vm_mm->rss > 0)
shmd->vm_mm->rss--;
invalidate_page(shmd, tmp);
flush_tlb_page(shmd, tmp);
/* continue looping through circular list */
} while (0);
if ((shmd = shmd->vm_next_share) == shp->attaches)
......
......@@ -125,7 +125,7 @@ static inline void add_to_runqueue(struct task_struct * p)
{
if(clear_bit(cpu,&smp_invalidate_needed))
{
local_invalidate();
local_flush_tlb();
set_bit(cpu,&cpu_callin_map[0]);
}
}
......
......@@ -579,9 +579,9 @@ int filemap_swapout(struct vm_area_struct * vma,
unsigned long page = pte_page(*page_table);
unsigned long entry = SWP_ENTRY(SHM_SWP_TYPE, MAP_NR(page));
flush_cache_page(vma, (offset + vma->vm_start - vma->vm_offset));
set_pte(page_table, __pte(entry));
/* Yuck, perhaps a slightly modified swapout parameter set? */
invalidate_page(vma, (offset + vma->vm_start - vma->vm_offset));
flush_tlb_page(vma, (offset + vma->vm_start - vma->vm_offset));
error = filemap_write_page(vma, offset, page);
if (pte_val(*page_table) == entry)
pte_clear(page_table);
......@@ -618,15 +618,17 @@ static inline int filemap_sync_pte(pte_t * ptep, struct vm_area_struct *vma,
return 0;
if (!pte_dirty(pte))
return 0;
flush_cache_page(vma, address);
set_pte(ptep, pte_mkclean(pte));
invalidate_page(vma, address);
flush_tlb_page(vma, address);
page = pte_page(pte);
mem_map[MAP_NR(page)].count++;
} else {
if (pte_none(pte))
return 0;
flush_cache_page(vma, address);
pte_clear(ptep);
invalidate_page(vma, address);
flush_tlb_page(vma, address);
if (!pte_present(pte)) {
swap_free(pte_val(pte));
return 0;
......@@ -710,12 +712,13 @@ static int filemap_sync(struct vm_area_struct * vma, unsigned long address,
int error = 0;
dir = pgd_offset(current->mm, address);
flush_cache_range(vma->vm_mm, end - size, end);
while (address < end) {
error |= filemap_sync_pmd_range(dir, address, end - address, vma, flags);
address = (address + PGDIR_SIZE) & PGDIR_MASK;
dir++;
}
invalidate_range(vma->vm_mm, end - size, end);
flush_tlb_range(vma->vm_mm, end - size, end);
return error;
}
......
......@@ -135,9 +135,10 @@ void clear_page_tables(struct task_struct * tsk)
printk("%s trying to clear kernel page-directory: not good\n", tsk->comm);
return;
}
flush_cache_mm(tsk->mm);
for (i = 0 ; i < USER_PTRS_PER_PGD ; i++)
free_one_pgd(page_dir + i);
invalidate_mm(tsk->mm);
flush_tlb_mm(tsk->mm);
}
/*
......@@ -156,7 +157,8 @@ void free_page_tables(struct task_struct * tsk)
printk("%s trying to free kernel page-directory: not good\n", tsk->comm);
return;
}
invalidate_mm(tsk->mm);
flush_cache_mm(tsk->mm);
flush_tlb_mm(tsk->mm);
SET_PAGE_DIR(tsk, swapper_pg_dir);
tsk->mm->pgd = swapper_pg_dir; /* or else... */
for (i = 0 ; i < USER_PTRS_PER_PGD ; i++)
......@@ -171,9 +173,10 @@ int new_page_tables(struct task_struct * tsk)
if (!(new_pg = pgd_alloc()))
return -ENOMEM;
page_dir = pgd_offset(&init_mm, 0);
flush_cache_mm(tsk->mm);
memcpy(new_pg + USER_PTRS_PER_PGD, page_dir + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof (pgd_t));
invalidate_mm(tsk->mm);
flush_tlb_mm(tsk->mm);
SET_PAGE_DIR(tsk, new_pg);
tsk->mm->pgd = new_pg;
return 0;
......@@ -285,6 +288,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
cow = (vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE;
src_pgd = pgd_offset(src, address);
dst_pgd = pgd_offset(dst, address);
flush_cache_range(src, vma->vm_start, vma->vm_end);
flush_cache_range(dst, vma->vm_start, vma->vm_end);
while (address < end) {
error = copy_pmd_range(dst_pgd++, src_pgd++, address, end - address, cow);
if (error)
......@@ -292,8 +297,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
address = (address + PGDIR_SIZE) & PGDIR_MASK;
}
/* Note that the src ptes get c-o-w treatment, so they change too. */
invalidate_range(src, vma->vm_start, vma->vm_end);
invalidate_range(dst, vma->vm_start, vma->vm_end);
flush_tlb_range(src, vma->vm_start, vma->vm_end);
flush_tlb_range(dst, vma->vm_start, vma->vm_end);
return error;
}
......@@ -373,12 +378,13 @@ int zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long si
unsigned long end = address + size;
dir = pgd_offset(mm, address);
flush_cache_range(mm, end - size, end);
while (address < end) {
zap_pmd_range(dir, address, end - address);
address = (address + PGDIR_SIZE) & PGDIR_MASK;
dir++;
}
invalidate_range(mm, end - size, end);
flush_tlb_range(mm, end - size, end);
return 0;
}
......@@ -428,6 +434,7 @@ int zeromap_page_range(unsigned long address, unsigned long size, pgprot_t prot)
zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE, prot));
dir = pgd_offset(current->mm, address);
flush_cache_range(current->mm, beg, end);
while (address < end) {
pmd_t *pmd = pmd_alloc(dir, address);
error = -ENOMEM;
......@@ -439,7 +446,7 @@ int zeromap_page_range(unsigned long address, unsigned long size, pgprot_t prot)
address = (address + PGDIR_SIZE) & PGDIR_MASK;
dir++;
}
invalidate_range(current->mm, beg, end);
flush_tlb_range(current->mm, beg, end);
return error;
}
......@@ -499,6 +506,7 @@ int remap_page_range(unsigned long from, unsigned long offset, unsigned long siz
offset -= from;
dir = pgd_offset(current->mm, from);
flush_cache_range(current->mm, beg, from);
while (from < end) {
pmd_t *pmd = pmd_alloc(dir, from);
error = -ENOMEM;
......@@ -510,7 +518,7 @@ int remap_page_range(unsigned long from, unsigned long offset, unsigned long siz
from = (from + PGDIR_SIZE) & PGDIR_MASK;
dir++;
}
invalidate_range(current->mm, beg, from);
flush_tlb_range(current->mm, beg, from);
return error;
}
......@@ -619,19 +627,24 @@ void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma,
if (mem_map[MAP_NR(old_page)].reserved)
++vma->vm_mm->rss;
copy_page(old_page,new_page);
flush_page_to_ram(old_page);
flush_page_to_ram(new_page);
flush_cache_page(vma, address);
set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
free_page(old_page);
invalidate_page(vma, address);
flush_tlb_page(vma, address);
return;
}
flush_cache_page(vma, address);
set_pte(page_table, BAD_PAGE);
flush_tlb_page(vma, address);
free_page(old_page);
oom(tsk);
invalidate_page(vma, address);
return;
}
flush_cache_page(vma, address);
set_pte(page_table, pte_mkdirty(pte_mkwrite(pte)));
invalidate_page(vma, address);
flush_tlb_page(vma, address);
if (new_page)
free_page(new_page);
return;
......@@ -901,6 +914,7 @@ void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma,
}
address &= PAGE_MASK;
if (!vma->vm_ops || !vma->vm_ops->nopage) {
flush_cache_page(vma, address);
get_empty_page(tsk, vma, page_table, write_access);
return;
}
......@@ -914,7 +928,9 @@ void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma,
page = vma->vm_ops->nopage(vma, address, write_access && !(vma->vm_flags & VM_SHARED));
if (!page) {
send_sig(SIGBUS, current, 1);
flush_cache_page(vma, address);
put_page(page_table, BAD_PAGE);
flush_tlb_page(vma, address);
return;
}
/*
......@@ -932,7 +948,9 @@ void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma,
entry = pte_mkwrite(pte_mkdirty(entry));
} else if (mem_map[MAP_NR(page)].count > 1 && !(vma->vm_flags & VM_SHARED))
entry = pte_wrprotect(entry);
flush_cache_page(vma, address);
put_page(page_table, entry);
flush_tlb_page(vma, address);
}
/*
......
......@@ -75,12 +75,13 @@ static void change_protection(unsigned long start, unsigned long end, pgprot_t n
unsigned long beg = start;
dir = pgd_offset(current->mm, start);
flush_cache_range(current->mm, beg, end);
while (start < end) {
change_pmd_range(dir, start, end - start, newprot);
start = (start + PGDIR_SIZE) & PGDIR_MASK;
dir++;
}
invalidate_range(current->mm, beg, end);
flush_tlb_range(current->mm, beg, end);
return;
}
......
......@@ -93,7 +93,8 @@ static int move_page_tables(struct mm_struct * mm,
{
unsigned long offset = len;
invalidate_range(mm, old_addr, old_addr + len);
flush_cache_range(mm, old_addr, old_addr + len);
flush_tlb_range(mm, old_addr, old_addr + len);
/*
* This is not the clever way to do this, but we're taking the
......@@ -115,9 +116,10 @@ static int move_page_tables(struct mm_struct * mm,
* the old page tables)
*/
oops_we_failed:
flush_cache_range(mm, new_addr, new_addr + len);
while ((offset += PAGE_SIZE) < len)
move_one_page(mm, new_addr + offset, old_addr + offset);
invalidate_range(mm, new_addr, new_addr + len);
flush_tlb_range(mm, new_addr, new_addr + len);
zap_page_range(mm, new_addr, new_addr + len);
return -1;
}
......
......@@ -100,12 +100,13 @@ static void free_area_pages(unsigned long address, unsigned long size)
unsigned long end = address + size;
dir = pgd_offset(&init_mm, address);
flush_cache_all();
while (address < end) {
free_area_pmd(dir, address, end - address);
address = (address + PGDIR_SIZE) & PGDIR_MASK;
dir++;
}
invalidate_all();
flush_tlb_all();
}
static inline int alloc_area_pte(pte_t * pte, unsigned long address, unsigned long size)
......@@ -156,6 +157,7 @@ static int alloc_area_pages(unsigned long address, unsigned long size)
unsigned long end = address + size;
dir = pgd_offset(&init_mm, address);
flush_cache_all();
while (address < end) {
pmd_t *pmd = pmd_alloc_kernel(dir, address);
if (!pmd)
......@@ -166,7 +168,7 @@ static int alloc_area_pages(unsigned long address, unsigned long size)
address = (address + PGDIR_SIZE) & PGDIR_MASK;
dir++;
}
invalidate_all();
flush_tlb_all();
return 0;
}
......@@ -217,6 +219,7 @@ static int remap_area_pages(unsigned long address, unsigned long offset, unsigne
offset -= address;
dir = pgd_offset(&init_mm, address);
flush_cache_all();
while (address < end) {
pmd_t *pmd = pmd_alloc_kernel(dir, address);
if (!pmd)
......@@ -227,7 +230,7 @@ static int remap_area_pages(unsigned long address, unsigned long offset, unsigne
address = (address + PGDIR_SIZE) & PGDIR_MASK;
dir++;
}
invalidate_all();
flush_tlb_all();
return 0;
}
......
......@@ -110,8 +110,9 @@ static inline int try_to_swap_out(struct task_struct * tsk, struct vm_area_struc
if (!(entry = get_swap_page()))
return 0;
vma->vm_mm->rss--;
flush_cache_page(vma, address);
set_pte(page_table, __pte(entry));
invalidate_page(vma, address);
flush_tlb_page(vma, address);
tsk->nswap++;
rw_swap_page(WRITE, entry, (char *) page, wait);
}
......@@ -125,14 +126,16 @@ static inline int try_to_swap_out(struct task_struct * tsk, struct vm_area_struc
return 0;
}
vma->vm_mm->rss--;
flush_cache_page(vma, address);
set_pte(page_table, __pte(entry));
invalidate_page(vma, address);
flush_tlb_page(vma, address);
free_page(page);
return 1;
}
vma->vm_mm->rss--;
flush_cache_page(vma, address);
pte_clear(page_table);
invalidate_page(vma, address);
flush_tlb_page(vma, address);
entry = page_unuse(page);
free_page(page);
return entry;
......
......@@ -41,13 +41,18 @@ int register_firewall(int pf, struct firewall_ops *fw)
p=&((*p)->next);
}
fw->next=*p;
/*
* We need to set p atomically in case someone runs down the list
* at the wrong moment. This saves locking it
* We need to use a memory barrier to make sure that this
* works correctly even in SMP with weakly ordered writes.
*
* This is atomic wrt interrupts (and generally walking the
* chain), but not wrt itself (so you can't call this from
* an interrupt. Not that you'd want to).
*/
xchg(p,fw);
fw->next=*p;
mb();
*p = fw;
/*
* And release the sleep lock
......@@ -83,7 +88,7 @@ int unregister_firewall(int pf, struct firewall_ops *fw)
if(*nl==fw)
{
struct firewall_ops *f=fw->next;
xchg(nl,f);
*nl = f;
firewall_lock=0;
return 0;
}
......
......@@ -1384,35 +1384,6 @@ static int tcp_data(struct sk_buff *skb, struct sock *sk,
if (!skb->acked)
{
/*
* This is important. If we don't have much room left,
* we need to throw out a few packets so we have a good
* window. Note that mtu is used, not mss, because mss is really
* for the send side. He could be sending us stuff as large as mtu.
*/
while (sock_rspace(sk) < sk->mtu)
{
struct sk_buff * skb1 = skb_peek(&sk->receive_queue);
if (skb1 == NULL)
{
printk("INET: tcp.c:tcp_data memory leak detected.\n");
break;
}
/*
* Don't throw out something that has been acked.
*/
if (skb1->acked)
{
break;
}
skb_unlink(skb1);
kfree_skb(skb1, FREE_READ);
}
tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
sk->ack_backlog++;
tcp_reset_xmit_timer(sk, TIME_WRITE, min(sk->ato, HZ/2));
......@@ -1500,6 +1471,66 @@ static inline void tcp_urg(struct sock *sk, struct tcphdr *th, unsigned long len
}
}
/*
* Throw out all unnecessary packets: we've gone over the
* receive queue limit. This shouldn't happen in a normal
* TCP connection, but we might have gotten duplicates etc.
*/
static inline void tcp_forget_unacked(struct sk_buff_head * list)
{
for (;;) {
struct sk_buff * skb = list->prev;
/* gone through it all? */
if (skb == (struct sk_buff *) list)
break;
if (skb->acked)
break;
__skb_unlink(skb, list);
}
}
/*
* This should be a bit smarter and remove partially
* overlapping stuff too, but this should be good
* enough for any even remotely normal case (and the
* worst that can happen is that we have a few
* unnecessary packets in the receive queue).
*/
static inline void tcp_remove_dups(struct sk_buff_head * list)
{
struct sk_buff * skb = list->next;
for (;;) {
struct sk_buff * next;
if (skb == (struct sk_buff *) list)
break;
next = skb->next;
if (next->seq == skb->seq) {
if (before(next->end_seq, skb->end_seq)) {
__skb_unlink(next, list);
continue;
}
__skb_unlink(skb, list);
}
skb = next;
}
}
static void prune_queue(struct sk_buff_head * list)
{
/*
* Throw out things we haven't acked.
*/
tcp_forget_unacked(list);
/*
* Throw out duplicates
*/
tcp_remove_dups(list);
}
/*
* A TCP packet has arrived.
......@@ -1846,19 +1877,6 @@ int tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
* now drop it (we must process the ack first to avoid
* deadlock cases).
*/
#if 0
/*
* Is this test really a good idea? We should
* throw away packets that aren't in order, not
* new packets.
*/
if (sk->rmem_alloc >= sk->rcvbuf)
{
kfree_skb(skb, FREE_READ);
return(0);
}
#endif
/*
* Process urgent data
......@@ -1873,6 +1891,13 @@ int tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
if(tcp_data(skb,sk, saddr, len))
kfree_skb(skb, FREE_READ);
/*
* If our receive queue has grown past its limits,
* try to prune away duplicates etc..
*/
if (sk->rmem_alloc > sk->rcvbuf)
prune_queue(&sk->receive_queue);
/*
* And done
*/
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment