Commit 0057ae60 authored by Linus Torvalds

Import 1.1.80

parent 5b7b4923
......@@ -32,6 +32,14 @@ N: Krishna Balasubramanian
E: balasub@cis.ohio-state.edu
D: Wrote SYS V IPC (part of standard kernel since 0.99.10)
N: Dario Ballabio
E: dario@milano.europe.dg.com
D: Author and maintainer of the Ultrastor 14F/34F SCSI driver
D: Author and maintainer of the EATA ISA/EISA SCSI driver
S: Data General Corporation
S: Milano
S: Italy
N: Arindam Banerji
E: axb@cse.nd.edu
D: Contributed ESDI driver routines needed to port LINUX to the PS/2 MCA.
......@@ -303,6 +311,16 @@ S: Great Baddow
S: CM2 8HN
S: United Kingdom
N: Jochen Hein
E: Hein@Informatik.TU-Clausthal.de
D: National Language Support
D: German Support-Disks for SLS/Slackware called SLT
D: Linux Internationalization Project
D: DOSemu
S: Mohlenweg 19
S: 34266 Niestetal
S: Germany
N: Michael Hipp
E: mhipp@student.uni-tuebingen.de
D: drivers for the racal ni5210 & ni6510 ethernet-boards
......@@ -499,15 +517,10 @@ S: Victoria Park 6100
S: Australia
N: John A. Martin
E: jmartin@csc.com
E: jam@acm.org
E: j.a.martin@ieee.org
D: FSSTND contributor
D: Credit file compilator
S: Computer Sciences Corporation
S: 1100 West Street
S: Laurel, Maryland 20707-3587
S: USA
N: Kevin E. Martin
E: martin@cs.unc.edu
......@@ -710,6 +723,14 @@ D: HPFS filesystem
S: Richardson, Texas
S: USA
N: Scott Snyder
E: snyder@fnald0.fnal.gov
D: ATAPI cdrom driver
S: MS 352, Fermilab
S: Post Office Box 500
S: Batavia, Illinois 60510
S: USA
N: Drew Sullivan
E: drew@lethe.north.net
D: iBCS2 developer
......
VERSION = 1
PATCHLEVEL = 1
SUBLEVEL = 79
SUBLEVEL = 80
ARCH = i386
......
......@@ -45,6 +45,8 @@
* $28 - compare status
*/
#define halt .long 0
/*
* Select function type and registers
*/
......@@ -66,17 +68,21 @@
* For 32-bit operations, we need to extend to 64-bit
*/
#ifdef INTSIZE
#define function func(lu)
#define ufunction func(lu)
#define sfunction func(l)
#define LONGIFY(x) zapnot x,15,x
#define SLONGIFY(x) addl x,0,x
#else
#define function func(qu)
#define ufunction func(qu)
#define sfunction func(q)
#define LONGIFY(x)
#define SLONGIFY(x)
#endif
.set noat
.globl function
.ent function
function:
.globl ufunction
.ent ufunction
ufunction:
subq $30,32,$30
stq $0, 0($30)
stq $1, 8($30)
......@@ -113,4 +119,18 @@ function:
ldq $2, 16($30)
addq $30,32,$30
ret $31,($23),1
.end function
.end ufunction
/*
* The "signed" version just does a halt if either of the values is
* signed: the kernel shouldn't mess with signed divides anyway (who
* knows what way they'll round..)
*/
.globl sfunction
.ent sfunction
sfunction:
bis $24,$25,$28
SLONGIFY($28)
bge $28,ufunction
halt
.end sfunction
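Editor's note: a minimal C sketch (not part of this commit) of the guard sfunction performs above, assuming $24 and $25 hold the two divide operands as elsewhere in this file:

        /* Illustrative only: mirrors the bis/SLONGIFY/bge sequence above. */
        int signed_divide_allowed(long a, long b, int intsize)
        {
                long merged = a | b;            /* bis $24,$25,$28 */
                if (intsize)
                        merged = (int) merged;  /* SLONGIFY: addl $28,0,$28 */
                return merged >= 0;             /* bge $28,ufunction; otherwise halt */
        }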
......@@ -39,8 +39,8 @@ endif
HEAD := arch/i386/kernel/head.o
SUBDIRS := $(SUBDIRS) arch/i386/kernel
ARCHIVES := arch/i386/kernel/kernel.o $(ARCHIVES)
SUBDIRS := $(SUBDIRS) arch/i386/kernel arch/i386/mm
ARCHIVES := arch/i386/kernel/kernel.o arch/i386/mm/mm.o $(ARCHIVES)
ifdef CONFIG_IBCS
SUBDIRS := $(SUBDIRS) arch/i386/ibcs
......
......@@ -134,7 +134,7 @@ if [ "$CONFIG_NET_ISA" = "y" ]; then
bool 'DEPCA support' CONFIG_DEPCA n
bool 'EtherWorks 3 support' CONFIG_EWRK3 n
if [ "$CONFIG_NET_ALPHA" = "y" ]; then
# bool 'Arcnet support' CONFIG_ARCNET n
bool 'Arcnet support' CONFIG_ARCNET n
bool 'AT1700 support' CONFIG_AT1700 n
# bool 'EtherExpressPro support' CONFIG_EEXPRESS_PRO n
bool 'EtherExpress support' CONFIG_EEXPRESS n
......
/*
* linux/kernel/ioport.c
* linux/arch/i386/kernel/ioport.c
*
* This contains the io-permission bitmap code - written by obz, with changes
* by Linus.
......@@ -11,50 +11,8 @@
#include <linux/types.h>
#include <linux/ioport.h>
#define IOTABLE_SIZE 32
typedef struct resource_entry_t {
u_long from, num;
const char *name;
struct resource_entry_t *next;
} resource_entry_t;
static resource_entry_t iolist = { 0, 0, "", NULL };
static resource_entry_t iotable[IOTABLE_SIZE];
#define _IODEBUG
#ifdef IODEBUG
static char * ios(unsigned long l)
{
static char str[33] = { '\0' };
int i;
unsigned long mask;
for (i = 0, mask = 0x80000000; i < 32; ++i, mask >>= 1)
str[i] = (l & mask) ? '1' : '0';
return str;
}
static void dump_io_bitmap(void)
{
int i, j;
int numl = sizeof(current->tss.io_bitmap) >> 2;
for (i = j = 0; j < numl; ++i)
{
printk("%4d [%3x]: ", 64*i, 64*i);
printk("%s ", ios(current->tss.io_bitmap[j++]));
if (j < numl)
printk("%s", ios(current->tss.io_bitmap[j++]));
printk("\n");
}
}
#endif
/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
asmlinkage void set_bitmap(unsigned long *bitmap, short base, short extent, int new_value)
static void set_bitmap(unsigned long *bitmap, short base, short extent, int new_value)
{
int mask;
unsigned long *bitmap_base = bitmap + (base >> 5);
......@@ -87,22 +45,6 @@ asmlinkage void set_bitmap(unsigned long *bitmap, short base, short extent, int
}
}
/*
* This generates the report for /proc/ioports
*/
int get_ioport_list(char *buf)
{
resource_entry_t *p;
int len = 0;
for (p = iolist.next; (p) && (len < 4000); p = p->next)
len += sprintf(buf+len, "%04lx-%04lx : %s\n",
p->from, p->from+p->num-1, p->name);
if (p)
len += sprintf(buf+len, "4K limit reached!\n");
return len;
}
/*
* this changes the io permissions bitmap in the current task.
*/
......@@ -115,9 +57,6 @@ asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int turn_on)
if (!suser())
return -EPERM;
#ifdef IODEBUG
printk("io: from=%d num=%d %s\n", from, num, (turn_on ? "on" : "off"));
#endif
set_bitmap((unsigned long *)current->tss.io_bitmap, from, num, !turn_on);
return 0;
}
......@@ -148,99 +87,3 @@ asmlinkage int sys_iopl(long ebx,long ecx,long edx,
*(&eflags) = (eflags & 0xffffcfff) | (level << 12);
return 0;
}
/*
* The workhorse function: find where to put a new entry
*/
static resource_entry_t *find_gap(resource_entry_t *root,
u_long from, u_long num)
{
unsigned long flags;
resource_entry_t *p;
if (from > from+num-1)
return NULL;
save_flags(flags);
cli();
for (p = root; ; p = p->next) {
if ((p != root) && (p->from+p->num-1 >= from)) {
p = NULL;
break;
}
if ((p->next == NULL) || (p->next->from > from+num-1))
break;
}
restore_flags(flags);
return p;
}
/*
* Call this from the device driver to register the ioport region.
*/
void request_region(unsigned int from, unsigned int num, const char *name)
{
resource_entry_t *p;
int i;
for (i = 0; i < IOTABLE_SIZE; i++)
if (iotable[i].num == 0)
break;
if (i == IOTABLE_SIZE)
printk("warning: ioport table is full\n");
else {
p = find_gap(&iolist, from, num);
if (p == NULL)
return;
iotable[i].name = name;
iotable[i].from = from;
iotable[i].num = num;
iotable[i].next = p->next;
p->next = &iotable[i];
return;
}
}
/*
* This is for compatibility with older drivers.
* It can be removed when all driver call the new function.
*/
void snarf_region(unsigned int from, unsigned int num)
{
request_region(from,num,"No name given.");
}
/*
* Call this when the device driver is unloaded
*/
void release_region(unsigned int from, unsigned int num)
{
resource_entry_t *p, *q;
for (p = &iolist; ; p = q) {
q = p->next;
if (q == NULL)
break;
if ((q->from == from) && (q->num == num)) {
q->num = 0;
p->next = q->next;
return;
}
}
}
/*
* Call this to check the ioport region before probing
*/
int check_region(unsigned int from, unsigned int num)
{
return (find_gap(&iolist, from, num) == NULL) ? -EBUSY : 0;
}
/* Called from init/main.c to reserve IO ports. */
void reserve_setup(char *str, int *ints)
{
int i;
for (i = 1; i < ints[0]; i += 2)
request_region(ints[i], ints[i+1], "reserved");
}
......@@ -23,6 +23,27 @@
#include <asm/segment.h>
#include <asm/system.h>
/*
* Tell us the machine setup..
*/
char hard_math = 0; /* set by boot/head.S */
char x86 = 0; /* set by boot/head.S to 3 or 4 */
char x86_model = 0; /* set by boot/head.S */
char x86_mask = 0; /* set by boot/head.S */
int x86_capability = 0; /* set by boot/head.S */
int fdiv_bug = 0; /* set if Pentium(TM) with FP bug */
char x86_vendor_id[13] = "Unknown";
char ignore_irq13 = 0; /* set if exception 16 works */
char wp_works_ok = 0; /* set if paging hardware honours WP */
char hlt_works_ok = 1; /* set if the "hlt" instruction works */
/*
* Bus types ..
*/
int EISA_bus = 0;
asmlinkage void ret_from_sys_call(void) __asm__("ret_from_sys_call");
/*
......
......@@ -87,7 +87,7 @@ static unsigned long get_long(struct vm_area_struct * vma, unsigned long addr)
unsigned long page;
repeat:
page = *PAGE_DIR_OFFSET(vma->vm_task->tss.cr3, addr);
page = *PAGE_DIR_OFFSET(vma->vm_task, addr);
if (page & PAGE_PRESENT) {
page &= PAGE_MASK;
page += PAGE_PTR(addr);
......@@ -121,7 +121,7 @@ static void put_long(struct vm_area_struct * vma, unsigned long addr,
int readonly = 0;
repeat:
page = *PAGE_DIR_OFFSET(vma->vm_task->tss.cr3, addr);
page = *PAGE_DIR_OFFSET(vma->vm_task, addr);
if (page & PAGE_PRESENT) {
page &= PAGE_MASK;
page += PAGE_PTR(addr);
......
......@@ -311,6 +311,8 @@ void trap_init(void)
int i;
struct desc_struct * p;
if (strncmp((char*)0x0FFFD9, "EISA", 4) == 0)
EISA_bus = 1;
set_call_gate(&default_ldt,lcall7);
set_trap_gate(0,&divide_error);
set_trap_gate(1,&debug);
......
#
# Makefile for the linux i386-specific parts of the memory manager.
#
# Note! Dependencies are done automagically by 'make dep', which also
# removes any old dependencies. DON'T put your own dependencies here
# unless it's something special (ie not a .c file).
#
# Note 2! The CFLAGS definition is now in the main makefile...
.c.o:
$(CC) $(CFLAGS) -c $<
.s.o:
$(AS) -o $*.o $<
.c.s:
$(CC) $(CFLAGS) -S $<
OBJS = fault.o
mm.o: $(OBJS)
$(LD) -r -o mm.o $(OBJS)
modules:
dep:
$(CPP) -M *.c > .depend
#
# include a dependency file if one exists
#
ifeq (.depend,$(wildcard .depend))
include .depend
endif
/*
* linux/arch/i386/mm/fault.c
*
* Copyright (C) 1995 Linus Torvalds
*/
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <asm/system.h>
#include <asm/segment.h>
extern unsigned long pg0[1024]; /* page table for 0-4MB for everybody */
extern void scsi_mem_init(unsigned long);
extern void sound_mem_init(void);
extern void die_if_kernel(char *,struct pt_regs *,long);
extern void show_net_buffers(void);
/*
* Define this if things work differently on a i386 and a i486:
* it will (on a i486) warn about kernel memory accesses that are
* done without a 'verify_area(VERIFY_WRITE,..)'
*/
#undef CONFIG_TEST_VERIFY_AREA
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*/
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
struct vm_area_struct * vma;
unsigned long address;
unsigned long page;
/* get the address */
__asm__("movl %%cr2,%0":"=r" (address));
for (vma = current->mm->mmap ; ; vma = vma->vm_next) {
if (!vma)
goto bad_area;
if (vma->vm_end > address)
break;
}
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur)
goto bad_area;
vma->vm_offset -= vma->vm_start - (address & PAGE_MASK);
vma->vm_start = (address & PAGE_MASK);
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
if (regs->eflags & VM_MASK) {
unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
if (bit < 32)
current->tss.screen_bitmap |= 1 << bit;
}
if (!(vma->vm_page_prot & PAGE_USER))
goto bad_area;
if (error_code & PAGE_PRESENT) {
if (!(vma->vm_page_prot & (PAGE_RW | PAGE_COW)))
goto bad_area;
#ifdef CONFIG_TEST_VERIFY_AREA
if (regs->cs == KERNEL_CS)
printk("WP fault at %08x\n", regs->eip);
#endif
do_wp_page(vma, address, error_code);
return;
}
do_no_page(vma, address, error_code);
return;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
if (error_code & PAGE_USER) {
current->tss.cr2 = address;
current->tss.error_code = error_code;
current->tss.trap_no = 14;
send_sig(SIGSEGV, current, 1);
return;
}
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
if (wp_works_ok < 0 && address == TASK_SIZE && (error_code & PAGE_PRESENT)) {
wp_works_ok = 1;
pg0[0] = PAGE_SHARED;
invalidate();
printk("This processor honours the WP bit even when in supervisor mode. Good.\n");
return;
}
if ((unsigned long) (address-TASK_SIZE) < PAGE_SIZE) {
printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
pg0[0] = PAGE_SHARED;
} else
printk(KERN_ALERT "Unable to handle kernel paging request");
printk(" at virtual address %08lx\n",address);
__asm__("movl %%cr3,%0" : "=r" (page));
printk(KERN_ALERT "current->tss.cr3 = %08lx, %%cr3 = %08lx\n",
current->tss.cr3, page);
page = ((unsigned long *) page)[address >> 22];
printk(KERN_ALERT "*pde = %08lx\n", page);
if (page & PAGE_PRESENT) {
page &= PAGE_MASK;
address &= 0x003ff000;
page = ((unsigned long *) page)[address >> PAGE_SHIFT];
printk(KERN_ALERT "*pte = %08lx\n", page);
}
die_if_kernel("Oops", regs, error_code);
do_exit(SIGKILL);
}
/*
* BAD_PAGE is the page that is used for page faults when linux
* is out-of-memory. Older versions of linux just did a
* do_exit(), but using this instead means there is less risk
* for a process dying in kernel mode, possibly leaving an inode
* unused etc..
*
* BAD_PAGETABLE is the accompanying page-table: it is initialized
* to point to BAD_PAGE entries.
*
* ZERO_PAGE is a special page that is used for zero-initialized
* data and COW.
*/
unsigned long __bad_pagetable(void)
{
extern char empty_bad_page_table[PAGE_SIZE];
__asm__ __volatile__("cld ; rep ; stosl":
:"a" (BAD_PAGE + PAGE_TABLE),
"D" ((long) empty_bad_page_table),
"c" (PTRS_PER_PAGE)
:"di","cx");
return (unsigned long) empty_bad_page_table;
}
unsigned long __bad_page(void)
{
extern char empty_bad_page[PAGE_SIZE];
__asm__ __volatile__("cld ; rep ; stosl":
:"a" (0),
"D" ((long) empty_bad_page),
"c" (PTRS_PER_PAGE)
:"di","cx");
return (unsigned long) empty_bad_page;
}
unsigned long __zero_page(void)
{
extern char empty_zero_page[PAGE_SIZE];
__asm__ __volatile__("cld ; rep ; stosl":
:"a" (0),
"D" ((long) empty_zero_page),
"c" (PTRS_PER_PAGE)
:"di","cx");
return (unsigned long) empty_zero_page;
}
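Editor's note: the three helpers above all use the same "cld; rep; stosl" idiom, which stores EAX into PTRS_PER_PAGE consecutive longwords starting at EDI. A hedged C equivalent of __bad_pagetable(), added here only for illustration and not part of the commit:

        /* Illustrative only: what the inline assembly in __bad_pagetable() does. */
        unsigned long bad_pagetable_equivalent(void)
        {
                extern char empty_bad_page_table[PAGE_SIZE];
                unsigned long *p = (unsigned long *) empty_bad_page_table;
                int i;

                for (i = 0; i < PTRS_PER_PAGE; i++)
                        p[i] = BAD_PAGE + PAGE_TABLE;   /* every entry points at the bad page */
                return (unsigned long) empty_bad_page_table;
        }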
void show_mem(void)
{
int i,free = 0,total = 0,reserved = 0;
int shared = 0;
printk("Mem-info:\n");
show_free_areas();
printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
i = high_memory >> PAGE_SHIFT;
while (i-- > 0) {
total++;
if (mem_map[i] & MAP_PAGE_RESERVED)
reserved++;
else if (!mem_map[i])
free++;
else
shared += mem_map[i]-1;
}
printk("%d pages of RAM\n",total);
printk("%d free pages\n",free);
printk("%d reserved pages\n",reserved);
printk("%d pages shared\n",shared);
show_buffers();
#ifdef CONFIG_NET
show_net_buffers();
#endif
}
extern unsigned long free_area_init(unsigned long, unsigned long);
/*
* paging_init() sets up the page tables - note that the first 4MB are
* already mapped by head.S.
*
* This routine also unmaps the page at virtual kernel address 0, so
* that we can trap those pesky NULL-reference errors in the kernel.
*/
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
unsigned long * pg_dir;
unsigned long * pg_table;
unsigned long tmp;
unsigned long address;
/*
* Physical page 0 is special; it's not touched by Linux since BIOS
* and SMM (for laptops with [34]86/SL chips) may need it. It is read
* and write protected to detect null pointer references in the
* kernel.
*/
#if 0
memset((void *) 0, 0, PAGE_SIZE);
#endif
start_mem = PAGE_ALIGN(start_mem);
address = 0;
pg_dir = swapper_pg_dir;
while (address < end_mem) {
tmp = *(pg_dir + 768); /* at virtual addr 0xC0000000 */
if (!tmp) {
tmp = start_mem | PAGE_TABLE;
*(pg_dir + 768) = tmp;
start_mem += PAGE_SIZE;
}
*pg_dir = tmp; /* also map it in at 0x0000000 for init */
pg_dir++;
pg_table = (unsigned long *) (tmp & PAGE_MASK);
for (tmp = 0 ; tmp < PTRS_PER_PAGE ; tmp++,pg_table++) {
if (address < end_mem)
*pg_table = address | PAGE_SHARED;
else
*pg_table = 0;
address += PAGE_SIZE;
}
}
invalidate();
return free_area_init(start_mem, end_mem);
}
void mem_init(unsigned long start_low_mem,
unsigned long start_mem, unsigned long end_mem)
{
int codepages = 0;
int reservedpages = 0;
int datapages = 0;
unsigned long tmp;
extern int etext;
end_mem &= PAGE_MASK;
high_memory = end_mem;
/* mark usable pages in the mem_map[] */
start_low_mem = PAGE_ALIGN(start_low_mem);
start_mem = PAGE_ALIGN(start_mem);
/*
* IBM messed up *AGAIN* in their thinkpad: 0xA0000 -> 0x9F000.
* They seem to have done something stupid with the floppy
* controller as well..
*/
while (start_low_mem < 0x9f000) {
mem_map[MAP_NR(start_low_mem)] = 0;
start_low_mem += PAGE_SIZE;
}
while (start_mem < high_memory) {
mem_map[MAP_NR(start_mem)] = 0;
start_mem += PAGE_SIZE;
}
#ifdef CONFIG_SCSI
scsi_mem_init(high_memory);
#endif
#ifdef CONFIG_SOUND
sound_mem_init();
#endif
for (tmp = 0 ; tmp < high_memory ; tmp += PAGE_SIZE) {
if (mem_map[MAP_NR(tmp)]) {
if (tmp >= 0xA0000 && tmp < 0x100000)
reservedpages++;
else if (tmp < (unsigned long) &etext)
codepages++;
else
datapages++;
continue;
}
mem_map[MAP_NR(tmp)] = 1;
free_page(tmp);
}
tmp = nr_free_pages << PAGE_SHIFT;
printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
tmp >> 10,
high_memory >> 10,
codepages << (PAGE_SHIFT-10),
reservedpages << (PAGE_SHIFT-10),
datapages << (PAGE_SHIFT-10));
/* test if the WP bit is honoured in supervisor mode */
wp_works_ok = -1;
pg0[0] = PAGE_READONLY;
invalidate();
__asm__ __volatile__("movb 0,%%al ; movb %%al,0": : :"ax", "memory");
pg0[0] = 0;
invalidate();
if (wp_works_ok < 0)
wp_works_ok = 0;
#ifdef CONFIG_TEST_VERIFY_AREA
wp_works_ok = 0;
#endif
return;
}
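Editor's note: the WP probe at the end of mem_init() above cooperates with the wp_works_ok < 0 branch in do_page_fault() earlier in this file; a short summary (not part of the commit) of that handshake:

        /*
         * mem_init():      wp_works_ok = -1; pg0[0] = PAGE_READONLY; invalidate();
         *                  "movb 0,%al; movb %al,0" writes through the kernel
         *                  data segment, i.e. linear address TASK_SIZE.
         * do_page_fault(): on a 486-class CPU the write faults even in supervisor
         *                  mode; the handler sees wp_works_ok < 0, address ==
         *                  TASK_SIZE and PAGE_PRESENT, and sets wp_works_ok = 1.
         * mem_init():      if no fault arrived (a 386), wp_works_ok is still < 0
         *                  and gets forced to 0.
         */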
void si_meminfo(struct sysinfo *val)
{
int i;
i = high_memory >> PAGE_SHIFT;
val->totalram = 0;
val->sharedram = 0;
val->freeram = nr_free_pages << PAGE_SHIFT;
val->bufferram = buffermem;
while (i-- > 0) {
if (mem_map[i] & MAP_PAGE_RESERVED)
continue;
val->totalram++;
if (!mem_map[i])
continue;
val->sharedram += mem_map[i]-1;
}
val->totalram <<= PAGE_SHIFT;
val->sharedram <<= PAGE_SHIFT;
return;
}
......@@ -735,8 +735,8 @@ static void scrdown(int currcons, unsigned int t, unsigned int b)
if (b > video_num_lines || t >= b)
return;
d = (unsigned short *) origin+video_size_row*b;
s = (unsigned short *) origin+video_size_row*(b-1);
d = (unsigned short *) (origin+video_size_row*b);
s = (unsigned short *) (origin+video_size_row*(b-1));
count = (b-t-1)*video_num_columns;
while (count) {
count--;
......
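Editor's note on the console.c scrdown() change just above (not part of the commit): the cast binds tighter than '+', so the unparenthesized form scaled the offset by sizeof(unsigned short).

        /*
         *   (unsigned short *) origin + video_size_row*b
         *      == ((unsigned short *) origin) + video_size_row*b
         *      -> advances by video_size_row*b * sizeof(unsigned short) bytes
         *   (unsigned short *) (origin + video_size_row*b)
         *      -> the intended byte offset of video_size_row*b
         */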
......@@ -171,6 +171,10 @@ ifdef CONFIG_8390
NETDRV_OBJS := $(NETDRV_OBJS) 8390.o
endif
ifdef CONFIG_ARCNET
NETDRV_OBJS := $(NETDRV_OBJS) arcnet.o
endif
ifdef CONFIG_PI
NETDRV_OBJS := $(NETDRV_OBJS) pi2.o
CONFIG_PI = CONFIG_PI
......
......@@ -283,6 +283,14 @@ static struct device ppp0_dev = {
#define NEXT_DEV (&ppp0_dev)
#endif /* PPP */
#ifdef CONFIG_ARCNET
extern int arcnet_probe(struct device *dev);
static struct device arcnet_dev = {
"arc0", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, arcnet_probe, };
# undef NEXT_DEV
# define NEXT_DEV (&arcnet_dev)
#endif
#ifdef CONFIG_DUMMY
extern int dummy_init(struct device *dev);
static struct device dummy_dev = {
......
/*
arcnet.c written 1994 by Avery Pennarun, derived from skeleton.c
by Donald Becker.
Contact Avery at: apenwarr@tourism.807-city.on.ca or
RR #5 Pole Line Road, Thunder Bay, ON, Canada P7C 5M9
!!! This is a dangerous alpha version !!!
**********************
skeleton.c Written 1993 by Donald Becker.
Copyright 1993 United States Government as represented by the Director,
National Security Agency. This software may only be used and distributed
according to the terms of the GNU Public License as modified by SRC,
incorporated herein by reference.
The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
Center of Excellence in Space Data and Information Sciences
Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
**********************
TO DO:
- Polled transmits probably take a lot more CPU time than needed.
Perhaps use the system timer? A better solution would be to
just figure out how to get both xmit and receive IRQ's working
at the same time. No luck yet...
- I'd also like to get ping-pong TX buffers working.
- Test in systems with NON-ARCnet network cards, just to see if
autoprobe kills anything. With any luck, it won't. (It's pretty
strict and careful.)
- cards with shared memory that can be "turned off?"
- examine TRXNET for information about this
*/
/**************************************************************************/
/* define this for "careful" transmitting. Try with and without if you have
* problems.
*/
#define CAREFUL_XMIT
/* define this for an extra-careful memory detect. This should work all
* the time now, but you never know.
*/
#define STRICT_MEM_DETECT
/* define this to use the "old-style" limited MTU by default. It basically
* disables packet splitting. ifconfig can still be used to reset the MTU.
*
* leave this disabled if possible, so it will use ethernet defaults,
* which is our goal.
*/
#undef LIMIT_MTU
/* define this if you have a problem with the card getting "stuck" now and
* then, which can only be fixed by a reboot or resetting the card manually
* via ifconfig up/down. ARCnet will set a timer function which is called
* 8 times every second.
*
* This should no longer be necessary. if you experience "stuck" ARCnet
* drivers, please email apenwarr@tourism.807-city.on.ca or I will remove
* this feature in a future release.
*/
#undef USE_TIMER_HANDLER
/**************************************************************************/
static char *version =
"arcnet.c:v0.32 ALPHA 94/12/26 Avery Pennarun <apenwarr@tourism.807-city.on.ca>\n";
/* Always include 'config.h' first in case the user wants to turn on
or override something. */
#include <linux/config.h>
/*
Sources:
Crynwr arcnet.com/arcether.com packet drivers.
arcnet.c v0.00 dated 1/1/94 and apparently by
Donald Becker - it didn't work :)
skeleton.c v0.05 dated 11/16/93 by Donald Becker
(from Linux Kernel 1.1.45)
...I sure wish I had the ARCnet data sheets right about now!
RFC's 1201 and 1051 (mostly 1201) - re: ARCnet IP packets
net/inet/eth.c (from kernel 1.1.50) for header-building info...
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/malloc.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include "arp.h"
/* debug levels:
* D_OFF production
* D_NORMAL verification
* D_INIT show init/detect messages
* D_DURING show messages during normal use (ie interrupts)
* D_TX show tx packets
* D_RX show tx+rx packets
*/
#define D_OFF 0
#define D_NORMAL 1
#define D_INIT 2
#define D_EXTRA 3
#define D_DURING 4
#define D_TX 5
#define D_RX 6
#ifndef NET_DEBUG
#define NET_DEBUG D_INIT
#endif
static unsigned int net_debug = NET_DEBUG;
#ifndef HAVE_AUTOIRQ
/* From auto_irq.c, in ioport.h for later versions. */
extern void autoirq_setup(int waittime);
extern int autoirq_report(int waittime);
/* The map from IRQ number (as passed to the interrupt handler) to
'struct device'. */
extern struct device *irq2dev_map[16];
#endif
#ifndef HAVE_PORTRESERVE
#define check_region(ioaddr, size) 0
#define snarf_region(ioaddr, size); do ; while (0)
#endif
/* macro to simplify debug checking */
#define BUGLVL(x) if (net_debug>=x)
/* The number of low I/O ports used by the ethercard. */
#define ETHERCARD_TOTAL_SIZE 16
/* Handy defines for ARCnet specific stuff */
/* COM 9026 (?) --> ARCnet register addresses */
#define INTMASK (ioaddr+0) /* writable */
#define STATUS (ioaddr+0) /* readable */
#define COMMAND (ioaddr+1) /* writable, returns random vals on read (?) */
#define RESET (ioaddr+8) /* software reset writable */
/* time needed for various things (in clock ticks, 1/100 sec) */
#define RESETtime 40 /* reset */
#define XMITtime 10 /* send */
#define ACKtime 10 /* acknowledge */
/* these are the max/min lengths of packet data. (including
* ClientData header)
* note: packet sizes 250, 251, 252 are impossible (God knows why)
* so exception packets become necessary.
*
* These numbers are compared with the length of the full packet,
* including ClientData header.
*/
#define MTU (253+EXTRA_CLIENTDATA) /* normal packet max size */
#define MinTU (257+EXTRA_CLIENTDATA) /* extended packet min size */
#define XMTU (508+EXTRA_CLIENTDATA) /* extended packet max size */
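Editor's note: these three constants drive the frame-format choice made later in arcnet_tx(); a minimal sketch (not part of this commit) of that decision:

        /* Illustrative only: how MTU, MinTU and XMTU partition a packet's total
         * length (ClientData header included), mirroring arcnet_tx() below.
         */
        static int arc_pkt_kind(short length)
        {
                if (length <= MTU)      /* fits a normal 256-byte frame */
                        return 0;
                if (length >= MinTU)    /* needs an extended 512-byte frame */
                        return 1;
                return 2;               /* in-between: "exception" packet, padded
                                         * out by 4 extra bytes in arcnet_tx() */
        }

Lengths above XMTU never reach this choice; arcnet_send_packet() splits them into fragments first.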
/* status/interrupt mask bit fields */
#define TXFREEflag 0x001 /* transmitter available */
#define TXACKflag 0x002 /* transmitted msg. ackd */
#define RECONflag 0x004 /* system reconfigured */
#define TESTflag 0x008 /* test flag */
#define RESETflag 0x010 /* power-on-reset */
#define RES1flag 0x020 /* unused */
#define RES2flag 0x040 /* unused */
#define NORXflag 0x080 /* receiver inhibited */
/* in the command register, the following bits have these meanings:
* 0-2 command
* 3-4 page number (for enable rcv/xmt command)
* 7 receive broadcasts
*/
#define NOTXcmd 0x001 /* disable transmitter */
#define NORXcmd 0x002 /* disable receiver */
#define TXcmd 0x003 /* enable transmitter */
#define RXcmd 0x004 /* enable receiver */
#define CONFIGcmd 0x005 /* define configuration */
#define CFLAGScmd 0x006 /* clear flags */
#define TESTcmd 0x007 /* load test flags */
/* flags for "clear flags" command */
#define RESETclear 0x008 /* power-on-reset */
#define CONFIGclear 0x010 /* system reconfigured */
/* flags for "load test flags" command */
#define TESTload 0x008 /* test flag (diagnostic) */
/* byte deposited into first address of buffers on reset */
#define TESTvalue 0321 /* that's octal for 0xD1 :) */
/* for "enable receiver" command */
#define RXbcasts 0x080 /* receive broadcasts */
/* flags for "define configuration" command */
#define NORMALconf 0x000 /* 1-249 byte packets */
#define EXTconf 0x008 /* 250-504 byte packets */
/* buffers (4 total) used for receive and xmit.
*/
#define EnableReceiver() outb(RXcmd|(recbuf<<3)|RXbcasts,COMMAND)
#define TXbuf 2
/* Protocol ID's */
#define ARC_P_IP 212 /* 0xD4 */
#define ARC_P_ARP 213 /* 0xD5 */
#define ARC_P_RARP 214 /* 0xD6 */
/* Length of time between "stuck" checks */
#define TIMERval (HZ/8) /* about 1/8 second */
/* these structures define the format of an arcnet packet. */
#define NORMAL 0
#define EXTENDED 1
#define EXCEPTION 2
/* the header required by the card itself */
struct HardHeader
{
u_char source, /* source ARCnet - filled in automagically */
destination, /* destination ARCnet - 0 for broadcast */
offset1, /* offset of ClientData (256-byte packets) */
offset2; /* offset of ClientData (512-byte packets) */
};
/* a complete ARCnet packet */
union ArcPacket
{
struct HardHeader hardheader; /* the hardware header */
u_char raw[512]; /* raw packet info, incl ClientData */
};
/* the "client data" header - RFC-1201 information
* notice that this screws up if it's not an even number of bytes
* <sigh>
*/
struct ClientData
{
/* data that's NOT part of real packet */
u_char daddr; /* Destination address - stored here,
* but WE MUST GET RID OF IT BEFORE SENDING A
* PACKET!!
*/
u_char stupid; /* filler to make struct an even # of bytes */
/* data that IS part of real packet */
u_char protocol_id, /* ARC_P_IP, ARC_P_ARP, or ARC_P_RARP */
split_flag; /* for use with split packets */
u_short sequence; /* sequence number (?) */
};
#define EXTRA_CLIENTDATA (sizeof(struct ClientData)-4)
/* "Incoming" is information needed for each address that could be sending
* to us. Mostly for partially-received split packets.
*/
struct Incoming
{
struct sk_buff *skb; /* packet data buffer */
unsigned char lastpacket, /* number of last packet (from 1) */
numpackets; /* number of packets in split */
u_short sequence; /* sequence number of assembly */
};
/* Information that needs to be kept for each board. */
struct arcnet_local {
struct enet_statistics stats;
u_char arcnum; /* arcnet number - our 8-bit address */
u_short sequence; /* sequence number (incs with each packet) */
u_char recbuf; /* receive buffer # (0 or 1) */
int intx; /* in TX routine? */
struct timer_list timer; /* the timer interrupt struct */
struct Incoming incoming[256]; /* one from each address */
};
/* Index to functions, as function prototypes. */
extern int arcnet_probe(struct device *dev);
static int arcnet_memprobe(struct device *dev,u_char *addr);
static int arcnet_ioprobe(struct device *dev, short ioaddr);
static int arcnet_open(struct device *dev);
static int arcnet_close(struct device *dev);
static int arcnet_send_packet(struct sk_buff *skb, struct device *dev);
static void careful_xmit_wait(struct device *dev);
static int arcnet_tx(struct device *dev,struct ClientData *hdr,short length,
char *data);
static void arcnet_interrupt(int reg_ptr);
static void arcnet_inthandler(struct device *dev);
static void arcnet_rx(struct device *dev,int recbuf);
static void arcnet_timer(unsigned long arg);
static struct enet_statistics *arcnet_get_stats(struct device *dev);
static void set_multicast_list(struct device *dev, int num_addrs, void *addrs);
/* annoying functions for header/arp/etc building */
int arc_header(unsigned char *buff,struct device *dev,unsigned short type,
void *daddr,void *saddr,unsigned len,struct sk_buff *skb);
int arc_rebuild_header(void *eth,struct device *dev,unsigned long raddr,
struct sk_buff *skb);
unsigned short arc_type_trans(struct sk_buff *skb,struct device *dev);
#define tx_done(dev) 1
#define JIFFER(time) for (delayval=jiffies+(time); delayval>=jiffies;);
static int arcnet_reset(struct device *dev);
/* Check for a network adaptor of this type, and return '0' if one exists.
* If dev->base_addr == 0, probe all likely locations.
* If dev->base_addr == 1, always return failure.
* If dev->base_addr == 2, allocate space for the device and return success
* (detachable devices only).
*/
int
arcnet_probe(struct device *dev)
{
/* I refuse to probe anything less than 0x200, because anyone using
* an address like that should probably be shot.
*/
int *port, ports[] = {/* first the suggested values! */
0x300,0x2E0,0x2F0,0x2D0,
/* ...now everything else possible. */
0x200,0x210,0x220,0x230,0x240,0x250,0x260,0x270,
0x280,0x290,0x2a0,0x2b0,0x2c0,
0x310,0x320,0x330,0x340,0x350,0x360,0x370,
0x380,0x390,0x3a0,0x3b0,0x3c0,0x3d0,0x3e0,0x3f0,
/* a null ends the list */
0};
/* I'm not going to probe under 0xA0000 either, for similar reasons.
*/
unsigned long *addr, addrs[] = {0xD0000,0xE0000,0xA0000,0xB0000,
0xC0000,0xF0000,
/* from <mdrejhon@magi.com> */
0xE1000,
0xDD000,0xDC000,
0xD9000,0xD8000,0xD5000,0xD4000,0xD1000,
0xCD000,0xCC000,
0xC9000,0xC8000,0xC5000,0xC4000,
/* terminator */
0};
int base_addr=dev->base_addr, status=0,delayval;
struct arcnet_local *lp;
if (net_debug) printk(version);
BUGLVL(D_INIT)
printk("arcnet: given: base %Xh, IRQ %Xh, shmem %lXh\n",
dev->base_addr,dev->irq,dev->mem_start);
if (base_addr > 0x1ff) /* Check a single specified location. */
status=arcnet_ioprobe(dev, base_addr);
else if (base_addr > 0) /* Don't probe at all. */
return ENXIO;
else for (port = &ports[0]; *port; port++)
{
int ioaddr = *port;
if (check_region(ioaddr, ETHERCARD_TOTAL_SIZE))
{
BUGLVL(D_INIT)
printk("arcnet: Skipping %Xh because of check_region...\n",
ioaddr);
continue;
}
status=arcnet_ioprobe(dev, ioaddr);
if (!status) break;
}
if (status) return status;
/* ioprobe turned out okay. Now give it a couple seconds to finish
* initializing...
*/
BUGLVL(D_INIT)
printk("arcnet: ioprobe okay! Waiting for reset...\n");
JIFFER(100);
/* okay, now we have to find the shared memory area. */
BUGLVL(D_INIT)
printk("arcnet: starting memory probe, given %lXh\n",
dev->mem_start);
if (dev->mem_start) /* value given - probe just that one */
{
status=arcnet_memprobe(dev,(u_char *)dev->mem_start);
if (status) return status;
}
else /* no value given - probe everything */
{
for (addr = &addrs[0]; *addr; addr++) {
status=arcnet_memprobe(dev,(u_char *)(*addr));
if (!status) break;
}
if (status) return status;
}
/* now reserve the irq... */
{ int irqval = request_irq(dev->irq, &arcnet_interrupt, 0, "arcnet");
if (irqval) {
printk("%s: unable to get IRQ %d (irqval=%d).\n", dev->name,
dev->irq, irqval);
return EAGAIN;
}
}
/* Grab the region so we can find another board if autoIRQ fails. */
snarf_region(dev->base_addr, ETHERCARD_TOTAL_SIZE);
printk("%s: ARCnet card found at %03Xh, IRQ %d, ShMem at %lXh.\n", dev->name,
dev->base_addr, dev->irq, dev->mem_start);
/* Initialize the device structure. */
dev->priv = kmalloc(sizeof(struct arcnet_local), GFP_KERNEL);
memset(dev->priv, 0, sizeof(struct arcnet_local));
lp=(struct arcnet_local *)(dev->priv);
dev->open = arcnet_open;
dev->stop = arcnet_close;
dev->hard_start_xmit = arcnet_send_packet;
dev->get_stats = arcnet_get_stats;
#ifdef HAVE_MULTICAST
dev->set_multicast_list = &set_multicast_list;
#endif
/* Fill in the fields of the device structure with ethernet-generic values. */
ether_setup(dev);
/* And now fill particular ones with arcnet values :) */
dev->type=ARPHRD_ARCNET;
dev->hard_header_len=sizeof(struct ClientData);
BUGLVL(D_EXTRA)
printk("arcnet: ClientData header size is %d.\narcnet: HardHeader size is %d.\n",
sizeof(struct ClientData),sizeof(struct HardHeader));
#if LIMIT_MTU /* the old way - normally, now use ethernet default */
dev->mtu=512-sizeof(struct HardHeader)+EXTRA_CLIENTDATA;
#endif
/* since we strip EXTRA_CLIENTDATA bytes off before sending,
* we let Linux add that many bytes to the packet data...
*/
dev->addr_len=1;
dev->broadcast[0]=0x00;
BUGLVL(D_INIT) printk("arcnet: arcnet_probe: resetting card.\n");
arcnet_reset(dev);
JIFFER(50);
BUGLVL(D_NORMAL)
printk("arcnet: We appear to be station %d (%02Xh)\n",
lp->arcnum,lp->arcnum);
if (lp->arcnum==0)
printk("arcnet: WARNING! Station address 0 is reserved for broadcasts!\n");
if (lp->arcnum==255)
printk("arcnet: WARNING! Station address 255 may confuse DOS networking programs!\n");
dev->dev_addr[0]=lp->arcnum;
lp->sequence=1;
lp->recbuf=0;
dev->hard_header = arc_header;
/* dev->add_arp = arc_add_arp; AVE unavailable in 1.1.51?! */
dev->rebuild_header = arc_rebuild_header;
dev->type_trans = arc_type_trans;
return 0;
}
int arcnet_ioprobe(struct device *dev, short ioaddr)
{
int delayval,airq;
BUGLVL(D_INIT)
printk("arcnet: probing address %Xh\n",ioaddr);
BUGLVL(D_INIT)
printk("arcnet: status1=%Xh\n",inb(STATUS));
/* very simple - all we have to do is reset the card, and if there's
* no irq, it's not an ARCnet. We can also kill two birds with
* one stone because we detect the IRQ at the same time :)
*/
/* reset the card by reading the reset port */
inb(RESET);
JIFFER(RESETtime);
/* if status port is FF, there's certainly no arcnet... give up. */
if (inb(STATUS)==0xFF)
{
BUGLVL(D_INIT)
printk("arcnet: probe failed. Status port empty.\n");
return ENODEV;
}
/* we'll try to be reasonably sure it's an arcnet by making sure
* the value of the COMMAND port changes automatically once in a
* while. I have no idea what those values ARE, but at least
* they work.
*/
{
int initval,curval;
curval=initval=inb(COMMAND);
delayval=jiffies+5;
while (delayval>=jiffies && curval==initval)
curval=inb(COMMAND);
if (curval==initval)
{
printk("arcnet: probe failed. never-changing command port (%02Xh).\n",
initval);
return ENODEV;
}
}
BUGLVL(D_INIT)
printk("arcnet: status2=%Xh\n",inb(STATUS));
/* now we turn the reset bit off so we can IRQ next reset... */
outb(CFLAGScmd|RESETclear|CONFIGclear,COMMAND);
JIFFER(ACKtime);
if (inb(STATUS) & RESETflag) /* reset flag STILL on */
{
BUGLVL(D_INIT)
printk("arcnet: probe failed. eternal reset flag1...(status=%Xh)\n",
inb(STATUS));
return ENODEV;
}
/* set up automatic IRQ detection */
autoirq_setup(0);
/* enable reset IRQ's (shouldn't be necessary, but hey) */
outb(RESETflag,INTMASK);
/* now reset it again to generate an IRQ */
inb(RESET);
JIFFER(RESETtime);
BUGLVL(D_INIT)
printk("arcnet: status3=%Xh\n",inb(STATUS));
/* enable reset IRQ's again */
outb(RESETflag,INTMASK);
/* and turn the reset flag back off */
outb(CFLAGScmd|RESETclear|CONFIGclear,COMMAND);
JIFFER(ACKtime);
BUGLVL(D_INIT)
printk("arcnet: status4=%Xh\n",inb(STATUS));
/* now reset it again to generate an IRQ */
inb(RESET);
JIFFER(RESETtime);
BUGLVL(D_INIT)
printk("arcnet: status5=%Xh\n",inb(STATUS));
airq = autoirq_report(0);
if (net_debug>=D_INIT && airq)
printk("arcnet: autoirq is %d\n", airq);
/* if there was no autoirq AND the user hasn't set any defaults,
* give up.
*/
if (!airq && !(dev->base_addr && dev->irq))
{
BUGLVL(D_INIT)
printk("arcnet: probe failed. no autoirq...\n");
return ENODEV;
}
/* otherwise we probably have a card. Let's make sure. */
if (inb(STATUS) & RESETflag) /* reset flag on */
{
/* now we turn the reset bit off */
outb(CFLAGScmd|RESETclear|CONFIGclear,COMMAND);
JIFFER(ACKtime);
}
if (inb(STATUS) & RESETflag) /* reset flag STILL on */
{
BUGLVL(D_INIT)
printk("arcnet: probe failed. eternal reset flag...(status=%Xh)\n",
inb(STATUS));
return ENODEV;
}
/* okay, we've got a real, live ARCnet on our hands. */
if (!dev->base_addr) dev->base_addr=ioaddr;
if (dev->irq < 2) /* "Auto-IRQ" */
{
/* we already did the autoirq above, so store the values */
dev->irq=airq;
}
else if (dev->irq == 2)
{
if (net_debug)
printk("arcnet: IRQ2 == IRQ9, don't worry.\n");
dev->irq = 9;
}
BUGLVL(D_INIT)
printk("arcnet: irq and base address seem okay. (%Xh, IRQ %d)\n",
dev->base_addr,dev->irq);
return 0;
}
/* A memory probe that is called after the card is reset.
* It checks for the official TESTvalue in byte 0 and makes sure the buffer
* has certain characteristics of an ARCnet...
*/
int arcnet_memprobe(struct device *dev,u_char *addr)
{
BUGLVL(D_INIT)
printk("arcnet: probing memory at %lXh\n",(u_long)addr);
dev->mem_start=0;
#ifdef STRICT_MEM_DETECT /* probably better. */
/* ARCnet memory byte 0 is TESTvalue */
if (addr[0]!=TESTvalue)
{
BUGLVL(D_INIT)
printk("arcnet: probe failed. addr=%lXh, addr[0]=%Xh (not %Xh)\n",
(unsigned long)addr,addr[0],TESTvalue);
return ENODEV;
}
/* now verify the shared memory writability */
addr[0]=0x42;
if (addr[0]!=0x42)
{
BUGLVL(D_INIT)
printk("arcnet: probe failed. addr=%lXh, addr[0]=%Xh (not 42h)\n",
(unsigned long)addr,addr[0]);
return ENODEV;
}
#else
if (addr[0]!=TESTvalue)
{
BUGLVL(D_INIT)
printk("arcnet: probe failed. addr=%lXh, addr[0]=%Xh (not %Xh)\n",
(unsigned long)addr,addr[0],TESTvalue);
return ENODEV;
}
#endif
/* got it! fill in dev */
dev->mem_start=(unsigned long)addr;
dev->mem_end=dev->mem_start+512*4-1;
dev->rmem_start=dev->mem_start+512*0;
dev->rmem_end=dev->mem_start+512*2-1;
return 0;
}
/* Open/initialize the board. This is called (in the current kernel)
sometime after booting when the 'ifconfig' program is run.
This routine should set everything up anew at each open, even
registers that "should" only need to be set once at boot, so that
there is a non-reboot way to recover if something goes wrong.
*/
static int
arcnet_open(struct device *dev)
{
struct arcnet_local *lp = (struct arcnet_local *)dev->priv;
/* int ioaddr = dev->base_addr;*/
if (net_debug) printk(version);
#if 0 /* Yup, they're hardwired in arcnets */
/* This is used if the interrupt line can turned off (shared).
See 3c503.c for an example of selecting the IRQ at config-time. */
if (request_irq(dev->irq, &arcnet_interrupt, 0, "arcnet")) {
return -EAGAIN;
}
#endif
irq2dev_map[dev->irq] = dev;
/* Reset the hardware here. */
BUGLVL(D_EXTRA) printk("arcnet: arcnet_open: resetting card.\n");
if (arcnet_reset(dev)) return -ENODEV;
/* chipset_init(dev, 1);*/
/* outb(0x00, ioaddr);*/
/* lp->open_time = jiffies;*/
dev->tbusy = 0;
dev->interrupt = 0;
dev->start = 1;
/* grab a timer handler to recover from any missed IRQ's */
init_timer(&lp->timer);
lp->timer.expires = TIMERval; /* length of time */
lp->timer.data = (unsigned long)dev; /* pointer to "dev" structure */
lp->timer.function = &arcnet_timer; /* timer handler */
#ifdef USE_TIMER_HANDLER
add_timer(&lp->timer);
#endif
return 0;
}
/* The inverse routine to arcnet_open(). */
static int
arcnet_close(struct device *dev)
{
struct arcnet_local *lp = (struct arcnet_local *)dev->priv;
int ioaddr = dev->base_addr, delayval;
/* lp->open_time = 0;*/
dev->tbusy = 1;
dev->start = 0;
/* release the timer */
del_timer(&lp->timer);
/* Flush the Tx and disable Rx here. */
/* resetting the card should do the job. */
/*inb(RESET);*/
outb(0,INTMASK); /* no IRQ's */
outb(NOTXcmd,COMMAND); /* disable transmit */
JIFFER(ACKtime);
outb(NORXcmd,COMMAND); /* disable receive */
#if 0 /* we better not do this - hard wired IRQ's */
/* If not IRQ jumpered, free up the line. */
outw(0x00, ioaddr+0); /* Release the physical interrupt line. */
free_irq(dev->irq);
irq2dev_map[dev->irq] = 0;
#endif
/* Update the statistics here. */
return 0;
}
static int
arcnet_send_packet(struct sk_buff *skb, struct device *dev)
{
struct arcnet_local *lp = (struct arcnet_local *)dev->priv;
int ioaddr=dev->base_addr,stat=0;
/* short daddr;*/
lp->intx++;
BUGLVL(D_DURING)
printk("arcnet: transmit requested (status=%Xh, inTX=%d)\n",
inb(STATUS),lp->intx);
if (dev->tbusy)
{
/* If we get here, some higher level has decided we are broken.
There should really be a "kick me" function call instead. */
int tickssofar = jiffies - dev->trans_start;
int recbuf=lp->recbuf;
int status=inb(STATUS);
if (tickssofar < 5) return 1;
BUGLVL(D_INIT)
printk("arcnet: transmit timed out (status=%Xh, inTX=%d, tickssofar=%d)\n",
status,lp->intx,tickssofar);
/* Try to restart the adaptor. */
/*arcnet_reset(dev);*/
if (status&NORXflag) EnableReceiver();
if (!(status&TXFREEflag)) outb(NOTXcmd,COMMAND);
dev->tbusy=0;
mark_bh(NET_BH);
dev->trans_start = jiffies;
lp->intx--;
return 1;
}
/* If some higher layer thinks we've missed a tx-done interrupt
we are passed NULL. Caution: dev_tint() handles the cli()/sti()
itself. */
if (skb == NULL) {
BUGLVL(D_INIT)
printk("arcnet: tx passed null skb (status=%Xh, inTX=%d, tickssofar=%ld)\n",
inb(STATUS),lp->intx,jiffies-dev->trans_start);
dev_tint(dev);
lp->intx--;
return 0;
}
/* Block a timer-based transmit from overlapping. This could better be
done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
if (set_bit(0, (void*)&dev->tbusy) != 0)
{
printk("arcnet: Transmitter called with busy bit set! (status=%Xh, inTX=%d, tickssofar=%ld)\n",
inb(STATUS),lp->intx,jiffies-dev->trans_start);
stat=-EBUSY;
}
else {
short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
struct ClientData *hdr=(struct ClientData*)skb->data;
if (length<=XMTU) /* fits in one packet? */
{
BUGLVL(D_TX) printk("arcnet: not splitting %d-byte packet. (split_flag=%d)\n",
length,hdr->split_flag);
BUGLVL(D_INIT) if (hdr->split_flag)
printk("arcnet: short packet has split_flag set?! (split_flag=%d)\n",
hdr->split_flag);
stat=arcnet_tx(dev,hdr,
length-sizeof(struct ClientData),
((char *)skb->data)+sizeof(struct ClientData));
}
else /* too big for one - split it */
{
u_char *data=(u_char *)skb->data
+ sizeof(struct ClientData);
int dataleft=length-sizeof(struct ClientData),
maxsegsize=XMTU-sizeof(struct ClientData),
numsegs=(dataleft+maxsegsize-1)/maxsegsize,
seglen,segnum=0;
BUGLVL(D_TX) printk("arcnet: packet (%d bytes) split into %d fragments:\n",
length,numsegs);
while (!stat && dataleft)
{
if (!segnum) /* first packet */
hdr->split_flag=((numsegs-2)<<1)+1;
else
hdr->split_flag=segnum<<1;
seglen=maxsegsize;
if (seglen>dataleft) seglen=dataleft;
BUGLVL(D_TX) printk("arcnet: packet #%d (%d bytes) of %d (%d total), splitflag=%d\n",
segnum+1,seglen,numsegs,length,hdr->split_flag);
stat=arcnet_tx(dev,hdr,seglen,data);
dataleft-=seglen;
data+=seglen;
segnum++;
#if 0 /* sequence # should not update here... I think! */
/* sequence number goes up on each packet */
hdr->sequence++;
lp->sequence++;
#endif
}
}
/* I don't know if this should be in or out of these braces,
* but freeing it too often seems worse than too little.
* (maybe?) (v0.30)
*/
if (!stat) dev_kfree_skb(skb, FREE_WRITE);
/* we're done now */
if (stat!=-EBUSY)
{
dev->tbusy=0;
mark_bh(NET_BH); /* Inform upper layers. */
/* this should be on an IRQ, but can't
* because ARCnets (at least mine) are stupid.
*/
}
}
lp->intx--;
if (!stat) lp->stats.tx_packets++;
return stat;
}
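Editor's note: the split_flag values written in arcnet_send_packet() above and decoded in arcnet_rx() below follow RFC 1201; a few illustrative helpers (not part of this commit) spelling out the encoding:

        /* Illustrative only: 0 = unsplit, odd = first of N fragments,
         * even = fragment number k >= 2.
         */
        static unsigned char split_flag_first(int numsegs) { return ((numsegs - 2) << 1) + 1; }
        static unsigned char split_flag_later(int k)        { return (k - 1) << 1; }
        static int total_from_first(unsigned char flag)     { return (flag >> 1) + 2; }
        static int index_from_later(unsigned char flag)     { return (flag >> 1) + 1; }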
#ifdef CAREFUL_XMIT
static void careful_xmit_wait(struct device *dev)
{
int ioaddr=dev->base_addr;
struct arcnet_local *lp = (struct arcnet_local *)dev->priv;
/* wait patiently for tx to become available again */
while ( !(inb(STATUS)&TXFREEflag) )
{
if (jiffies-dev->trans_start > 20 || !dev->tbusy)
{
BUGLVL(D_INIT)
printk("arcnet: CAREFUL_XMIT timeout. (busy=%d, status=%Xh)\n",
dev->tbusy,inb(STATUS));
lp->stats.tx_errors++;
outb(NOTXcmd,COMMAND);
return;
}
}
BUGLVL(D_TX) printk("arcnet: transmit completed successfully. (status=%Xh)\n",
inb(STATUS));
}
#endif
static int
arcnet_tx(struct device *dev,struct ClientData *hdr,short length,
char *data)
{
int ioaddr = dev->base_addr;
#if 0
struct arcnet_local *lp = (struct arcnet_local *)dev->priv;
#endif
struct ClientData *arcsoft;
union ArcPacket *arcpacket =
(union ArcPacket *)(dev->mem_start+512*TXbuf);
u_char pkttype;
int offset;
short daddr;
length+=sizeof(struct ClientData);
BUGLVL(D_TX)
printk("arcnet: arcnet_tx: hdr:%ph, length:%d, data:%ph\n",
hdr,length,data);
#if 0
/* make sure transmitter is available before sending */
if (! (inb(STATUS) & TXFREEflag))
{
BUGLVL(D_TX)
printk("arcnet: transmitter in use! (status=%Xh)\n",
inb(STATUS));
return -EBUSY;
}
#endif
/* <blah> Gruesome hack because tx+rx irq's don't work at
* the same time (or so it seems to me)
*
* Our transmits just won't be interrupt driven, I guess. (ugh)
*/
#ifdef CAREFUL_XMIT
careful_xmit_wait(dev);
#endif
/* clean out the page to make debugging make more sense :) */
BUGLVL(D_DURING)
memset((void *)dev->mem_start+TXbuf*512,0x42,512);
daddr=arcpacket->hardheader.destination=hdr->daddr;
/* load packet into shared memory */
if (length<=MTU) /* Normal (256-byte) Packet */
{
pkttype=NORMAL;
arcpacket->hardheader.offset1=offset=256-length
+ EXTRA_CLIENTDATA;
arcsoft=(struct ClientData *)
(&arcpacket->raw[offset-EXTRA_CLIENTDATA]);
}
else if (length>=MinTU) /* Extended (512-byte) Packet */
{
pkttype=EXTENDED;
arcpacket->hardheader.offset1=0;
arcpacket->hardheader.offset2=offset=512-length
+ EXTRA_CLIENTDATA;
arcsoft=(struct ClientData *)
(&arcpacket->raw[offset-EXTRA_CLIENTDATA]);
}
else /* Exception Packet */
{
pkttype=EXCEPTION;
arcpacket->hardheader.offset1=0;
arcpacket->hardheader.offset2=offset=512-length-4
+ EXTRA_CLIENTDATA;
arcsoft=(struct ClientData *)
(&arcpacket->raw[offset+4-EXTRA_CLIENTDATA]);
/* exception-specific stuff - these four bytes
* make the packet long enough to fit in a 512-byte
* frame.
*/
arcpacket->raw[offset+0]=arcsoft->protocol_id;
arcpacket->raw[offset+1]=0xFF; /* FF flag */
arcpacket->raw[offset+2]=0xFF; /* FF padding */
arcpacket->raw[offset+3]=0xFF; /* FF padding */
}
/* copy the packet into ARCnet shmem
* - the first bytes of ClientData header are skipped
*/
memcpy((u_char*)arcsoft+EXTRA_CLIENTDATA,
(u_char*)hdr+EXTRA_CLIENTDATA,
sizeof(struct ClientData)-EXTRA_CLIENTDATA);
memcpy((u_char*)arcsoft+sizeof(struct ClientData),
data,
length-sizeof(struct ClientData));
BUGLVL(D_DURING) printk("arcnet: transmitting packet to station %02Xh (%d bytes, type=%d)\n",
daddr,length,pkttype);
BUGLVL(D_TX)
{
int countx,county;
printk("arcnet: packet dump [tx] follows:");
for (county=0; county<16+(pkttype!=NORMAL)*16; county++)
{
printk("\n[%04X] ",county*16);
for (countx=0; countx<16; countx++)
printk("%02X ",
arcpacket->raw[county*16+countx]);
}
printk("\n");
}
/* start sending */
outb(TXcmd|(TXbuf<<3),COMMAND);
dev->trans_start = jiffies;
BUGLVL(D_TX) printk("arcnet: transmit started successfully. (status=%Xh)\n",
inb(STATUS));
#ifdef CAREFUL_XMIT
#if 0
careful_xmit_wait(dev);
/* if we're not broadcasting, make sure the xmit was ack'd.
* if it wasn't, there is probably no card with that
* address... or else it missed our tx somehow.
*/
if (daddr && !(inb(STATUS)&TXACKflag))
{
BUGLVL(D_INIT)
printk("arcnet: transmit not acknowledged. (status=%Xh, daddr=%02Xh)\n",
inb(STATUS),daddr);
lp->stats.tx_errors++;
return -ENONET; /* "machine is not on the network" */
}
#endif
#endif
return 0;
}
/* The typical workload of the driver:
Handle the network interface interrupts. */
static void
arcnet_interrupt(int reg_ptr)
{
int irq = -(((struct pt_regs *)reg_ptr)->orig_eax+2);
struct device *dev = (struct device *)(irq2dev_map[irq]);
if (dev == NULL) {
if (net_debug >= D_DURING)
printk("arcnet: irq %d for unknown device.\n", irq);
return;
}
arcnet_inthandler(dev);
}
static void
arcnet_inthandler(struct device *dev)
{
struct arcnet_local *lp;
int ioaddr, status, boguscount = 20;
dev->interrupt = 1;
ioaddr = dev->base_addr;
lp = (struct arcnet_local *)dev->priv;
BUGLVL(D_DURING)
printk("arcnet: in net_interrupt (status=%Xh)\n",inb(STATUS));
do
{
status = inb(STATUS);
if (!dev->start)
{
BUGLVL(D_EXTRA)
printk("arcnet: ARCnet not yet initialized. irq ignored. (status=%Xh)\n",
inb(STATUS));
break;
}
/* RESET flag was enabled - card is resetting and if RX
* is disabled, it's NOT because we just got a packet.
*/
if (status & RESETflag)
{
BUGLVL(D_INIT)
printk("arcnet: reset irq (status=%Xh)\n",
status);
break;
}
#if 1 /* yes, it's silly to disable this part but it makes good testing */
/* RX is inhibited - we must have received something. */
if (status & NORXflag)
{
int recbuf=lp->recbuf=!lp->recbuf;
BUGLVL(D_DURING)
printk("arcnet: receive irq (status=%Xh)\n",
status);
/* enable receive of our next packet */
EnableReceiver();
/* Got a packet. */
arcnet_rx(dev,!recbuf);
}
#endif
#if 0 /* this doesn't actually work, and will now zonk everything. leave
* disabled until I fix it.
*/
/* it can only be a xmit-done irq if we're xmitting :) */
else if (dev->tbusy && status&TXFREEflag)
{
BUGLVL(D_DURING)
printk("arcnet: transmit IRQ?!? (status=%Xh)\n",
status);
/*lp->stats.tx_packets++;*/
dev->tbusy = 0;
mark_bh(NET_BH); /* Inform upper layers. */
break;
}
else
break;
#endif
#if 0
break; /* delete me */
#endif
} while (--boguscount);
BUGLVL(D_DURING)
printk("arcnet: net_interrupt complete (status=%Xh)\n",
inb(STATUS));
dev->interrupt=0;
return;
}
/* A packet has arrived; grab it from the buffers and possibly unsplit it.
*/
static void
arcnet_rx(struct device *dev,int recbuf)
{
struct arcnet_local *lp = (struct arcnet_local *)dev->priv;
int ioaddr = dev->base_addr;
/* int status = inb(STATUS);*/
struct sk_buff *skb;
union ArcPacket *arcpacket=
(union ArcPacket *)(dev->mem_start+recbuf*512);
struct ClientData *soft,*arcsoft;
short length,offset;
u_char pkttype,daddr,saddr;
daddr=arcpacket->hardheader.destination;
saddr=arcpacket->hardheader.source;
/* if source is 0, it's not a "used" packet! */
if (saddr==0)
{
/*BUGLVL(D_DURING)*/
printk("arcnet: discarding old packet. (status=%Xh)\n",
inb(STATUS));
lp->stats.rx_errors++;
return;
}
arcpacket->hardheader.source=0;
if (arcpacket->hardheader.offset1) /* Normal Packet */
{
offset=arcpacket->hardheader.offset1;
arcsoft=(struct ClientData *)
(&arcpacket->raw[offset-EXTRA_CLIENTDATA]);
length=256-offset+EXTRA_CLIENTDATA;
pkttype=NORMAL;
}
else /* ExtendedPacket or ExceptionPacket */
{
offset=arcpacket->hardheader.offset2;
arcsoft=(struct ClientData *)
(&arcpacket->raw[offset-EXTRA_CLIENTDATA]);
if (arcsoft->split_flag!=0xFF) /* Extended Packet */
{
length=512-offset+EXTRA_CLIENTDATA;
pkttype=EXTENDED;
}
else /* Exception Packet */
{
/* skip over 4-byte junkola */
arcsoft=(struct ClientData *)
((u_char *)arcsoft + 4);
length=512-offset+EXTRA_CLIENTDATA-4;
pkttype=EXCEPTION;
}
}
if (!arcsoft->split_flag) /* not split */
{
struct Incoming *in=&lp->incoming[saddr];
BUGLVL(D_RX) printk("arcnet: incoming is not split (splitflag=%d)\n",
arcsoft->split_flag);
if (in->skb) /* already assembling one! */
{
BUGLVL(D_INIT) printk("arcnet: aborting assembly (seq=%d) for unsplit packet (splitflag=%d, seq=%d)\n",
in->sequence,arcsoft->split_flag,
arcsoft->sequence);
kfree_skb(in->skb,FREE_WRITE);
in->skb=NULL;
}
in->sequence=arcsoft->sequence;
skb = alloc_skb(length, GFP_ATOMIC);
if (skb == NULL) {
printk("%s: Memory squeeze, dropping packet.\n",
dev->name);
lp->stats.rx_dropped++;
return;
}
soft=(struct ClientData *)skb->data;
skb->len = length;
skb->dev = dev;
memcpy((u_char *)soft+EXTRA_CLIENTDATA,
(u_char *)arcsoft+EXTRA_CLIENTDATA,
length-EXTRA_CLIENTDATA);
soft->daddr=daddr;
BUGLVL(D_DURING)
printk("arcnet: received packet from %02Xh to %02Xh (%d bytes, type=%d)\n",
saddr,daddr,length,pkttype);
BUGLVL(D_RX)
{
int countx,county;
printk("arcnet: packet dump [rx-unsplit] follows:");
for (county=0; county<16+(pkttype!=NORMAL)*16; county++)
{
printk("\n[%04X] ",county*16);
for (countx=0; countx<16; countx++)
printk("%02X ",
arcpacket->raw[county*16+countx]);
}
printk("\n");
}
/* ARP packets have problems when sent from DOS.
* source address is always 0! So we take the hardware
* source addr (which is impossible to fumble) and insert
* it ourselves.
*/
if (soft->protocol_id == ARC_P_ARP)
{
struct arphdr *arp=(struct arphdr *)
((char *)soft+sizeof(struct ClientData));
/* make sure addresses are the right length */
if (arp->ar_hln==1 && arp->ar_pln==4)
{
char *cptr=(char *)(arp)+sizeof(struct arphdr);
if (!*cptr) /* is saddr = 00? */
{
BUGLVL(D_DURING)
printk("arcnet: ARP source address was 00h, set to %02Xh.\n",
saddr);
*cptr=saddr;
}
else BUGLVL(D_DURING)
{
printk("arcnet: ARP source address (%Xh) is fine.\n",
*cptr);
}
}
else
{
printk("arcnet: funny-shaped ARP packet. (%Xh, %Xh)\n",
arp->ar_hln,arp->ar_pln);
}
}
netif_rx(skb);
lp->stats.rx_packets++;
}
else /* split packet */
{
/* NOTE: MSDOS ARP packet correction should only need to
* apply to unsplit packets, since ARP packets are so short.
*
* My interpretation of the RFC1201 (ARCnet) document is that
* if a packet is received out of order, the entire assembly
* process should be aborted.
*
* The RFC also mentions "it is possible for successfully
* received packets to be retransmitted." I'm hoping this
* means only the most recent one, which is the only one
* currently allowed.
*
* We allow multiple assembly processes, one for each
* ARCnet card possible on the network. Seems rather like
* a waste of memory. Necessary?
*/
struct Incoming *in=&lp->incoming[saddr];
BUGLVL(D_RX) printk("arcnet: packet is split (splitflag=%d, seq=%d)\n",
arcsoft->split_flag,in->sequence);
if (in->skb && in->sequence!=arcsoft->sequence)
{
BUGLVL(D_INIT) printk("arcnet: wrong seq number, aborting assembly (expected=%d, seq=%d, splitflag=%d)\n",
in->sequence,arcsoft->sequence,
arcsoft->split_flag);
kfree_skb(in->skb,FREE_WRITE);
in->skb=NULL;
in->lastpacket=in->numpackets=0;
return;
}
if (arcsoft->split_flag & 1) /* first packet in split */
{
BUGLVL(D_RX) printk("arcnet: brand new splitpacket (splitflag=%d)\n",
arcsoft->split_flag);
if (in->skb) /* already assembling one! */
{
BUGLVL(D_INIT) printk("arcnet: aborting previous (seq=%d) assembly (splitflag=%d, seq=%d)\n",
in->sequence,arcsoft->split_flag,
arcsoft->sequence);
kfree_skb(in->skb,FREE_WRITE);
}
in->sequence=arcsoft->sequence;
in->numpackets=((unsigned)arcsoft->split_flag>>1)+2;
in->lastpacket=1;
if (in->numpackets>16)
{
printk("arcnet: incoming packet more than 16 segments; dropping. (splitflag=%d)\n",
arcsoft->split_flag);
lp->stats.rx_dropped++;
return;
}
in->skb=skb=alloc_skb(508*in->numpackets
+ sizeof(struct ClientData),
GFP_ATOMIC);
if (skb == NULL) {
printk("%s: (split) memory squeeze, dropping packet.\n",
dev->name);
lp->stats.rx_dropped++;
return;
}
/* I don't know what this is for, but it DOES avoid
* warnings...
*/
skb->free=1;
if (skb==NULL)
{
printk("%s: Memory squeeze, dropping packet.\n",
dev->name);
lp->stats.rx_dropped++;
return;
}
soft=(struct ClientData *)skb->data;
skb->len=sizeof(struct ClientData);
skb->dev=dev;
memcpy((u_char *)soft+EXTRA_CLIENTDATA,
(u_char *)arcsoft+EXTRA_CLIENTDATA,
sizeof(struct ClientData)-EXTRA_CLIENTDATA);
soft->split_flag=0; /* final packet won't be split */
}
else /* not first packet */
{
int packetnum=((unsigned)arcsoft->split_flag>>1) + 1;
/* if we're not assembling, there's no point
* trying to continue.
*/
if (!in->skb)
{
BUGLVL(D_INIT) printk("arcnet: can't continue split without starting first! (splitflag=%d, seq=%d)\n",
arcsoft->split_flag,arcsoft->sequence);
return;
}
in->lastpacket++;
if (packetnum!=in->lastpacket) /* not the right flag! */
{
/* harmless duplicate? ignore. */
if (packetnum==in->lastpacket-1)
{
BUGLVL(D_INIT) printk("arcnet: duplicate splitpacket ignored! (splitflag=%d)\n",
arcsoft->split_flag);
return;
}
/* "bad" duplicate, kill reassembly */
BUGLVL(D_INIT) printk("arcnet: out-of-order splitpacket, reassembly (seq=%d) aborted (splitflag=%d, seq=%d)\n",
in->sequence,arcsoft->split_flag,
arcsoft->sequence);
kfree_skb(in->skb,FREE_WRITE);
in->skb=NULL;
in->lastpacket=in->numpackets=0;
return;
}
soft=(struct ClientData *)in->skb->data;
}
skb=in->skb;
memcpy(skb->data+skb->len,
(u_char *)arcsoft+sizeof(struct ClientData),
length-sizeof(struct ClientData));
skb->len+=length-sizeof(struct ClientData);
soft->daddr=daddr;
BUGLVL(D_DURING)
printk("arcnet: received packet from %02Xh to %02Xh (%d bytes, type=%d)\n",
saddr,daddr,length,pkttype);
BUGLVL(D_RX)
{
int countx,county;
printk("arcnet: packet dump [rx-split] follows:");
for (county=0; county<16+(pkttype!=NORMAL)*16; county++)
{
printk("\n[%04X] ",county*16);
for (countx=0; countx<16; countx++)
printk("%02X ",
arcpacket->raw[county*16+countx]);
}
printk("\n");
}
/* are we done? */
if (in->lastpacket == in->numpackets)
{
if (!skb || !in->skb)
printk("arcnet: ?!? done reassembling packet, no skb? (skb=%ph, in->skb=%ph)\n",
skb,in->skb);
in->skb=NULL;
in->lastpacket=in->numpackets=0;
netif_rx(skb);
lp->stats.rx_packets++;
}
}
/* If any worth-while packets have been received, dev_rint()
has done a mark_bh(NET_BH) for us and will work on them
when we get to the bottom-half routine. */
/* arcnet: pardon? */
}
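Editor's aside: the split_flag arithmetic in the reassembly code above is easy to misread. The following stand-alone sketch is not part of the driver; it only mirrors the decoding that arcnet_rx() performs, so the odd/even convention can be checked in isolation with any C compiler.

#include <stdio.h>

/* Decode an RFC1201-style split_flag the same way arcnet_rx() does. */
static void decode_split_flag(unsigned int split_flag)
{
	if (split_flag == 0)
		printf("%2u: unsplit packet\n", split_flag);
	else if (split_flag & 1)	/* odd: first fragment, carries the count */
		printf("%2u: first fragment of %u\n",
			split_flag, (split_flag >> 1) + 2);
	else				/* even: continuation, carries its number */
		printf("%2u: continuation, fragment number %u\n",
			split_flag, (split_flag >> 1) + 1);
}

int main(void)
{
	unsigned int f;
	for (f = 0; f < 8; f++)
		decode_split_flag(f);
	return 0;
}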
/* this function is called every once in a while to make sure the ARCnet
* isn't stuck.
*
* If we miss a receive IRQ, the receiver (and IRQ) is permanently disabled
* and we might never receive a packet again! This will check if this
* is the case, and if so, re-enable the receiver.
*/
static void
arcnet_timer(unsigned long arg)
{
struct device *dev=(struct device *)arg;
struct arcnet_local *lp = (struct arcnet_local *)dev->priv;
short ioaddr=dev->base_addr;
int status=inb(STATUS);
/* if we didn't interrupt the IRQ handler, and RX's are still
* disabled, and we're not resetting the card... then we're stuck!
*/
if (!dev->interrupt && dev->start
&& status&NORXflag && !(status&RESETflag))
{
BUGLVL(D_INIT)
printk("arcnet: timer: ARCnet was stuck! (status=%Xh)\n",
status);
arcnet_inthandler(dev);
}
/* requeue ourselves */
init_timer(&lp->timer);
lp->timer.expires=TIMERval;
add_timer(&lp->timer);
}
/* Get the current statistics. This may be called with the card open or
closed. */
static struct enet_statistics *
arcnet_get_stats(struct device *dev)
{
struct arcnet_local *lp = (struct arcnet_local *)dev->priv;
/* short ioaddr = dev->base_addr;*/
return &lp->stats;
}
/* Set or clear the multicast filter for this adaptor.
num_addrs == -1 Promiscuous mode, receive all packets
num_addrs == 0 Normal mode, clear multicast list
num_addrs > 0 Multicast mode, receive normal and MC packets, and do
best-effort filtering.
*/
static void
set_multicast_list(struct device *dev, int num_addrs, void *addrs)
{
#if 0 /* no promiscuous mode at all */
struct arcnet_local *lp=(struct arcnet_local *)(dev->priv);
short ioaddr = dev->base_addr;
if (num_addrs) {
outw(69, ioaddr); /* Enable promiscuous mode */
} else
outw(99, ioaddr); /* Disable promiscuous mode, use normal mode */
#endif
}
int arcnet_reset(struct device *dev)
{
struct arcnet_local *lp=(struct arcnet_local *)dev->priv;
short ioaddr=dev->base_addr;
int delayval,recbuf=lp->recbuf;
outb(0,INTMASK); /* no IRQ's, please! */
BUGLVL(D_INIT)
printk("arcnet: Resetting %s (status=%Xh)\n",
dev->name,inb(STATUS));
inb(RESET); /* Reset by reading this port */
JIFFER(RESETtime);
outb(CFLAGScmd|RESETclear|CONFIGclear,COMMAND); /* clear flags & end reset */
/* after a reset, the first byte of shared mem is TESTvalue and the
* second byte is our 8-bit ARCnet address
*/
{
u_char *cardmem = (u_char *) dev->mem_start;
if (cardmem[0] != TESTvalue)
{
BUGLVL(D_INIT)
printk("arcnet: reset failed: TESTvalue not present.\n");
return 1;
}
lp->arcnum=cardmem[1]; /* save address for later use */
}
/* clear out status variables */
recbuf=lp->recbuf=0;
dev->tbusy=0;
/* enable IRQ's on completed receive
* I messed around for a long time, but I couldn't get tx and rx
* irq's to work together. It looks like one or the other but not
* both... <sigh>. The Crynwr driver uses only rx, and so do I now.
*/
outb(NORXflag,INTMASK);
/* enable extended (512-byte) packets */
outb(CONFIGcmd|EXTconf,COMMAND);
JIFFER(ACKtime);
/* clean out all the memory to make debugging make more sense :) */
BUGLVL(D_DURING)
memset((void *)dev->mem_start,0x42,2048);
/* and enable receive of our first packet to the first buffer */
EnableReceiver();
/* done! return success. */
return 0;
}
/*
* Create the ARCnet ClientData header for an arbitrary protocol layer
*
* saddr=NULL means use device source address (always will anyway)
* daddr=NULL means leave destination address (eg unresolved arp)
*/
int arc_header(unsigned char *buff,struct device *dev,unsigned short type,
void *daddr,void *saddr,unsigned len,struct sk_buff *skb)
{
struct ClientData *head = (struct ClientData *)buff;
struct arcnet_local *lp=(struct arcnet_local *)(dev->priv);
/* set the protocol ID according to RFC-1201 */
switch(type)
{
case ETH_P_IP:
head->protocol_id=ARC_P_IP;
break;
case ETH_P_ARP:
head->protocol_id=ARC_P_ARP;
break;
case ETH_P_RARP:
head->protocol_id=ARC_P_RARP;
break;
default:
printk("arcnet: I don't understand protocol %d (%Xh)\n",
type,type);
return 0;
}
#if 0
/*
* Set the source hardware address.
* AVE: we can't do this, so we don't. Code below is directly
* stolen from eth.c driver and won't work.
*/
if(saddr)
memcpy(eth->h_source,saddr,dev->addr_len);
else
memcpy(eth->h_source,dev->dev_addr,dev->addr_len);
#endif
#if 0
/*
* Anyway, the loopback-device should never use this function...
*
* And the chances of it using the ARCnet version of it are so
* tiny that I don't think we have to worry :)
*/
if (dev->flags & IFF_LOOPBACK)
{
memset(eth->h_dest, 0, dev->addr_len);
return(dev->hard_header_len);
}
#endif
head->split_flag=0; /* split packets are done elsewhere */
head->sequence=(lp->sequence++);
/* supposedly if daddr is NULL, we should ignore it... */
if(daddr)
{
head->daddr=((u_char*)daddr)[0];
return dev->hard_header_len;
}
else
head->daddr=0; /* better fill one in anyway */
return -dev->hard_header_len;
}
/*
* Rebuild the ARCnet ClientData header. This is called after an ARP
* (or in future other address resolution) has completed on this
* sk_buff. We now let ARP fill in the other fields.
*/
int arc_rebuild_header(void *buff,struct device *dev,unsigned long dst,
struct sk_buff *skb)
{
struct ClientData *head = (struct ClientData *)buff;
/*
* Only ARP/IP is currently supported
*/
if(head->protocol_id != ARC_P_IP)
{
printk("arcnet: I don't understand resolve type %d (%Xh) addresses!\n",
head->protocol_id,head->protocol_id);
head->daddr=0;
/*memcpy(eth->h_source, dev->dev_addr, dev->addr_len);*/
return 0;
}
/*
* Try and get ARP to resolve the header.
*/
#ifdef CONFIG_INET
return arp_find(&(head->daddr), dst, dev, dev->pa_addr, skb)? 1 : 0;
#else
return 0;
#endif
}
/*
* Determine the packet's protocol ID.
*
* With ARCnet we have to convert everything to Ethernet-style stuff.
*/
unsigned short arc_type_trans(struct sk_buff *skb,struct device *dev)
{
struct ClientData *head = (struct ClientData *) skb->data;
/*unsigned char *rawp;*/
if (head->daddr==0)
skb->pkt_type=PACKET_BROADCAST;
#if 0 /* code for ethernet with multicast */
if(*eth->h_dest&1)
{
if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0)
skb->pkt_type=PACKET_BROADCAST;
else
skb->pkt_type=PACKET_MULTICAST;
}
#endif
if(dev->flags&IFF_PROMISC)
{
/* if we're not sending to ourselves :) */
if (head->daddr != dev->dev_addr[0])
skb->pkt_type=PACKET_OTHERHOST;
}
/* now return the protocol number */
switch (head->protocol_id)
{
case ARC_P_IP: return htons(ETH_P_IP); /* what the heck is
an htons, anyway? */
case ARC_P_ARP: return htons(ETH_P_ARP);
case ARC_P_RARP: return htons(ETH_P_RARP);
case 0xFA: /* IPX */
case 0xDD: /* Appletalk */
default:
BUGLVL(D_DURING)
printk("arcnet: received packet of unknown protocol id %d (%Xh)\n",
head->protocol_id,head->protocol_id);
return 0;
}
#if 0 /* more ethernet-specific junk */
if (ntohs(eth->h_proto) >= 1536)
return eth->h_proto;
rawp = (unsigned char *)(eth + 1);
if (*(unsigned short *)rawp == 0xFFFF)
return htons(ETH_P_802_3);
if (*(unsigned short *)rawp == 0xAAAA)
return htons(ETH_P_SNAP);
return htons(ETH_P_802_2);
#endif
return htons(ETH_P_IP);
}
/*
* Local variables:
* compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c skeleton.c"
* version-control: t
* kept-new-versions: 5
* tab-width: 4
* End:
*/
......@@ -46,9 +46,8 @@ void msdos_put_inode(struct inode *inode)
clear_inode(inode);
if (depend) {
if (MSDOS_I(depend)->i_old != inode) {
printk("Invalid link (0x%X): expected 0x%X, got 0x%X\n",
(int) depend,(int) inode,(int) MSDOS_I(depend)->
i_old);
printk("Invalid link (0x%p): expected 0x%p, got 0x%p\n",
depend, inode, MSDOS_I(depend)->i_old);
fs_panic(sb,"...");
return;
}
......
......@@ -15,7 +15,7 @@
* <middelin@polyware.iaf.nl>
*
* Danny ter Haar : Some minor additions for cpuinfo
* <danny@ow.nl>
* <danny@ow.nl>
*
* Alessandro Rubini : profile extension.
* <rubini@ipvvis.unipv.it>
......@@ -332,7 +332,7 @@ static unsigned long get_phys_addr(struct task_struct ** p, unsigned long ptr)
if (!p || !*p || ptr >= TASK_SIZE)
return 0;
page = *PAGE_DIR_OFFSET((*p)->tss.cr3,ptr);
page = *PAGE_DIR_OFFSET(*p,ptr);
if (!(page & PAGE_PRESENT))
return 0;
page &= PAGE_MASK;
......@@ -513,7 +513,7 @@ static int get_statm(int pid, char * buffer)
return 0;
tpag = (*p)->mm->end_code / PAGE_SIZE;
if ((*p)->state != TASK_ZOMBIE) {
pagedir = (unsigned long *) (*p)->tss.cr3;
pagedir = PAGE_DIR_OFFSET(*p, 0);
for (i = 0; i < 0x300; ++i) {
if ((ptbl = pagedir[i]) == 0) {
tpag -= PTRS_PER_PAGE;
......
......@@ -24,7 +24,8 @@
static int mem_read(struct inode * inode, struct file * file,char * buf, int count)
{
unsigned long addr, pid, cr3;
struct task_struct * tsk;
unsigned long addr, pid;
char *tmp;
unsigned long pte, page;
int i;
......@@ -33,20 +34,20 @@ static int mem_read(struct inode * inode, struct file * file,char * buf, int cou
return -EINVAL;
pid = inode->i_ino;
pid >>= 16;
cr3 = 0;
tsk = NULL;
for (i = 1 ; i < NR_TASKS ; i++)
if (task[i] && task[i]->pid == pid) {
cr3 = task[i]->tss.cr3;
tsk = task[i];
break;
}
if (!cr3)
if (!tsk)
return -EACCES;
addr = file->f_pos;
tmp = buf;
while (count > 0) {
if (current->signal & ~current->blocked)
break;
pte = *PAGE_DIR_OFFSET(cr3,addr);
pte = *PAGE_DIR_OFFSET(tsk,addr);
if (!(pte & PAGE_PRESENT))
break;
pte &= PAGE_MASK;
......@@ -72,7 +73,8 @@ static int mem_read(struct inode * inode, struct file * file,char * buf, int cou
static int mem_write(struct inode * inode, struct file * file,char * buf, int count)
{
unsigned long addr, pid, cr3;
struct task_struct * tsk;
unsigned long addr, pid;
char *tmp;
unsigned long pte, page;
int i;
......@@ -82,19 +84,19 @@ static int mem_write(struct inode * inode, struct file * file,char * buf, int co
addr = file->f_pos;
pid = inode->i_ino;
pid >>= 16;
cr3 = 0;
tsk = NULL;
for (i = 1 ; i < NR_TASKS ; i++)
if (task[i] && task[i]->pid == pid) {
cr3 = task[i]->tss.cr3;
tsk = task[i];
break;
}
if (!cr3)
if (!tsk)
return -EACCES;
tmp = buf;
while (count > 0) {
if (current->signal & ~current->blocked)
break;
pte = *PAGE_DIR_OFFSET(cr3,addr);
pte = *PAGE_DIR_OFFSET(tsk,addr);
if (!(pte & PAGE_PRESENT))
break;
pte &= PAGE_MASK;
......@@ -144,21 +146,22 @@ int
mem_mmap(struct inode * inode, struct file * file,
struct vm_area_struct * vma)
{
unsigned long *src_table, *dest_table, stmp, dtmp, cr3;
struct task_struct *tsk;
unsigned long *src_table, *dest_table, stmp, dtmp;
struct vm_area_struct *src_vma = 0;
int i;
/* Get the source's task information */
cr3 = 0;
tsk = NULL;
for (i = 1 ; i < NR_TASKS ; i++)
if (task[i] && task[i]->pid == (inode->i_ino >> 16)) {
cr3 = task[i]->tss.cr3;
tsk = task[i];
src_vma = task[i]->mm->mmap;
break;
}
if (!cr3)
if (!tsk)
return -EACCES;
/* Ensure that we have a valid source area. (Has to be mmap'ed and
......@@ -173,7 +176,7 @@ mem_mmap(struct inode * inode, struct file * file,
if (!src_vma || (src_vma->vm_flags & VM_SHM))
return -EINVAL;
src_table = PAGE_DIR_OFFSET(cr3, stmp);
src_table = PAGE_DIR_OFFSET(tsk, stmp);
if (!*src_table)
return -EINVAL;
src_table = (unsigned long *)((*src_table & PAGE_MASK) + PAGE_PTR(stmp));
......@@ -197,10 +200,10 @@ mem_mmap(struct inode * inode, struct file * file,
while (src_vma && stmp > src_vma->vm_end)
src_vma = src_vma->vm_next;
src_table = PAGE_DIR_OFFSET(cr3, stmp);
src_table = PAGE_DIR_OFFSET(tsk, stmp);
src_table = (unsigned long *)((*src_table & PAGE_MASK) + PAGE_PTR(stmp));
dest_table = PAGE_DIR_OFFSET(current->tss.cr3, dtmp);
dest_table = PAGE_DIR_OFFSET(current, dtmp);
if (!*dest_table) {
*dest_table = get_free_page(GFP_KERNEL);
......
......@@ -42,7 +42,6 @@ static int proc_lookupnet(struct inode *,const char *,int,struct inode **);
/* the get_*_info() functions are in the net code, and are configured
in via the standard mechanism... */
extern int unix_get_info(char *, char **, off_t, int);
extern int afinet_get_info(char *, char **, off_t, int);
#ifdef CONFIG_INET
extern int tcp_get_info(char *, char **, off_t, int);
extern int udp_get_info(char *, char **, off_t, int);
......@@ -52,6 +51,7 @@ extern int rarp_get_info(char *, char **, off_t, int);
extern int dev_get_info(char *, char **, off_t, int);
extern int rt_get_info(char *, char **, off_t, int);
extern int snmp_get_info(char *, char **, off_t, int);
extern int afinet_get_info(char *, char **, off_t, int);
extern int ip_acct_procinfo(char *, char **, off_t, int);
extern int ip_fw_blk_procinfo(char *, char **, off_t, int);
extern int ip_fw_fwd_procinfo(char *, char **, off_t, int);
......@@ -118,6 +118,7 @@ static struct proc_dir_entry net_dir[] = {
{ PROC_NET_TCP, 3, "tcp" },
{ PROC_NET_UDP, 3, "udp" },
{ PROC_NET_SNMP, 4, "snmp" },
{ PROC_NET_SOCKSTAT, 8, "sockstat" },
#ifdef CONFIG_INET_RARP
{ PROC_NET_RARP, 4, "rarp"},
#endif
......@@ -145,7 +146,6 @@ static struct proc_dir_entry net_dir[] = {
{ PROC_NET_NR, 2, "nr" },
#endif /* CONFIG_NETROM */
#endif /* CONFIG_AX25 */
{ PROC_NET_SOCKSTAT, 8, "sockstat" },
{ 0, 0, NULL }
};
......@@ -233,10 +233,10 @@ static int proc_readnet(struct inode * inode, struct file * file,
case PROC_NET_UNIX:
length = unix_get_info(page,&start,file->f_pos,thistime);
break;
#ifdef CONFIG_INET
case PROC_NET_SOCKSTAT:
length = afinet_get_info(page,&start,file->f_pos,thistime);
break;
#ifdef CONFIG_INET
case PROC_NET_ARP:
length = arp_get_info(page,&start,file->f_pos,thistime);
break;
......
......@@ -73,6 +73,9 @@
#define MAX_DMA_CHANNELS 8
/* The maximum address that we can perform a DMA transfer to on this platform */
#define MAX_DMA_ADDRESS 0x1000000
/* 8237 DMA controllers */
#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
......
......@@ -36,15 +36,33 @@ __asm__ __volatile__( \
/* 64-bit machines, beware! SRB. */
#define SIZEOF_PTR_LOG2 4
/* to find an entry in a page-table-directory */
#define PAGE_DIR_OFFSET(base,address) ((unsigned long*)((base)+\
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)*2&PTR_MASK&~PAGE_MASK)))
/* to find an entry in a page-table */
/* to find an entry in a page-table-directory */
/*
* XXXXX This isn't right: we shouldn't use the ptbr, but the L2 pointer.
* This is just for getting it through the compiler right now
*/
#define PAGE_DIR_OFFSET(tsk,address) \
((unsigned long *) ((tsk)->tss.ptbr + ((((unsigned long)(address)) >> 21) & PTR_MASK & ~PAGE_MASK)))
/* to find an entry in a page-table */
#define PAGE_PTR(address) \
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
/* the no. of pointers that fit on a page */
/* the no. of pointers that fit on a page */
#define PTRS_PER_PAGE (PAGE_SIZE/sizeof(void*))
/* to set the page-dir */
/*
* XXXXX This isn't right: we shouldn't use the ptbr, but the L2 pointer.
* This is just for getting it through the compiler right now
*/
#define SET_PAGE_DIR(tsk,pgdir) \
do { \
(tsk)->tss.ptbr = (unsigned long) (pgdir); \
if ((tsk) == current) \
invalidate(); \
} while (0)
#endif /* __KERNEL__ */
#endif /* _ALPHA_PAGE_H */
......@@ -18,9 +18,14 @@
/*
* Bus types
*/
extern int EISA_bus;
#define EISA_bus 1
#define MCA_bus 0
/*
* The alpha has no problems with write protection
*/
#define wp_works_ok 1
struct thread_struct {
unsigned long ksp;
unsigned long usp;
......
......@@ -27,15 +27,25 @@ __asm__ __volatile__("movl %%cr3,%%eax\n\tmovl %%eax,%%cr3": : :"ax")
/* 64-bit machines, beware! SRB. */
#define SIZEOF_PTR_LOG2 2
/* to find an entry in a page-table-directory */
#define PAGE_DIR_OFFSET(base,address) ((unsigned long*)((base)+\
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)*2&PTR_MASK&~PAGE_MASK)))
/* to find an entry in a page-table */
/* to find an entry in a page-table-directory */
#define PAGE_DIR_OFFSET(tsk,address) \
((((unsigned long)(address)) >> 22) + (unsigned long *) (tsk)->tss.cr3)
/* to find an entry in a page-table */
#define PAGE_PTR(address) \
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
/* the no. of pointers that fit on a page */
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
/* the no. of pointers that fit on a page */
#define PTRS_PER_PAGE (PAGE_SIZE/sizeof(void*))
/* to set the page-dir */
#define SET_PAGE_DIR(tsk,pgdir) \
do { \
(tsk)->tss.cr3 = (unsigned long) (pgdir); \
if ((tsk) == current) \
__asm__ __volatile__("movl %0,%%cr3": :"a" ((tsk)->tss.cr3)); \
} while (0)
#endif /* __KERNEL__ */
#endif /* _I386_PAGE_H */
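Editor's aside: the reworked PAGE_DIR_OFFSET()/PAGE_PTR() macros above boil down to plain index arithmetic on the two-level i386 page table. The sketch below is illustrative only, not kernel code; it assumes the usual i386 values (PAGE_SHIFT = 12, 1024 pointers per page) and shows which directory slot, table slot and byte offset a sample virtual address decomposes into.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PTRS_PER_PAGE	1024UL		/* PAGE_SIZE / sizeof(void *) on i386 */

int main(void)
{
	unsigned long address = 0x081d73a4UL;	/* arbitrary example address */

	unsigned long dir_slot = address >> 22;				  /* page-directory index */
	unsigned long pte_slot = (address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1); /* page-table index */
	unsigned long offset   = address & (PAGE_SIZE - 1);		  /* byte within the page */

	printf("%08lx -> pgd[%lu], pte[%lu], offset 0x%03lx\n",
		address, dir_slot, pte_slot, offset);
	return 0;
}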
......@@ -8,12 +8,10 @@
success, 1 on failure.
*/
extern __inline__ int enable_vac()
extern __inline__ int enable_vac(void)
{
int success;
int success=0;
&success;
__asm__ __volatile__("lduba [%1] 2, %0\n\t"
"or %0, 0x10, %0\n\t"
"stba %0, [%1] 2\n\t"
......@@ -28,9 +26,9 @@ extern __inline__ int enable_vac()
success, 1 on failure.
*/
extern __inline__ int disable_vac()
extern __inline__ int disable_vac(void)
{
int success;
int success=0;
__asm__ __volatile__("lduba [%1] 0x2, %0\n\t"
"xor %0, 0x10, %0\n\t"
......
......@@ -87,7 +87,7 @@ struct vm_operations_struct {
unsigned long (*wppage)(struct vm_area_struct * area, unsigned long address,
unsigned long page);
void (*swapout)(struct vm_area_struct *, unsigned long, unsigned long *);
unsigned long (*swapin)(struct vm_area_struct *, unsigned long);
unsigned long (*swapin)(struct vm_area_struct *, unsigned long, unsigned long);
};
extern unsigned long __bad_page(void);
......@@ -282,11 +282,7 @@ extern inline long find_in_swap_cache (unsigned long addr)
#ifdef SWAP_CACHE_INFO
swap_cache_find_total++;
#endif
__asm__ __volatile__("xchgl %0,%1"
:"=m" (swap_cache[addr >> PAGE_SHIFT]),
"=r" (entry)
:"0" (swap_cache[addr >> PAGE_SHIFT]),
"1" (0));
entry = (unsigned long) xchg_ptr(swap_cache + (addr >> PAGE_SHIFT), NULL);
#ifdef SWAP_CACHE_INFO
if (entry)
swap_cache_find_success++;
......@@ -301,11 +297,7 @@ extern inline int delete_from_swap_cache(unsigned long addr)
#ifdef SWAP_CACHE_INFO
swap_cache_del_total++;
#endif
__asm__ __volatile__("xchgl %0,%1"
:"=m" (swap_cache[addr >> PAGE_SHIFT]),
"=r" (entry)
:"0" (swap_cache[addr >> PAGE_SHIFT]),
"1" (0));
entry = (unsigned long) xchg_ptr(swap_cache + (addr >> PAGE_SHIFT), NULL);
if (entry) {
#ifdef SWAP_CACHE_INFO
swap_cache_del_success++;
......
......@@ -24,6 +24,7 @@
#define NSOCKETS 2000 /* Dynamic, this is MAX LIMIT */
#define NSOCKETS_UNIX 128 /* unix domain static limit */
#define NPROTO 16 /* should be enough for now.. */
......
......@@ -50,7 +50,6 @@ struct shminfo {
bits 14..8 (SHM_ID) the id of the shared memory segment
bits 29..15 (SHM_IDX) the index of the page within the shared memory segment
(actually only bits 24..15 get used since SHMMAX is so low)
bit 31 (SHM_READ_ONLY) flag whether the page belongs to a read-only attach
*/
#define SHM_ID_SHIFT 8
......@@ -63,9 +62,7 @@ struct shminfo {
#define _SHM_IDX_BITS 15
#define SHM_IDX_MASK ((1<<_SHM_IDX_BITS)-1)
#define SHM_READ_ONLY (1<<31)
/* We must have SHM_ID_SHIFT + _SHM_ID_BITS + _SHM_IDX_BITS + 1 <= 32
/* We must have SHM_ID_SHIFT + _SHM_ID_BITS + _SHM_IDX_BITS <= 32
and SHMMAX <= (PAGE_SIZE << _SHM_IDX_BITS). */
#define SHMMAX 0x3fa000 /* max shared seg size (bytes) */
......
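Editor's aside: the bit layout described in the comment above is exercised by sys_shmat() and shm_swap_in() later in this patch. The stand-alone sketch below is not kernel code; the 7-bit ID width is inferred from the "bits 14..8" comment and the swap-type bits are left out. It packs and unpacks an id/index pair with the same shifts and masks.

#include <stdio.h>

#define SHM_ID_SHIFT	8
#define _SHM_ID_BITS	7				/* bits 14..8 */
#define SHM_ID_MASK	((1 << _SHM_ID_BITS) - 1)
#define SHM_IDX_SHIFT	(SHM_ID_SHIFT + _SHM_ID_BITS)	/* bits 29..15 */
#define _SHM_IDX_BITS	15
#define SHM_IDX_MASK	((1 << _SHM_IDX_BITS) - 1)

int main(void)
{
	unsigned long id = 3, idx = 42;

	/* pack, roughly as sys_shmat() and shm_swap() do (swap-type bits omitted) */
	unsigned long code = (id << SHM_ID_SHIFT) | (idx << SHM_IDX_SHIFT);

	/* unpack, as shm_swap_in() does */
	printf("id  = %lu\n", (code >> SHM_ID_SHIFT) & SHM_ID_MASK);
	printf("idx = %lu\n", (code >> SHM_IDX_SHIFT) & SHM_IDX_MASK);
	return 0;
}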
......@@ -437,8 +437,6 @@ asmlinkage void start_kernel(void)
}
low_memory_start = PAGE_ALIGN(low_memory_start);
memory_start = paging_init(memory_start,memory_end);
if (strncmp((char*)0x0FFFD9, "EISA", 4) == 0)
EISA_bus = 1;
trap_init();
init_IRQ();
sched_init();
......
......@@ -22,7 +22,7 @@ static int shm_map (struct vm_area_struct *shmd, int remap);
static void killseg (int id);
static void shm_open (struct vm_area_struct *shmd);
static void shm_close (struct vm_area_struct *shmd);
static unsigned long shm_swap_in (struct vm_area_struct *, unsigned long);
static unsigned long shm_swap_in (struct vm_area_struct *, unsigned long, unsigned long);
static int shm_tot = 0; /* total number of shared memory pages */
static int shm_rss = 0; /* number of shared memory pages that are in memory */
......@@ -378,12 +378,11 @@ static int shm_map (struct vm_area_struct *shmd, int remap)
{
unsigned long *page_table;
unsigned long tmp, shm_sgn;
unsigned long page_dir = shmd->vm_task->tss.cr3;
/* check that the range is unmapped */
if (!remap)
for (tmp = shmd->vm_start; tmp < shmd->vm_end; tmp += PAGE_SIZE) {
page_table = PAGE_DIR_OFFSET(page_dir,tmp);
page_table = PAGE_DIR_OFFSET(shmd->vm_task,tmp);
if (*page_table & PAGE_PRESENT) {
page_table = (ulong *) (PAGE_MASK & *page_table);
page_table += ((tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1));
......@@ -403,7 +402,7 @@ static int shm_map (struct vm_area_struct *shmd, int remap)
/* check that the range has page_tables */
for (tmp = shmd->vm_start; tmp < shmd->vm_end; tmp += PAGE_SIZE) {
page_table = PAGE_DIR_OFFSET(page_dir,tmp);
page_table = PAGE_DIR_OFFSET(shmd->vm_task,tmp);
if (*page_table & PAGE_PRESENT) {
page_table = (ulong *) (PAGE_MASK & *page_table);
page_table += ((tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1));
......@@ -429,7 +428,7 @@ static int shm_map (struct vm_area_struct *shmd, int remap)
shm_sgn = shmd->vm_pte + ((shmd->vm_offset >> PAGE_SHIFT) << SHM_IDX_SHIFT);
for (tmp = shmd->vm_start; tmp < shmd->vm_end; tmp += PAGE_SIZE,
shm_sgn += (1 << SHM_IDX_SHIFT)) {
page_table = PAGE_DIR_OFFSET(page_dir,tmp);
page_table = PAGE_DIR_OFFSET(shmd->vm_task,tmp);
page_table = (ulong *) (PAGE_MASK & *page_table);
page_table += (tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
*page_table = shm_sgn;
......@@ -496,8 +495,7 @@ int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
return -EIDRM;
}
shmd->vm_pte = (SHM_SWP_TYPE << 1) | (id << SHM_ID_SHIFT) |
(shmflg & SHM_RDONLY ? SHM_READ_ONLY : 0);
shmd->vm_pte = (SHM_SWP_TYPE << 1) | (id << SHM_ID_SHIFT);
shmd->vm_start = addr;
shmd->vm_end = addr + shp->shm_npages * PAGE_SIZE;
shmd->vm_task = current;
......@@ -604,30 +602,40 @@ int sys_shmdt (char *shmaddr)
/*
* page not present ... go through shm_pages
*/
static unsigned long shm_swap_in(struct vm_area_struct * vma, unsigned long code)
static unsigned long shm_swap_in(struct vm_area_struct * shmd, unsigned long offset, unsigned long code)
{
unsigned long page;
struct shmid_ds *shp;
unsigned int id, idx;
id = (code >> SHM_ID_SHIFT) & SHM_ID_MASK;
if (id != ((shmd->vm_pte >> SHM_ID_SHIFT) & SHM_ID_MASK)) {
printk ("shm_swap_in: code id = %d and shmd id = %ld differ\n",
id, (shmd->vm_pte >> SHM_ID_SHIFT) & SHM_ID_MASK);
return BAD_PAGE | PAGE_SHARED;
}
if (id > max_shmid) {
printk ("shm_no_page: id=%d too big. proc mem corrupted\n", id);
printk ("shm_swap_in: id=%d too big. proc mem corrupted\n", id);
return BAD_PAGE | PAGE_SHARED;
}
shp = shm_segs[id];
if (shp == IPC_UNUSED || shp == IPC_NOID) {
printk ("shm_no_page: id=%d invalid. Race.\n", id);
printk ("shm_swap_in: id=%d invalid. Race.\n", id);
return BAD_PAGE | PAGE_SHARED;
}
idx = (code >> SHM_IDX_SHIFT) & SHM_IDX_MASK;
if (idx != (offset >> PAGE_SHIFT)) {
printk ("shm_swap_in: code idx = %u and shmd idx = %lu differ\n",
idx, offset >> PAGE_SHIFT);
return BAD_PAGE | PAGE_SHARED;
}
if (idx >= shp->shm_npages) {
printk ("shm_no_page : too large page index. id=%d\n", id);
printk ("shm_swap_in : too large page index. id=%d\n", id);
return BAD_PAGE | PAGE_SHARED;
}
if (!(shp->shm_pages[idx] & PAGE_PRESENT)) {
if(!(page = get_free_page(GFP_KERNEL))) {
if (!(page = get_free_page(GFP_KERNEL))) {
oom(current);
return BAD_PAGE | PAGE_SHARED;
}
......@@ -652,8 +660,7 @@ static unsigned long shm_swap_in(struct vm_area_struct * vma, unsigned long code
done:
current->mm->min_flt++;
page = shp->shm_pages[idx];
if (code & SHM_READ_ONLY) /* write-protect */
page &= ~PAGE_RW;
page &= ~(PAGE_RW & ~shmd->vm_page_prot); /* write-protect */
mem_map[MAP_NR(page)]++;
return page;
}
......@@ -716,7 +723,7 @@ int shm_swap (int prio)
tmp = shmd->vm_start + (idx << PAGE_SHIFT) - shmd->vm_offset;
if (!(tmp >= shmd->vm_start && tmp < shmd->vm_end))
continue;
pte = PAGE_DIR_OFFSET(shmd->vm_task->tss.cr3,tmp);
pte = PAGE_DIR_OFFSET(shmd->vm_task,tmp);
if (!(*pte & PAGE_PRESENT)) {
printk("shm_swap: bad pgtbl! id=%ld start=%lx idx=%ld\n",
id, shmd->vm_start, idx);
......@@ -732,8 +739,7 @@ int shm_swap (int prio)
*pte &= ~PAGE_ACCESSED;
continue;
}
tmp = shmd->vm_pte | idx << SHM_IDX_SHIFT;
*pte = tmp;
*pte = shmd->vm_pte | idx << SHM_IDX_SHIFT;
mem_map[MAP_NR(page)]--;
shmd->vm_task->mm->rss--;
invalid++;
......
......@@ -17,7 +17,8 @@
$(CC) $(CFLAGS) -c $<
OBJS = sched.o dma.o fork.o exec_domain.o panic.o printk.o vsprintf.o sys.o \
module.o ksyms.o exit.o signal.o itimer.o info.o time.o softirq.o
module.o ksyms.o exit.o signal.o itimer.o info.o time.o softirq.o \
resource.o
all: kernel.o
......
......@@ -169,13 +169,18 @@ asmlinkage int sys_fork(struct pt_regs regs)
{
int nr;
struct task_struct *p;
unsigned long new_stack;
unsigned long clone_flags = COPYVM | SIGCHLD;
if(!(p = (struct task_struct*)__get_free_page(GFP_KERNEL)))
goto bad_fork;
new_stack = get_free_page(GFP_KERNEL);
if (!new_stack)
goto bad_fork_free;
nr = find_empty_process();
if (nr < 0)
goto bad_fork_free;
*p = *current;
if (p->exec_domain && p->exec_domain->use_count)
......@@ -184,7 +189,8 @@ asmlinkage int sys_fork(struct pt_regs regs)
(*p->binfmt->use_count)++;
p->did_exec = 0;
p->kernel_stack_page = 0;
p->kernel_stack_page = new_stack;
*(unsigned long *) p->kernel_stack_page = STACK_MAGIC;
p->state = TASK_UNINTERRUPTIBLE;
p->flags &= ~(PF_PTRACED|PF_TRACESYS);
p->pid = last_pid;
......@@ -201,11 +207,6 @@ asmlinkage int sys_fork(struct pt_regs regs)
p->start_time = jiffies;
task[nr] = p;
/* build new kernel stack */
if (!(p->kernel_stack_page = get_free_page(GFP_KERNEL)))
goto bad_fork_cleanup;
*(unsigned long *)p->kernel_stack_page = STACK_MAGIC;
/* copy all the process information */
clone_flags = copy_thread(nr, COPYVM | SIGCHLD, p, &regs);
if (copy_mm(clone_flags, p))
......@@ -222,8 +223,8 @@ asmlinkage int sys_fork(struct pt_regs regs)
bad_fork_cleanup:
task[nr] = NULL;
REMOVE_LINKS(p);
free_page(p->kernel_stack_page);
bad_fork_free:
free_page(new_stack);
free_page((long) p);
bad_fork:
return -EAGAIN;
......
......@@ -69,8 +69,14 @@ struct symbol_table symbol_table = { 0, 0, 0, /* for stacked module support */
X(rename_module_symbol),
/* system info variables */
/* These check that they aren't defines (0/1) */
#ifndef EISA_bus
X(EISA_bus),
#ifdef __i386__
#endif
#ifndef MCA_bus
X(MCA_bus),
#endif
#ifndef wp_works_ok
X(wp_works_ok),
#endif
......@@ -282,11 +288,13 @@ struct symbol_table symbol_table = { 0, 0, 0, /* for stacked module support */
X(__down),
#if defined(CONFIG_MSDOS_FS) && !defined(CONFIG_UMSDOS_FS)
/* support for umsdos fs */
X(msdos_bmap),
X(msdos_create),
X(msdos_file_read),
X(msdos_file_write),
X(msdos_lookup),
X(msdos_mkdir),
X(msdos_mmap),
X(msdos_put_inode),
X(msdos_put_super),
X(msdos_read_inode),
......
/*
* linux/kernel/resource.c
*
* Copyright (C) 1995 Linus Torvalds
* David Hinds
*
* Kernel io-region resource management
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/ioport.h>
#define IOTABLE_SIZE 32
typedef struct resource_entry_t {
u_long from, num;
const char *name;
struct resource_entry_t *next;
} resource_entry_t;
static resource_entry_t iolist = { 0, 0, "", NULL };
static resource_entry_t iotable[IOTABLE_SIZE];
/*
* This generates the report for /proc/ioports
*/
int get_ioport_list(char *buf)
{
resource_entry_t *p;
int len = 0;
for (p = iolist.next; (p) && (len < 4000); p = p->next)
len += sprintf(buf+len, "%04lx-%04lx : %s\n",
p->from, p->from+p->num-1, p->name);
if (p)
len += sprintf(buf+len, "4K limit reached!\n");
return len;
}
/*
* The workhorse function: find where to put a new entry
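 *
 * The iolist is kept sorted by increasing start address; this returns the
 * entry after which [from, from+num-1] fits without overlapping anything
 * already registered, or NULL if the range would overlap (or wraps around).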
*/
static resource_entry_t *find_gap(resource_entry_t *root,
u_long from, u_long num)
{
unsigned long flags;
resource_entry_t *p;
if (from > from+num-1)
return NULL;
save_flags(flags);
cli();
for (p = root; ; p = p->next) {
if ((p != root) && (p->from+p->num-1 >= from)) {
p = NULL;
break;
}
if ((p->next == NULL) || (p->next->from > from+num-1))
break;
}
restore_flags(flags);
return p;
}
/*
* Call this from the device driver to register the ioport region.
*/
void request_region(unsigned int from, unsigned int num, const char *name)
{
resource_entry_t *p;
int i;
for (i = 0; i < IOTABLE_SIZE; i++)
if (iotable[i].num == 0)
break;
if (i == IOTABLE_SIZE)
printk("warning: ioport table is full\n");
else {
p = find_gap(&iolist, from, num);
if (p == NULL)
return;
iotable[i].name = name;
iotable[i].from = from;
iotable[i].num = num;
iotable[i].next = p->next;
p->next = &iotable[i];
return;
}
}
/*
* This is for compatibility with older drivers.
 * It can be removed when all drivers call the new function.
*/
void snarf_region(unsigned int from, unsigned int num)
{
request_region(from,num,"No name given.");
}
/*
* Call this when the device driver is unloaded
*/
void release_region(unsigned int from, unsigned int num)
{
resource_entry_t *p, *q;
for (p = &iolist; ; p = q) {
q = p->next;
if (q == NULL)
break;
if ((q->from == from) && (q->num == num)) {
q->num = 0;
p->next = q->next;
return;
}
}
}
/*
* Call this to check the ioport region before probing
*/
int check_region(unsigned int from, unsigned int num)
{
return (find_gap(&iolist, from, num) == NULL) ? -EBUSY : 0;
}
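A usage note from the editor, not part of this file: a driver of this period would typically combine the three entry points above as sketched below. The device name, port base and extent are hypothetical.

#include <linux/ioport.h>
#include <linux/errno.h>

#define FOO_BASE	0x300	/* hypothetical I/O base */
#define FOO_EXTENT	8	/* hypothetical number of ports */

static int foo_probe(void)
{
	/* don't touch the ports if someone else already owns them */
	if (check_region(FOO_BASE, FOO_EXTENT))
		return -EBUSY;

	/* ... hardware detection would go here ... */

	/* claim the range so it shows up in /proc/ioports */
	request_region(FOO_BASE, FOO_EXTENT, "foo");
	return 0;
}

static void foo_release(void)
{
	release_region(FOO_BASE, FOO_EXTENT);
}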
/* Called from init/main.c to reserve IO ports. */
void reserve_setup(char *str, int *ints)
{
int i;
for (i = 1; i < ints[0]; i += 2)
request_region(ints[i], ints[i+1], "reserved");
}
......@@ -65,27 +65,6 @@ long time_adjust_step = 0;
int need_resched = 0;
unsigned long event = 0;
/*
* Tell us the machine setup..
*/
char hard_math = 0; /* set by boot/head.S */
char x86 = 0; /* set by boot/head.S to 3 or 4 */
char x86_model = 0; /* set by boot/head.S */
char x86_mask = 0; /* set by boot/head.S */
int x86_capability = 0; /* set by boot/head.S */
int fdiv_bug = 0; /* set if Pentium(TM) with FP bug */
char x86_vendor_id[13] = "Unknown";
char ignore_irq13 = 0; /* set if exception 16 works */
char wp_works_ok = 0; /* set if paging hardware honours WP */
char hlt_works_ok = 1; /* set if the "hlt" instruction works */
/*
* Bus types ..
*/
int EISA_bus = 0;
extern int _setitimer(int, struct itimerval *, struct itimerval *);
unsigned long * prof_buffer = NULL;
unsigned long prof_len = 0;
......@@ -494,7 +473,7 @@ static void second_overflow(void)
time_status = TIME_OK;
break;
}
if (xtime.tv_sec > last_rtc_update + 660)
if (time_status != TIME_BAD && xtime.tv_sec > last_rtc_update + 660)
if (set_rtc_mmss(xtime.tv_sec) == 0)
last_rtc_update = xtime.tv_sec;
else
......
......@@ -201,7 +201,7 @@ int memcmp(const void * cs,const void * ct,size_t count)
/*
* find the first occurrence of byte 'c', or 1 past the area if none
*/
extern inline void * memscan(void * addr, unsigned char c, size_t size)
void * memscan(void * addr, unsigned char c, size_t size)
{
unsigned char * p = (unsigned char *) addr;
......
......@@ -90,7 +90,7 @@ static void file_mmap_sync(struct vm_area_struct * vma, unsigned long start,
unsigned long poff, pcnt, pc;
size = size >> PAGE_SHIFT;
dir = PAGE_DIR_OFFSET(current->tss.cr3,start);
dir = PAGE_DIR_OFFSET(current,start);
poff = (start >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
start -= vma->vm_start;
if ((pcnt = PTRS_PER_PAGE - poff) > size)
......
......@@ -47,22 +47,8 @@
#include <asm/system.h>
#include <asm/segment.h>
/*
* Define this if things work differently on a i386 and a i486:
* it will (on a i486) warn about kernel memory accesses that are
* done without a 'verify_area(VERIFY_WRITE,..)'
*/
#undef CONFIG_TEST_VERIFY_AREA
unsigned long high_memory = 0;
extern unsigned long pg0[1024]; /* page table for 0-4MB for everybody */
extern void scsi_mem_init(unsigned long);
extern void sound_mem_init(void);
extern void die_if_kernel(char *,struct pt_regs *,long);
extern void show_net_buffers(void);
/*
* The free_area_list arrays point to the queue heads of the free areas
* of different sizes
......@@ -130,20 +116,18 @@ static void free_one_table(unsigned long * page_dir)
void clear_page_tables(struct task_struct * tsk)
{
int i;
unsigned long pg_dir;
unsigned long * page_dir;
if (!tsk)
return;
if (tsk == task[0])
panic("task[0] (swapper) doesn't support exec()\n");
pg_dir = tsk->tss.cr3;
page_dir = (unsigned long *) pg_dir;
page_dir = PAGE_DIR_OFFSET(tsk, 0);
if (!page_dir || page_dir == swapper_pg_dir) {
printk("Trying to clear kernel page-directory: not good\n");
return;
}
if (mem_map[MAP_NR(pg_dir)] > 1) {
if (mem_map[MAP_NR((unsigned long) page_dir)] > 1) {
unsigned long * new_pg;
if (!(new_pg = (unsigned long*) get_free_page(GFP_KERNEL))) {
......@@ -152,8 +136,8 @@ void clear_page_tables(struct task_struct * tsk)
}
for (i = 768 ; i < 1024 ; i++)
new_pg[i] = page_dir[i];
free_page(pg_dir);
tsk->tss.cr3 = (unsigned long) new_pg;
free_page((unsigned long) page_dir);
SET_PAGE_DIR(tsk, new_pg);
return;
}
for (i = 0 ; i < 768 ; i++,page_dir++)
......@@ -168,7 +152,6 @@ void clear_page_tables(struct task_struct * tsk)
void free_page_tables(struct task_struct * tsk)
{
int i;
unsigned long pg_dir;
unsigned long * page_dir;
if (!tsk)
......@@ -177,22 +160,19 @@ void free_page_tables(struct task_struct * tsk)
printk("task[0] (swapper) killed: unable to recover\n");
panic("Trying to free up swapper memory space");
}
pg_dir = tsk->tss.cr3;
if (!pg_dir || pg_dir == (unsigned long) swapper_pg_dir) {
page_dir = PAGE_DIR_OFFSET(tsk, 0);
if (!page_dir || page_dir == swapper_pg_dir) {
printk("Trying to free kernel page-directory: not good\n");
return;
}
tsk->tss.cr3 = (unsigned long) swapper_pg_dir;
if (tsk == current)
__asm__ __volatile__("movl %0,%%cr3": :"a" (tsk->tss.cr3));
if (mem_map[MAP_NR(pg_dir)] > 1) {
free_page(pg_dir);
SET_PAGE_DIR(tsk, swapper_pg_dir);
if (mem_map[MAP_NR((unsigned long) page_dir)] > 1) {
free_page((unsigned long) page_dir);
return;
}
page_dir = (unsigned long *) pg_dir;
for (i = 0 ; i < PTRS_PER_PAGE ; i++,page_dir++)
free_one_table(page_dir);
free_page(pg_dir);
for (i = 0 ; i < PTRS_PER_PAGE ; i++)
free_one_table(page_dir + i);
free_page((unsigned long) page_dir);
invalidate();
}
......@@ -206,9 +186,9 @@ int clone_page_tables(struct task_struct * tsk)
{
unsigned long pg_dir;
pg_dir = current->tss.cr3;
pg_dir = (unsigned long) PAGE_DIR_OFFSET(current, 0);
mem_map[MAP_NR(pg_dir)]++;
tsk->tss.cr3 = pg_dir;
SET_PAGE_DIR(tsk, pg_dir);
return 0;
}
......@@ -220,15 +200,14 @@ int clone_page_tables(struct task_struct * tsk)
int copy_page_tables(struct task_struct * tsk)
{
int i;
unsigned long old_pg_dir, *old_page_dir;
unsigned long new_pg_dir, *new_page_dir;
unsigned long *old_page_dir;
unsigned long *new_page_dir;
if (!(new_pg_dir = get_free_page(GFP_KERNEL)))
new_page_dir = (unsigned long *) get_free_page(GFP_KERNEL);
if (!new_page_dir)
return -ENOMEM;
old_pg_dir = current->tss.cr3;
tsk->tss.cr3 = new_pg_dir;
old_page_dir = (unsigned long *) old_pg_dir;
new_page_dir = (unsigned long *) new_pg_dir;
old_page_dir = PAGE_DIR_OFFSET(current, 0);
SET_PAGE_DIR(tsk, new_page_dir);
for (i = 0 ; i < PTRS_PER_PAGE ; i++,old_page_dir++,new_page_dir++) {
int j;
unsigned long old_pg_table, *old_page_table;
......@@ -295,7 +274,7 @@ int unmap_page_range(unsigned long from, unsigned long size)
return -EINVAL;
}
size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
dir = PAGE_DIR_OFFSET(current,from);
poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
if ((pcnt = PTRS_PER_PAGE - poff) > size)
pcnt = size;
......@@ -353,7 +332,7 @@ int zeromap_page_range(unsigned long from, unsigned long size, int mask)
printk("zeromap_page_range: from = %08lx\n",from);
return -EINVAL;
}
dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
dir = PAGE_DIR_OFFSET(current,from);
size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
if ((pcnt = PTRS_PER_PAGE - poff) > size)
......@@ -415,7 +394,7 @@ int remap_page_range(unsigned long from, unsigned long to, unsigned long size, i
printk("remap_page_range: from = %08lx, to=%08lx\n",from,to);
return -EINVAL;
}
dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
dir = PAGE_DIR_OFFSET(current,from);
size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
if ((pcnt = PTRS_PER_PAGE - poff) > size)
......@@ -493,7 +472,7 @@ unsigned long put_page(struct task_struct * tsk,unsigned long page,
printk("put_page: trying to put page %08lx at %08lx\n",page,address);
return 0;
}
page_table = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
page_table = PAGE_DIR_OFFSET(tsk,address);
if ((*page_table) & PAGE_PRESENT)
page_table = (unsigned long *) (PAGE_MASK & *page_table);
else {
......@@ -527,7 +506,7 @@ unsigned long put_dirty_page(struct task_struct * tsk, unsigned long page, unsig
printk("put_dirty_page: trying to put page %08lx at %08lx\n",page,address);
if (mem_map[MAP_NR(page)] != 1)
printk("mem_map disagrees with %08lx at %08lx\n",page,address);
page_table = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
page_table = PAGE_DIR_OFFSET(tsk,address);
if (PAGE_PRESENT & *page_table)
page_table = (unsigned long *) (PAGE_MASK & *page_table);
else {
......@@ -567,7 +546,7 @@ void do_wp_page(struct vm_area_struct * vma, unsigned long address,
unsigned long new_page;
new_page = __get_free_page(GFP_KERNEL);
pde = PAGE_DIR_OFFSET(vma->vm_task->tss.cr3,address);
pde = PAGE_DIR_OFFSET(vma->vm_task,address);
pte = *pde;
if (!(pte & PAGE_PRESENT))
goto end_wp_page;
......@@ -720,8 +699,8 @@ static int try_to_share(unsigned long to_address, struct vm_area_struct * to_are
unsigned long from_page;
unsigned long to_page;
from_page = (unsigned long)PAGE_DIR_OFFSET(from_area->vm_task->tss.cr3,from_address);
to_page = (unsigned long)PAGE_DIR_OFFSET(to_area->vm_task->tss.cr3,to_address);
from_page = (unsigned long)PAGE_DIR_OFFSET(from_area->vm_task,from_address);
to_page = (unsigned long)PAGE_DIR_OFFSET(to_area->vm_task,to_address);
/* is there a page-directory at from? */
from = *(unsigned long *) from_page;
if (!(from & PAGE_PRESENT))
......@@ -854,7 +833,7 @@ static inline unsigned long get_empty_pgtable(struct task_struct * tsk,unsigned
unsigned long page;
unsigned long *p;
p = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
p = PAGE_DIR_OFFSET(tsk,address);
if (PAGE_PRESENT & *p)
return *p;
if (*p) {
......@@ -862,7 +841,7 @@ static inline unsigned long get_empty_pgtable(struct task_struct * tsk,unsigned
*p = 0;
}
page = get_free_page(GFP_KERNEL);
p = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
p = PAGE_DIR_OFFSET(tsk,address);
if (PAGE_PRESENT & *p) {
free_page(page);
return *p;
......@@ -886,7 +865,7 @@ static inline void do_swap_page(struct vm_area_struct * vma,
unsigned long page;
if (vma->vm_ops && vma->vm_ops->swapin)
page = vma->vm_ops->swapin(vma, entry);
page = vma->vm_ops->swapin(vma, address - vma->vm_start + vma->vm_offset, entry);
else
page = swap_in(entry);
if (*pge != entry) {
......@@ -964,319 +943,3 @@ void do_no_page(struct vm_area_struct * vma, unsigned long address,
free_page(page);
oom(current);
}
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*/
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
struct vm_area_struct * vma;
unsigned long address;
unsigned long page;
/* get the address */
__asm__("movl %%cr2,%0":"=r" (address));
for (vma = current->mm->mmap ; ; vma = vma->vm_next) {
if (!vma)
goto bad_area;
if (vma->vm_end > address)
break;
}
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur)
goto bad_area;
vma->vm_offset -= vma->vm_start - (address & PAGE_MASK);
vma->vm_start = (address & PAGE_MASK);
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
if (regs->eflags & VM_MASK) {
unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
if (bit < 32)
current->tss.screen_bitmap |= 1 << bit;
}
if (!(vma->vm_page_prot & PAGE_USER))
goto bad_area;
if (error_code & PAGE_PRESENT) {
if (!(vma->vm_page_prot & (PAGE_RW | PAGE_COW)))
goto bad_area;
#ifdef CONFIG_TEST_VERIFY_AREA
if (regs->cs == KERNEL_CS)
printk("WP fault at %08x\n", regs->eip);
#endif
do_wp_page(vma, address, error_code);
return;
}
do_no_page(vma, address, error_code);
return;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
if (error_code & PAGE_USER) {
current->tss.cr2 = address;
current->tss.error_code = error_code;
current->tss.trap_no = 14;
send_sig(SIGSEGV, current, 1);
return;
}
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
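	/*
	 * Benign special case first: mem_init() (below) probes whether the
	 * CPU honours the WP bit in supervisor mode by writing through the
	 * temporarily read-only pte pg0[0] with wp_works_ok preset to -1.
	 * The probe writes to virtual address 0, which the kernel segments
	 * of this era translate to linear address TASK_SIZE, hence the test.
	 */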
if (wp_works_ok < 0 && address == TASK_SIZE && (error_code & PAGE_PRESENT)) {
wp_works_ok = 1;
pg0[0] = PAGE_SHARED;
invalidate();
printk("This processor honours the WP bit even when in supervisor mode. Good.\n");
return;
}
if ((unsigned long) (address-TASK_SIZE) < PAGE_SIZE) {
printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
pg0[0] = PAGE_SHARED;
} else
printk(KERN_ALERT "Unable to handle kernel paging request");
printk(" at virtual address %08lx\n",address);
__asm__("movl %%cr3,%0" : "=r" (page));
printk(KERN_ALERT "current->tss.cr3 = %08lx, %%cr3 = %08lx\n",
current->tss.cr3, page);
page = ((unsigned long *) page)[address >> 22];
printk(KERN_ALERT "*pde = %08lx\n", page);
if (page & PAGE_PRESENT) {
page &= PAGE_MASK;
address &= 0x003ff000;
page = ((unsigned long *) page)[address >> PAGE_SHIFT];
printk(KERN_ALERT "*pte = %08lx\n", page);
}
die_if_kernel("Oops", regs, error_code);
do_exit(SIGKILL);
}
/*
* BAD_PAGE is the page that is used for page faults when linux
* is out-of-memory. Older versions of linux just did a
* do_exit(), but using this instead means there is less risk
* for a process dying in kernel mode, possibly leaving a inode
* unused etc..
*
* BAD_PAGETABLE is the accompanying page-table: it is initialized
* to point to BAD_PAGE entries.
*
* ZERO_PAGE is a special page that is used for zero-initialized
* data and COW.
*/
unsigned long __bad_pagetable(void)
{
extern char empty_bad_page_table[PAGE_SIZE];
__asm__ __volatile__("cld ; rep ; stosl":
:"a" (BAD_PAGE + PAGE_TABLE),
"D" ((long) empty_bad_page_table),
"c" (PTRS_PER_PAGE)
:"di","cx");
return (unsigned long) empty_bad_page_table;
}
unsigned long __bad_page(void)
{
extern char empty_bad_page[PAGE_SIZE];
__asm__ __volatile__("cld ; rep ; stosl":
:"a" (0),
"D" ((long) empty_bad_page),
"c" (PTRS_PER_PAGE)
:"di","cx");
return (unsigned long) empty_bad_page;
}
unsigned long __zero_page(void)
{
extern char empty_zero_page[PAGE_SIZE];
__asm__ __volatile__("cld ; rep ; stosl":
:"a" (0),
"D" ((long) empty_zero_page),
"c" (PTRS_PER_PAGE)
:"di","cx");
return (unsigned long) empty_zero_page;
}
void show_mem(void)
{
int i,free = 0,total = 0,reserved = 0;
int shared = 0;
printk("Mem-info:\n");
show_free_areas();
printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
i = high_memory >> PAGE_SHIFT;
while (i-- > 0) {
total++;
if (mem_map[i] & MAP_PAGE_RESERVED)
reserved++;
else if (!mem_map[i])
free++;
else
shared += mem_map[i]-1;
}
printk("%d pages of RAM\n",total);
printk("%d free pages\n",free);
printk("%d reserved pages\n",reserved);
printk("%d pages shared\n",shared);
show_buffers();
#ifdef CONFIG_NET
show_net_buffers();
#endif
}
extern unsigned long free_area_init(unsigned long, unsigned long);
/*
* paging_init() sets up the page tables - note that the first 4MB are
* already mapped by head.S.
*
* This routines also unmaps the page at virtual kernel address 0, so
* that we can trap those pesky NULL-reference errors in the kernel.
*/
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
unsigned long * pg_dir;
unsigned long * pg_table;
unsigned long tmp;
unsigned long address;
/*
* Physical page 0 is special; it's not touched by Linux since BIOS
* and SMM (for laptops with [34]86/SL chips) may need it. It is read
* and write protected to detect null pointer references in the
* kernel.
*/
#if 0
memset((void *) 0, 0, PAGE_SIZE);
#endif
start_mem = PAGE_ALIGN(start_mem);
address = 0;
pg_dir = swapper_pg_dir;
while (address < end_mem) {
tmp = *(pg_dir + 768); /* at virtual addr 0xC0000000 */
if (!tmp) {
tmp = start_mem | PAGE_TABLE;
*(pg_dir + 768) = tmp;
start_mem += PAGE_SIZE;
}
*pg_dir = tmp; /* also map it in at 0x0000000 for init */
pg_dir++;
pg_table = (unsigned long *) (tmp & PAGE_MASK);
for (tmp = 0 ; tmp < PTRS_PER_PAGE ; tmp++,pg_table++) {
if (address < end_mem)
*pg_table = address | PAGE_SHARED;
else
*pg_table = 0;
address += PAGE_SIZE;
}
}
invalidate();
return free_area_init(start_mem, end_mem);
}
void mem_init(unsigned long start_low_mem,
unsigned long start_mem, unsigned long end_mem)
{
int codepages = 0;
int reservedpages = 0;
int datapages = 0;
unsigned long tmp;
extern int etext;
end_mem &= PAGE_MASK;
high_memory = end_mem;
/* mark usable pages in the mem_map[] */
start_low_mem = PAGE_ALIGN(start_low_mem);
start_mem = PAGE_ALIGN(start_mem);
/*
* IBM messed up *AGAIN* in their thinkpad: 0xA0000 -> 0x9F000.
* They seem to have done something stupid with the floppy
* controller as well..
*/
while (start_low_mem < 0x9f000) {
mem_map[MAP_NR(start_low_mem)] = 0;
start_low_mem += PAGE_SIZE;
}
while (start_mem < high_memory) {
mem_map[MAP_NR(start_mem)] = 0;
start_mem += PAGE_SIZE;
}
#ifdef CONFIG_SCSI
scsi_mem_init(high_memory);
#endif
#ifdef CONFIG_SOUND
sound_mem_init();
#endif
for (tmp = 0 ; tmp < high_memory ; tmp += PAGE_SIZE) {
if (mem_map[MAP_NR(tmp)]) {
if (tmp >= 0xA0000 && tmp < 0x100000)
reservedpages++;
else if (tmp < (unsigned long) &etext)
codepages++;
else
datapages++;
continue;
}
mem_map[MAP_NR(tmp)] = 1;
free_page(tmp);
}
tmp = nr_free_pages << PAGE_SHIFT;
printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
tmp >> 10,
high_memory >> 10,
codepages << (PAGE_SHIFT-10),
reservedpages << (PAGE_SHIFT-10),
datapages << (PAGE_SHIFT-10));
/* test if the WP bit is honoured in supervisor mode */
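	/*
	 * pg0[0] is made read-only and address 0 is written through the
	 * kernel data segment.  A 486 or later honours WP even in ring 0,
	 * so the write faults and do_page_fault() (above) sets wp_works_ok
	 * to 1; a plain 386 ignores WP for supervisor accesses, no fault
	 * happens, and wp_works_ok stays negative and is zeroed below.
	 */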
wp_works_ok = -1;
pg0[0] = PAGE_READONLY;
invalidate();
__asm__ __volatile__("movb 0,%%al ; movb %%al,0": : :"ax", "memory");
pg0[0] = 0;
invalidate();
if (wp_works_ok < 0)
wp_works_ok = 0;
#ifdef CONFIG_TEST_VERIFY_AREA
wp_works_ok = 0;
#endif
return;
}
void si_meminfo(struct sysinfo *val)
{
int i;
i = high_memory >> PAGE_SHIFT;
val->totalram = 0;
val->sharedram = 0;
val->freeram = nr_free_pages << PAGE_SHIFT;
val->bufferram = buffermem;
while (i-- > 0) {
if (mem_map[i] & MAP_PAGE_RESERVED)
continue;
val->totalram++;
if (!mem_map[i])
continue;
val->sharedram += mem_map[i]-1;
}
val->totalram <<= PAGE_SHIFT;
val->sharedram <<= PAGE_SHIFT;
return;
}
......@@ -24,7 +24,7 @@ static void change_protection(unsigned long start, unsigned long end, int prot)
unsigned long page, offset;
int nr;
dir = PAGE_DIR_OFFSET(current->tss.cr3, start);
dir = PAGE_DIR_OFFSET(current, start);
offset = (start >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
nr = (end - start) >> PAGE_SHIFT;
while (nr > 0) {
......
......@@ -77,14 +77,8 @@ extern inline int add_to_swap_cache(unsigned long addr, unsigned long entry)
#ifdef SWAP_CACHE_INFO
swap_cache_add_total++;
#endif
if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) {
__asm__ __volatile__ (
"xchgl %0,%1\n"
: "=m" (swap_cache[addr >> PAGE_SHIFT]),
"=r" (entry)
: "0" (swap_cache[addr >> PAGE_SHIFT]),
"1" (entry)
);
if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) {
entry = (unsigned long) xchg_ptr(swap_cache + (addr >> PAGE_SHIFT), (void *) entry);
if (entry) {
printk("swap_cache: replacing non-NULL entry\n");
}
......@@ -398,7 +392,7 @@ static int swap_out_process(struct task_struct * p)
if (address < vma->vm_start)
address = vma->vm_start;
pgdir = (address >> PGDIR_SHIFT) + (unsigned long *) p->tss.cr3;
pgdir = PAGE_DIR_OFFSET(p, address);
offset = address & ~PGDIR_MASK;
address &= PGDIR_MASK;
for ( ; address < TASK_SIZE ;
......@@ -754,7 +748,7 @@ static int try_to_unuse(unsigned int type)
if (!p)
continue;
for (pgt = 0 ; pgt < PTRS_PER_PAGE ; pgt++) {
ppage = pgt + ((unsigned long *) p->tss.cr3);
ppage = pgt + PAGE_DIR_OFFSET(p, 0);
page = *ppage;
if (!page)
continue;
......
......@@ -39,7 +39,7 @@ static inline void set_pgdir(unsigned long dindex, unsigned long value)
p = &init_task;
do {
((unsigned long *) p->tss.cr3)[dindex] = value;
PAGE_DIR_OFFSET(p,0)[dindex] = value;
p = p->next_task;
} while (p != &init_task);
}
......
......@@ -580,7 +580,9 @@ int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
break;
#endif
case ARPHRD_ETHER:
#ifdef CONFIG_ARCNET
case ARPHRD_ARCNET:
#endif
if(arp->ar_pro != htons(ETH_P_IP))
{
kfree_skb(skb, FREE_READ);
......@@ -1054,10 +1056,12 @@ static int arp_req_set(struct arpreq *req)
htype = ARPHRD_ETHER;
hlen = ETH_ALEN;
break;
#ifdef CONFIG_ARCNET
case ARPHRD_ARCNET:
htype = ARPHRD_ARCNET;
hlen = 1; /* length of arcnet addresses */
break;
#endif
#ifdef CONFIG_AX25
case ARPHRD_AX25:
htype = ARPHRD_AX25;
......
......@@ -91,7 +91,7 @@ struct sock {
unsigned long lingertime;
int proc;
struct sock *next;
struct sock *prev; /* Doubdly linked chain.. */
struct sock *prev; /* Doubly linked chain.. */
struct sock *pair;
struct sk_buff * volatile send_head;
struct sk_buff * volatile send_tail;
......
......@@ -287,7 +287,6 @@ static struct socket *sock_alloc(int wait)
++nsockets;
}
sti();
printk("sock_alloc: Alloced some more, now %d sockets\n", nsockets);
}
......@@ -367,7 +366,7 @@ printk("sock_alloc: Alloced some more, now %d sockets\n", nsockets);
sti();
/*
* The rest of these are in fact vestigal from the previous
* The rest of these are in fact vestigial from the previous
* version, which didn't have growing list of sockets.
* These may become necessary if there are 2000 (or whatever
* the hard limit is set to) sockets already in system,
......@@ -979,7 +978,7 @@ static int sock_accept(int fd, struct sockaddr *upeer_sockaddr, int *upeer_addrl
if (!(newsock = sock_alloc(0)))
{
printk("NET: sock_accept: no more sockets\n");
return(-ENOSR); /* Was: EGAIN, but we are out of system
return(-ENOSR); /* Was: EAGAIN, but we are out of system
resources! */
}
newsock->type = sock->type;
......
......@@ -55,7 +55,7 @@
* space that need not be wasted.
*/
struct unix_proto_data unix_datas[NSOCKETS];
struct unix_proto_data unix_datas[NSOCKETS_UNIX];
static int unix_proto_create(struct socket *sock, int protocol);
static int unix_proto_dup(struct socket *newsock, struct socket *oldsock);
......
......@@ -43,10 +43,10 @@ struct unix_proto_data {
int lock_flag;
};
extern struct unix_proto_data unix_datas[NSOCKETS];
extern struct unix_proto_data unix_datas[NSOCKETS_UNIX];
#define last_unix_data (unix_datas + NSOCKETS - 1)
#define last_unix_data (unix_datas + NSOCKETS_UNIX - 1)
#define UN_DATA(SOCK) ((struct unix_proto_data *)(SOCK)->data)
......