Commit 3f4e170a authored by Linus Torvalds

Import 2.1.5

parent a1e82dcd
VERSION = 2
PATCHLEVEL = 1
SUBLEVEL = 4
SUBLEVEL = 5
ARCH = i386
......
......@@ -125,52 +125,3 @@ wrfpcr:
lda $30,0x10($30)
ret ($26)
.end wrfpcr
#define ex_count 0($16)
#define ex_r9 8($16)
#define ex_r10 16($16)
#define ex_r11 24($16)
#define ex_r12 32($16)
#define ex_r13 40($16)
#define ex_r14 48($16)
#define ex_r15 56($16)
#define ex_r26 64($16)
#define ex_r30 72($16)
.align 3
.globl __exception
.ent __exception
__exception:
ldq $1,ex_count
bis $31,$31,$0 /* return 0 */
addq $1,1,$2
bne $1,1f /* don't save state if orig_count != 0 */
stq $9,ex_r9
stq $10,ex_r10
stq $11,ex_r11
stq $12,ex_r12
stq $13,ex_r13
stq $14,ex_r14
stq $15,ex_r15
stq $26,ex_r26
stq $30,ex_r30
1: stq $2,ex_count
ret ($26)
.end __exception
.align 3
.globl __handle_exception
.ent __handle_exception
__handle_exception:
ldq $9,ex_r9
ldq $10,ex_r10
ldq $11,ex_r11
ldq $12,ex_r12
ldq $13,ex_r13
ldq $14,ex_r14
ldq $15,ex_r15
ldq $26,ex_r26
ldq $30,ex_r30
bis $31,1,$0 /* return 1 */
ret ($26)
.end __handle_exception
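These two entry points implement a small save/restore protocol keyed on ex_count: the first fault on a path saves the callee-saved registers, nested faults only bump the counter, and the unwinder restores state and returns 1 instead of 0. A minimal C sketch of that protocol (struct layout and names here are illustrative, not the kernel's real types):

#include <stddef.h>

/* Illustrative model only -- not the kernel's actual definitions. */
struct alpha_ex_state {
	unsigned long count;     /* ex_count: fault nesting depth */
	unsigned long saved[9];  /* $9-$15, $26, $30              */
};

/* __exception: save state only on the outermost fault, then bump the
   counter; return 0 so the resumed code sees "no fault handled". */
static long take_exception(struct alpha_ex_state *ex,
                           const unsigned long regs[9])
{
	size_t i;
	if (ex->count == 0)
		for (i = 0; i < 9; i++)
			ex->saved[i] = regs[i];
	ex->count++;
	return 0;
}

/* __handle_exception: restore the saved registers and return 1 so the
   resumed code can tell "a fault occurred" from the normal path. */
static long handle_exception(const struct alpha_ex_state *ex,
                             unsigned long regs[9])
{
	size_t i;
	for (i = 0; i < 9; i++)
		regs[i] = ex->saved[i];
	return 1;
}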
......@@ -726,7 +726,10 @@ void init_IRQ(void)
dma_outb(0, DMA1_CLR_MASK_REG);
dma_outb(0, DMA2_CLR_MASK_REG);
#if NR_IRQS == 48
*(unsigned int *)GRU_INT_MASK = ~(irq_mask >> 16); mb();/* invert */
*(unsigned int *)GRU_INT_EDGE = 0UL; mb();/* all are level */
*(unsigned int *)GRU_INT_HILO = 0x80000000UL; mb();/* ISA only HI */
*(unsigned int *)GRU_INT_CLEAR = 0UL; mb();/* all clear */
enable_irq(16 + 31); /* enable (E)ISA PIC cascade */
#elif NR_IRQS == 33
outl(irq_mask >> 16, 0x804);
......
......@@ -201,19 +201,37 @@ asmlinkage void do_entUna(void * va, unsigned long opcode, unsigned long reg,
/* $16-$18 are PAL-saved, and are offset by 19 entries */
if (reg >= 16 && reg <= 18)
reg += 19;
switch (opcode) {
case 0x28: /* ldl */
*(reg+regs.regs) = get_unaligned((int *)va);
return;
case 0x29: /* ldq */
*(reg+regs.regs) = get_unaligned((long *)va);
return;
case 0x2c: /* stl */
put_unaligned(*(reg+regs.regs), (int *)va);
return;
case 0x2d: /* stq */
put_unaligned(*(reg+regs.regs), (long *)va);
return;
{
/* Set up an exception handler address just in case we are
handling an unaligned fixup within get_user(). Notice
that we do *not* change the exception count because we
only want to bounce possible exceptions on through. */
__label__ handle_ex;
register void *ex_vector __asm__("$28");
__asm__ __volatile__ ("" : "=r"(ex_vector) : "0"(&&handle_ex));
switch (opcode) {
case 0x28: /* ldl */
*(reg+regs.regs) = get_unaligned((int *)va);
return;
case 0x29: /* ldq */
*(reg+regs.regs) = get_unaligned((long *)va);
return;
case 0x2c: /* stl */
put_unaligned(*(reg+regs.regs), (int *)va);
return;
case 0x2d: /* stq */
put_unaligned(*(reg+regs.regs), (long *)va);
return;
/* We'll only get back here if we are handling a
valid exception. */
handle_ex:
(&regs)->pc = *(28+regs.regs);
return;
}
}
printk("Bad unaligned kernel access at %016lx: %p %lx %ld\n",
regs.pc, va, opcode, reg);
......@@ -335,12 +353,12 @@ asmlinkage void do_entUnaUser(void * va, unsigned long opcode, unsigned long reg
reg_addr += (reg - 9);
break;
case 16: case 17: case 18:
/* a0-a2 in PAL frame */
reg_addr += 7 + 20 + 3 + (reg - 16);
break;
case 19: case 20: case 21: case 22: case 23:
case 24: case 25: case 26: case 27: case 28:
/* a3-at in SAVE_ALL frame */
reg_addr += 7 + 9 + (reg - 19);
......@@ -376,7 +394,7 @@ asmlinkage void do_entUnaUser(void * va, unsigned long opcode, unsigned long reg
case 0x23: /* ldt */
alpha_write_fp_reg(reg, get_unaligned((unsigned long *)va));
break;
case 0x27: /* stt */
put_unaligned(alpha_read_fp_reg(reg), (unsigned long *)va);
break;
......@@ -403,7 +421,7 @@ asmlinkage void do_entUnaUser(void * va, unsigned long opcode, unsigned long reg
if (opcode >= 0x28 && reg == 30 && dir == VERIFY_WRITE) {
wrusp(usp);
}
}
/*
......
......@@ -4,7 +4,9 @@
OBJS = __divqu.o __remqu.o __divlu.o __remlu.o memset.o memcpy.o io.o \
checksum.o csum_partial_copy.o strlen.o \
get_user.o put_user.o \
strcat.o strcpy.o strncat.o strncpy.o stxcpy.o stxncpy.o \
strchr.o strrchr.o \
copy_user.o clear_user.o strncpy_from_user.o
lib.a: $(OBJS)
$(AR) rcs lib.a $(OBJS)
......
/*
* arch/alpha/lib/clear_user.S
* Contributed by Richard Henderson <rth@tamu.edu>
*
* Zero user space, handling exceptions as we go.
*
* We have to make sure that $0 is always up-to-date and contains the
* right "bytes left to zero" value (and that it is updated only _after_
* a successful copy). There is also some rather minor exception setup
* stuff.
*
* NOTE! This is not directly C-callable, because the calling semantics
* are different:
*
* Inputs:
* length in $0
* destination address in $6
* exception pointer in $7
* return address in $28 (exceptions expect it there)
*
* Outputs:
* bytes left to copy in $0
*
* Clobbers:
* $1,$2,$3,$4,$5,$6
*/
.set noat
.set noreorder
.align 4
.globl __clear_user
.ent __clear_user
.frame $30, 0, $28
.prologue 0
$loop:
and $1, 3, $4 # e0 :
beq $4, 1f # .. e1 :
0: stq_u $31, 0($6) # e0 : zero one word
subq $0, 8, $0 # .. e1 :
subq $4, 1, $4 # e0 :
addq $6, 8, $6 # .. e1 :
bne $4, 0b # e1 :
unop # :
1: bic $1, 3, $1 # e0 :
beq $1, $tail # .. e1 :
2: stq_u $31, 0($6) # e0 : zero four words
subq $0, 8, $0 # .. e1 :
stq_u $31, 8($6) # e0 :
subq $0, 8, $0 # .. e1 :
stq_u $31, 16($6) # e0 :
subq $0, 8, $0 # .. e1 :
stq_u $31, 24($6) # e0 :
subq $0, 8, $0 # .. e1 :
subq $1, 4, $1 # e0 :
addq $6, 32, $6 # .. e1 :
bne $1, 2b # e1 :
$tail:
bne $2, 1f # e1 : is there a tail to do?
stq $3, 0($7) # e0 : decrement exception count
ret $31, ($28), 1 # .. e1 :
1: ldq_u $5, 0($6) # e1 :
mskqh $5, $0, $5 # e0 :
stq_u $5, 0($6) # e0 :
clr $0 # .. e1 :
stq $3, 0($7) # e0 : decrement exception count
ret $31, ($28), 1 # .. e1 :
__clear_user:
ldq $3, 0($7) # e0 : load exception count for increment
beq $0, $zerolength # .. e1 :
and $6, 7, $4 # e0 : find dest misalignment
addq $0, $4, $1 # e1 : bias counter
addq $3, 1, $5 # e0 :
and $1, 7, $2 # .. e1 : number of bytes in tail
srl $1, 3, $1 # e0 :
unop # :
stq $5, 0($7) # e0 : increment exception count
beq $4, $loop # .. e1 :
ldq_u $5, 0($6) # e0 : load dst word to mask back in
beq $1, $oneword # .. e1 : sub-word store?
mskql $5, $6, $5 # e0 : take care of misaligned head
addq $6, 8, $6 # .. e1 :
stq_u $5, -8($6) # e0 :
addq $0, $4, $0 # .. e1 : bytes left -= 8 - misalignment
subq $1, 1, $1 # e0 :
subq $0, 8, $0 # .. e1 :
br $loop # e1 :
unop # :
$oneword:
mskql $5, $6, $4 # e0 :
mskqh $5, $2, $5 # e0 :
or $5, $4, $5 # e1 :
stq_u $5, 0($6) # e0 :
clr $0 # .. e1 :
stq $3, 0($7) # e0 : decrement exception count
$zerolength:
ret $31, ($28), 1 # .. e1 :
.end __clear_user
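In C terms the routine above zeroes [dst, dst+len) with aligned quadword stores, masking at the boundaries rather than ever touching bytes outside the range. A behavioral model (faults and the $0 bytes-left count set aside; little-endian, as on Alpha):

#include <stdint.h>

static void clear_bytes_model(uint8_t *to, unsigned long n)
{
	while (n && ((uintptr_t)to & 7)) {  /* misaligned head (mskql path) */
		*to++ = 0;
		n--;
	}
	while (n >= 8) {                    /* whole quadwords (stq_u $31)  */
		*(uint64_t *)(void *)to = 0;
		to += 8;
		n -= 8;
	}
	while (n--)                         /* partial tail (mskqh path)    */
		*to++ = 0;
}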
/*
* arch/alpha/lib/get_user.S
*
* (C) Copyright 1996 Linus Torvalds
*/
/*
* This does simple reads from user mode, returning zero for
* success and -EINVAL for a fault. Note that we may NOT do
* unaligned accesses, because the unaligned fault handler
* must not take the exception..
*
* NOTE! These are NOT normal function calls callable from C.
* As we have two return values (the actual value gotten from
* user space, and the error return value) the calling sequence
* is different.
*
* Input:
* user address in $2
* exception structure address in $3
* return address in $28 (exceptions expect it there)
*
* Output:
* error number in $0
* actual result in $1
*
* Clobbers:
* $4,$5
*/
.set noat
.align 3
.globl __get_user_8
.ent __get_user_8
__get_user_8:
ldq $4,0($3)
lda $0,-14
addq $4,1,$5
stq $5,0($3)
ldq_u $1,0($2)
stq $4,0($3)
bis $31,$31,$0
extbl $1,$2,$1
ret $31,($28),1
.end __get_user_8
.align 3
.globl __get_user_16
.ent __get_user_16
__get_user_16:
ldq $4,0($3)
lda $0,-14
addq $4,1,$5
stq $5,0($3)
ldq_u $1,0($2)
ldq_u $5,1($2)
stq $4,0($3)
extwl $1,$2,$1
bis $31,$31,$0
extwh $5,$2,$5
bis $1,$5,$1
ret $31,($28),1
.end __get_user_16
.align 3
.globl __get_user_32
.ent __get_user_32
__get_user_32:
ldq $4,0($3)
lda $0,-14
addq $4,1,$5
stq $5,0($3)
ldq_u $1,0($2)
ldq_u $5,3($2)
stq $4,0($3)
extll $1,$2,$1
bis $31,$31,$0
extlh $5,$2,$5
bis $1,$5,$1
ret $31,($28),1
.end __get_user_32
.align 3
.globl __get_user_64
.ent __get_user_64
__get_user_64:
ldq $4,0($3)
lda $0,-14
addq $4,1,$5
stq $5,0($3)
ldq_u $1,0($2)
ldq_u $5,7($2)
stq $4,0($3)
extql $1,$2,$1
bis $31,$31,$0
extqh $5,$2,$5
bis $1,$5,$1
ret $31,($28),1
.end __get_user_64
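All four variants share one shape: pre-load -EFAULT into $0, bump the exception count so a fault bounces straight back to the caller with that error still in place, do the access, then drop the count and overwrite $0 with 0. Roughly, in C (hypothetical harness; ex_count stands in for the per-task exception counter):

static long get_user_sketch(const unsigned char *uaddr,
                            unsigned long *ex_count,
                            unsigned long *value)
{
	long err = -14;        /* -EFAULT: the result if the load faults */
	(*ex_count)++;         /* arm: a fault now unwinds to our caller */
	*value = *uaddr;       /* the possibly-faulting user access      */
	(*ex_count)--;         /* disarm */
	err = 0;               /* reached only if no fault occurred      */
	return err;
}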
/*
* arch/alpha/lib/put_user.S
*
* (C) Copyright 1996 Linus Torvalds
*/
/*
* This does simple writes to user mode, returning zero for
* success and -EINVAL for a fault. Note that we may NOT do
* unaligned accesses, because the unaligned fault handler
* must not take the exception..
*
* NOTE! These are NOT normal function calls callable from C.
* As we have two return values (the actual value gotten from
* user space, and the error return value) the calling sequence
* is different.
*
* Input:
* value to be written in $6
* user address in $7
* exception pointer in $8
* return address in $28 (exceptions expect it there)
* Output:
* return value in $0
* Clobbers:
* $1,$2,$3,$4,$5,$6
*/
.set noat
.align 3
.globl __put_user_8
.ent __put_user_8
__put_user_8:
ldq $2,0($8)
lda $0,-14
addq $2,1,$1
stq $1,0($8)
ldq_u $1,0($7)
insbl $6,$7,$6
mskbl $1,$7,$1
bis $6,$1,$6
stq_u $6,0($7)
stq $2,0($8)
bis $31,$31,$0
ret $31,($28),1
.end __put_user_8
.align 3
.globl __put_user_16
.ent __put_user_16
__put_user_16:
ldq $2,0($8)
lda $0,-14
addq $2,1,$1
stq $1,0($8)
ldq_u $4,1($7)
ldq_u $5,0($7)
inswh $6,$7,$1
inswl $6,$7,$3
mskwh $4,$7,$4
mskwl $5,$7,$5
bis $4,$1,$4
bis $5,$3,$5
stq_u $4,1($7)
stq_u $5,0($7)
stq $2,0($8)
bis $31,$31,$0
ret $31,($28),1
.end __put_user_16
.align 3
.globl __put_user_32
.ent __put_user_32
__put_user_32:
ldq $5,0($8)
lda $0,-14
and $7,3,$2
addq $5,1,$1
stq $1,0($8)
bne $2,__una32
stl $6,0($7)
stq $5,0($8)
bis $31,$31,$0
ret $31,($28),1
.align 4
__una32:
ldq_u $3,3($7)
ldq_u $4,0($7)
insll $6,$7,$2
inslh $6,$7,$1
msklh $3,$7,$3
mskll $4,$7,$4
bis $3,$1,$3
bis $4,$2,$4
stq_u $3,3($7)
stq_u $4,0($7)
stq $5,0($8)
bis $31,$31,$0
ret $31,($28),1
.end __put_user_32
.align 3
.globl __put_user_64
.ent __put_user_64
__put_user_64:
ldq $5,0($8)
lda $0,-14
and $7,7,$2
addq $5,1,$1
stq $1,0($8)
bne $2,__una64
stq $6,0($7)
stq $5,0($8)
bis $31,$31,$0
ret $31,($28),1
.align 4
__una64:
ldq_u $4,0($7)
ldq_u $3,8($7)
insql $6,$7,$2
insqh $6,$7,$1
mskql $4,$7,$4
mskqh $3,$7,$3
bis $4,$2,$4
bis $3,$1,$3
stq_u $4,0($7)
stq_u $3,8($7)
stq $5,0($8)
bis $31,$31,$0
ret $31,($28),1
.end __put_user_64
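Alpha has no unaligned store instruction, so __una32/__una64 above split the store into two aligned quadword read-modify-write cycles: the ins* instructions position the new bytes, msk* clears the destination bytes they replace, and or merges them. The same dance in C (little-endian sketch, 32-bit case):

#include <stdint.h>

static void una32_store_model(uint8_t *p, uint32_t v)
{
	uint64_t *lo = (uint64_t *)(void *)((uintptr_t)p & ~7UL);       /* ldq_u 0(p) */
	uint64_t *hi = (uint64_t *)(void *)(((uintptr_t)p + 3) & ~7UL); /* ldq_u 3(p) */
	unsigned s = ((uintptr_t)p & 7) * 8;       /* bit offset within the quadword */

	uint64_t mlo = (uint64_t)0xffffffffu << s; /* bytes covered in the low qword */
	*lo = (*lo & ~mlo) | ((uint64_t)v << s);   /* mskll; insll; or; stq_u        */

	if (hi != lo) {                            /* value straddles two quadwords  */
		uint64_t mhi = (uint64_t)0xffffffffu >> (64 - s);
		*hi = (*hi & ~mhi) | ((uint64_t)v >> (64 - s)); /* msklh; inslh; or */
	}
}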
/*
* arch/alpha/lib/strcat.S
* Contributed by Richard Henderson (rth@tamu.edu)
*
* Append a null-terminated string from SRC to DST.
*/
.text
.align 3
.globl strcat
.ent strcat
strcat:
.frame $30, 0, $26
.prologue 0
mov $16, $0 # set up return value
/* Find the end of the string. */
ldq_u $1, 0($16) # load first quadword (a0 may be misaligned)
lda $2, -1
insqh $2, $16, $2
andnot $16, 7, $16
or $2, $1, $1
cmpbge $31, $1, $2 # bits set iff byte == 0
bne $2, $found
$loop: ldq $1, 8($16)
addq $16, 8, $16
cmpbge $31, $1, $2
beq $2, $loop
$found: negq $2, $3 # clear all but least set bit
and $2, $3, $2
and $2, 0xf0, $3 # binary search for that set bit
and $2, 0xcc, $4
and $2, 0xaa, $5
cmovne $3, 4, $3
cmovne $4, 2, $4
cmovne $5, 1, $5
addq $3, $4, $3
addq $16, $5, $16
addq $16, $3, $16
/* Now do the append. */
mov $26, $23
br __stxcpy
.end strcat
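The $found sequence is worth decoding: cmpbge $31,x,y sets bit i of y iff byte i of x is zero; negq/and isolates the lowest such bit, and the 0xf0/0xcc/0xaa tests binary-search for its index, i.e. the byte offset of the terminator. In C:

#include <stdint.h>

static unsigned cmpbge_zero(uint64_t q)        /* cmpbge $31, q, m */
{
	unsigned m = 0, i;
	for (i = 0; i < 8; i++)
		if (((q >> (8 * i)) & 0xff) == 0)
			m |= 1u << i;
	return m;
}

static unsigned first_zero_byte(uint64_t q)    /* assumes q holds a zero byte */
{
	unsigned m   = cmpbge_zero(q);
	unsigned bit = m & (0u - m);           /* negq; and: lowest set bit */
	unsigned idx = 0;
	if (bit & 0xf0) idx += 4;              /* and/cmovne 0xf0 */
	if (bit & 0xcc) idx += 2;              /* and/cmovne 0xcc */
	if (bit & 0xaa) idx += 1;              /* and/cmovne 0xaa */
	return idx;                            /* byte offset of the first NUL */
}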
/*
* arch/alpha/lib/strchr.S
* Contributed by Richard Henderson (rth@tamu.edu)
*
* Return the address of a given character within a null-terminated
* string, or null if it is not found.
*/
#include <alpha/regdef.h>
.set noreorder
.set noat
.align 3
.globl strchr
.ent strchr
strchr:
.frame sp, 0, ra
.prologue 0
zapnot a1, 1, a1 # e0 : zero extend the search character
ldq_u t0, 0(a0) # .. e1 : load first quadword
sll a1, 8, t5 # e0 : replicate the search character
andnot a0, 7, v0 # .. e1 : align our loop pointer
or t5, a1, a1 # e0 :
lda t4, -1 # .. e1 : build garbage mask
sll a1, 16, t5 # e0 :
cmpbge zero, t0, t2 # .. e1 : bits set iff byte == zero
mskqh t4, a0, t4 # e0 :
or t5, a1, a1 # .. e1 :
sll a1, 32, t5 # e0 :
cmpbge zero, t4, t4 # .. e1 : bits set iff byte is garbage
or t5, a1, a1 # e0 :
xor t0, a1, t1 # .. e1 : make bytes == c zero
cmpbge zero, t1, t3 # e0 : bits set iff byte == c
or t2, t3, t0 # e1 : bits set iff char match or zero match
andnot t0, t4, t0 # e0 : clear garbage bits
bne t0, $found # .. e1 (zdb)
$loop: ldq t0, 8(v0) # e0 :
addq v0, 8, v0 # .. e1 :
nop # e0 :
xor t0, a1, t1 # .. e1 (ev5 data stall)
cmpbge zero, t0, t2 # e0 : bits set iff byte == 0
cmpbge zero, t1, t3 # .. e1 : bits set iff byte == c
or t2, t3, t0 # e0 :
beq t0, $loop # .. e1 (zdb)
$found: negq t0, t1 # e0 : clear all but least set bit
and t0, t1, t0 # e1 (stall)
and t0, t3, t1 # e0 : bit set iff byte was the char
beq t1, $retnull # .. e1 (zdb)
and t0, 0xf0, t2 # e0 : binary search for that set bit
and t0, 0xcc, t3 # .. e1 :
and t0, 0xaa, t4 # e0 :
cmovne t2, 4, t2 # .. e1 :
cmovne t3, 2, t3 # e0 :
cmovne t4, 1, t4 # .. e1 :
addq t2, t3, t2 # e0 :
addq v0, t4, v0 # .. e1 :
addq v0, t2, v0 # e0 :
ret # .. e1 :
$retnull:
mov zero, v0 # e0 :
ret # .. e1 :
.end strchr
/*
* arch/alpha/lib/strcpy.S
* Contributed by Richard Henderson (rth@tamu.edu)
*
* Copy a null-terminated string from SRC to DST. Return a pointer
* to the null-terminator in the source.
*/
.text
.align 3
.globl strcpy
.ent strcpy
strcpy:
.frame $30, 0, $26
.prologue 0
mov $16, $0 # set up return value
mov $26, $23 # set up return address
br __stxcpy # do the copy
.end strcpy
/*
* arch/alpha/lib/strncat.S
* Contributed by Richard Henderson (rth@tamu.edu)
*
* Append no more than COUNT characters from the null-terminated string SRC
* to the null-terminated string DST. Always null-terminate the new DST.
*
* This differs slightly from the semantics in libc in that we never write
* past count, whereas libc may write to count+1. This follows the generic
* implementation in lib/string.c and is, IMHO, more sensible.
*/
.text
.align 3
.globl strncat
.ent strncat
strncat:
.frame $30, 0, $26
.prologue 0
mov $16, $0 # set up return value
beq $18, $zerocount
/* Find the end of the string. */
ldq_u $1, 0($16) # load first quadword ($16 may be misaligned)
lda $2, -1($31)
insqh $2, $16, $2
andnot $16, 7, $16
or $2, $1, $1
cmpbge $31, $1, $2 # bits set iff byte == 0
bne $2, $found
$loop: ldq $1, 8($16)
addq $16, 8, $16
cmpbge $31, $1, $2
beq $2, $loop
$found: negq $2, $3 # clear all but least set bit
and $2, $3, $2
and $2, 0xf0, $3 # binary search for that set bit
and $2, 0xcc, $4
and $2, 0xaa, $5
cmovne $3, 4, $3
cmovne $4, 2, $4
cmovne $5, 1, $5
addq $3, $4, $3
addq $16, $5, $16
addq $16, $3, $16
/* Now do the append. */
bsr $23, __stxncpy
/* Worry about the null termination. */
zapnot $1, $22, $2 # was last byte a null?
bne $2, 0f
ret
0: cmplt $22, $24, $2 # did we fill the buffer completely?
or $2, $18, $2
bne $2, 2f
and $24, 0x80, $2 # no zero next byte
bne $2, 1f
/* Here there are bytes left in the current word. Clear one. */
addq $24, $24, $24 # end-of-count bit <<= 1
2: zap $1, $24, $1
stq_u $1, 0($16)
ret
1: /* Here we must read the next DST word and clear the first byte. */
ldq_u $1, 8($16)
zap $1, 1, $1
stq_u $1, 8($16)
$zerocount:
ret
.end strncat
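For reference, the semantics promised in the header (write at most count bytes in total, terminator included, and always terminate) look roughly like this in portable C — a sketch matching that contract, not the lib/string.c text itself:

#include <stddef.h>

static char *strncat_sketch(char *dst, const char *src, size_t count)
{
	char *ret = dst;
	if (count) {
		while (*dst)                   /* find the end of dst    */
			dst++;
		while (--count && (*dst++ = *src++) != '\0')
			;                      /* copy within the budget */
		if (count == 0)
			*dst = '\0';           /* out of room: force NUL */
	}
	return ret;
}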
/*
* arch/alpha/lib/strncpy.S
* Contributed by Richard Henderson (rth@tamu.edu)
*
* Copy no more than COUNT bytes of the null-terminated string from
* SRC to DST. If SRC does not cover all of COUNT, the balance is
* zeroed.
*
* Or, rather, if the kernel cared about that weird ANSI quirk. This
* version has cropped that bit o' nastiness as well as assuming that
* __stxncpy is in range of a branch.
*/
.set noat
.set noreorder
.text
.align 3
.globl strncpy
.ent strncpy
strncpy:
.frame $30, 0, $26
.prologue 0
mov $16, $0 # set return value now
beq $18, 0f
mov $26, $23 # set return address
br __stxncpy # do the work of the copy
0: ret
.end strncpy
/*
* arch/alpha/lib/strncpy_from_user.S
* Contributed by Richard Henderson (rth@tamu.edu)
*
* Just like strncpy except in the return value:
*
* -EFAULT if an exception occurs before the terminator is copied.
* N if the buffer filled.
*
* Otherwise the length of the string is returned.
*
* Additionally, the fourth argument should be `&current->tss.ex'.
*/
#include <asm/errno.h>
.set noat
.set noreorder
.text
.align 3
.globl __strncpy_from_user
.ent __strncpy_from_user
__strncpy_from_user:
.frame $30, 0, $26
.prologue 0
ldq $20, 0($19)
beq $18, 9f
br $28, 1f # set up exception return address
lda $0, -EFAULT
ret
1: addq $20, 1, $21
mov $16, $0 # save the string start
stq $21, 0($19) # increment exception count
bsr $23, __stxncpy # do the work of the copy
zapnot $1, $22, $5 # was last byte written null?
stq $20, 0($19) # decrement exception count
cmovne $5, 1, $5
and $22, 0xf0, $4 # binary search for the address of the
and $22, 0xcc, $3 # last byte written
and $22, 0xaa, $2
bic $16, 7, $1
cmovne $4, 4, $4
cmovne $3, 2, $3
cmovne $2, 1, $2
addq $1, $4, $1
addq $2, $3, $2
addq $1, $2, $1
addq $1, $5, $1 # add one if we filled the buffer
subq $1, $0, $0 # find string length
ret
9: clr $0
ret
.end __strncpy_from_user
/*
* arch/alpha/lib/strrchr.S
* Contributed by Richard Henderson (rth@tamu.edu)
*
* Return the address of the last occurrence of a given character
* within a null-terminated string, or null if it is not found.
*/
#include <alpha/regdef.h>
.set noreorder
.set noat
.align 3
.ent strrchr
.globl strrchr
strrchr:
.frame sp, 0, ra
.prologue 0
zapnot a1, 1, a1 # e0 : zero extend our test character
mov zero, t6 # .. e1 : t6 is last match aligned addr
sll a1, 8, t5 # e0 : replicate our test character
mov zero, t7 # .. e1 : t7 is last match byte compare mask
or t5, a1, a1 # e0 :
ldq_u t0, 0(a0) # .. e1 : load first quadword
sll a1, 16, t5 # e0 :
andnot a0, 7, v0 # .. e1 : align source addr
or t5, a1, a1 # e0 :
lda t4, -1 # .. e1 : build garbage mask
sll a1, 32, t5 # e0 :
cmpbge zero, t0, t1 # .. e1 : bits set iff byte == zero
mskqh t4, a0, t4 # e0 :
or t5, a1, a1 # .. e1 : character replication complete
xor t0, a1, t2 # e0 : make bytes == c zero
cmpbge zero, t4, t4 # .. e1 : bits set iff byte is garbage
cmpbge zero, t2, t3 # e0 : bits set iff byte == c
andnot t1, t4, t1 # .. e1 : clear garbage from null test
andnot t3, t4, t3 # e0 : clear garbage from char test
bne t1, $eos # .. e1 : did we already hit the terminator?
/* Character search main loop */
$loop:
ldq t0, 8(v0) # e0 : load next quadword
cmovne t3, v0, t6 # .. e1 : save previous comparisons match
cmovne t3, t3, t7 # e0 :
addq v0, 8, v0 # .. e1 :
xor t0, a1, t2 # e0 :
cmpbge zero, t0, t1 # .. e1 : bits set iff byte == zero
cmpbge zero, t2, t3 # e0 : bits set iff byte == c
beq t1, $loop # .. e1 : if we haven't seen a null, loop
/* Mask out character matches after terminator */
$eos:
negq t1, t4 # e0 : isolate first null byte match
and t1, t4, t4 # e1 :
subq t4, 1, t5 # e0 : build a mask of the bytes up to...
or t4, t5, t4 # e1 : ... and including the null
and t3, t4, t3 # e0 : mask out char matches after null
cmovne t3, t3, t7 # .. e1 : save it, if match found
cmovne t3, v0, t6 # e0 :
/* Locate the address of the last matched character */
/* Retain the early exit for the ev4 -- the ev5 mispredict penalty
is 5 cycles -- the same as just falling through. */
beq t7, $retnull # .. e1 :
and t7, 0xf0, t2 # e0 : binary search for the high bit set
cmovne t2, t2, t7 # .. e1 (zdb)
cmovne t2, 4, t2 # e0 :
and t7, 0xcc, t1 # .. e1 :
cmovne t1, t1, t7 # e0 :
cmovne t1, 2, t1 # .. e1 :
and t7, 0xaa, t0 # e0 :
cmovne t0, 1, t0 # .. e1 (zdb)
addq t2, t1, t1 # e0 :
addq t6, t0, v0 # .. e1 : add our aligned base ptr to the mix
addq v0, t1, v0 # e0 :
ret # .. e1 :
$retnull:
mov zero, v0 # e0 :
ret # .. e1 :
.end strrchr
/* stxcpy.S
* Contributed by Richard Henderson (rth@tamu.edu)
*
* Copy a null-terminated string from SRC to DST.
*
* This is an internal routine used by strcpy, stpcpy, and strcat.
* As such, it uses special linkage conventions to make implementation
* of these public functions more efficient.
*
* On input:
* t9 = return address
* a0 = DST
* a1 = SRC
*
* On output:
* t8 = bitmask (with one bit set) indicating the last byte written
* a0 = unaligned address of the last *word* written
*
* Furthermore, v0, a3-a5, t11, and t12 are untouched.
*/
#include <alpha/regdef.h>
.set noat
.set noreorder
.text
/* There is a problem with either gdb (as of 4.16) or gas (as of 2.7) that
doesn't like putting the entry point for a procedure somewhere in the
middle of the procedure descriptor. Work around this by putting the
aligned copy in its own procedure descriptor */
.ent stxcpy_aligned
.align 3
stxcpy_aligned:
.frame sp, 0, t9
.prologue 0
/* On entry to this basic block:
t0 == the first destination word for masking back in
t1 == the first source word. */
/* Create the 1st output word and detect 0's in the 1st input word. */
lda t2, -1 # e1 : build a mask against false zero
mskqh t2, a1, t2 # e0 : detection in the src word
mskqh t1, a1, t3 # e0 :
ornot t1, t2, t2 # .. e1 :
mskql t0, a1, t0 # e0 : assemble the first output word
cmpbge zero, t2, t7 # .. e1 : bits set iff null found
or t0, t3, t1 # e0 :
bne t7, $a_eos # .. e1 :
/* On entry to this basic block:
t0 == the first destination word for masking back in
t1 == a source word not containing a null. */
$a_loop:
stq_u t1, 0(a0) # e0 :
addq a0, 8, a0 # .. e1 :
ldq_u t1, 0(a1) # e0 :
addq a1, 8, a1 # .. e1 :
cmpbge zero, t1, t7 # e0 (stall)
beq t7, $a_loop # .. e1 (zdb)
/* Take care of the final (partial) word store.
On entry to this basic block we have:
t1 == the source word containing the null
t7 == the cmpbge mask that found it. */
$a_eos:
negq t7, t6 # e0 : find low bit set
and t7, t6, t8 # e1 (stall)
/* For the sake of the cache, don't read a destination word
if we're not going to need it. */
and t8, 0x80, t6 # e0 :
bne t6, 1f # .. e1 (zdb)
/* We're doing a partial word store and so need to combine
our source and original destination words. */
ldq_u t0, 0(a0) # e0 :
subq t8, 1, t6 # .. e1 :
zapnot t1, t6, t1 # e0 : clear src bytes >= null
or t8, t6, t7 # .. e1 :
zap t0, t7, t0 # e0 : clear dst bytes <= null
or t0, t1, t1 # e1 :
1: stq_u t1, 0(a0) # e0 :
ret (t9) # .. e1 :
.end stxcpy_aligned
.align 3
.ent __stxcpy
.globl __stxcpy
__stxcpy:
.frame sp, 0, t9
.prologue 0
/* Are source and destination co-aligned? */
xor a0, a1, t0 # e0 :
unop # :
and t0, 7, t0 # e0 :
bne t0, $unaligned # .. e1 :
/* We are co-aligned; take care of a partial first word. */
ldq_u t1, 0(a1) # e0 : load first src word
and a0, 7, t0 # .. e1 : take care not to load a word ...
addq a1, 8, a1 # e0 :
beq t0, stxcpy_aligned # .. e1 : ... if we won't need it
ldq_u t0, 0(a0) # e0 :
br stxcpy_aligned # .. e1 :
/* The source and destination are not co-aligned. Align the destination
and cope. We have to be very careful about not reading too much and
causing a SEGV. */
.align 3
$u_head:
/* We know just enough now to be able to assemble the first
full source word. We can still find a zero at the end of it
that prevents us from outputting the whole thing.
On entry to this basic block:
t0 == the first dest word, for masking back in, if needed else 0
t1 == the low bits of the first source word
t6 == bytemask that is -1 in dest word bytes */
ldq_u t2, 8(a1) # e0 :
addq a1, 8, a1 # .. e1 :
extql t1, a1, t1 # e0 :
extqh t2, a1, t4 # e0 :
mskql t0, a0, t0 # e0 :
or t1, t4, t1 # .. e1 :
mskqh t1, a0, t1 # e0 :
or t0, t1, t1 # e1 :
or t1, t6, t6 # e0 :
cmpbge zero, t6, t7 # .. e1 :
lda t6, -1 # e0 : for masking just below
bne t7, $u_final # .. e1 :
mskql t6, a1, t6 # e0 : mask out the bits we have
or t6, t2, t2 # e1 : already extracted before
cmpbge zero, t2, t7 # e0 : testing eos
bne t7, $u_late_head_exit # .. e1 (zdb)
/* Finally, we've got all the stupid leading edge cases taken care
of and we can set up to enter the main loop. */
stq_u t1, 0(a0) # e0 : store first output word
addq a0, 8, a0 # .. e1 :
extql t2, a1, t0 # e0 : position ho-bits of lo word
ldq_u t2, 8(a1) # .. e1 : read next high-order source word
addq a1, 8, a1 # e0 :
cmpbge zero, t2, t7 # .. e1 :
nop # e0 :
bne t7, $u_eos # .. e1 :
/* Unaligned copy main loop. In order to avoid reading too much,
the loop is structured to detect zeros in aligned source words.
This has, unfortunately, effectively pulled half of a loop
iteration out into the head and half into the tail, but it does
prevent nastiness from accumulating in the very thing we want
to run as fast as possible.
On entry to this basic block:
t0 == the shifted high-order bits from the previous source word
t2 == the unshifted current source word
We further know that t2 does not contain a null terminator. */
.align 3
$u_loop:
extqh t2, a1, t1 # e0 : extract high bits for current word
addq a1, 8, a1 # .. e1 :
extql t2, a1, t3 # e0 : extract low bits for next time
addq a0, 8, a0 # .. e1 :
or t0, t1, t1 # e0 : current dst word now complete
ldq_u t2, 0(a1) # .. e1 : load high word for next time
stq_u t1, -8(a0) # e0 : save the current word
mov t3, t0 # .. e1 :
cmpbge zero, t2, t7 # e0 : test new word for eos
beq t7, $u_loop # .. e1 :
/* We've found a zero somewhere in the source word we just read.
If it resides in the lower half, we have one (probably partial)
word to write out, and if it resides in the upper half, we
have one full and one partial word left to write out.
On entry to this basic block:
t0 == the shifted high-order bits from the previous source word
t2 == the unshifted current source word. */
$u_eos:
extqh t2, a1, t1 # e0 :
or t0, t1, t1 # e1 : first (partial) source word complete
cmpbge zero, t1, t7 # e0 : is the null in this first bit?
bne t7, $u_final # .. e1 (zdb)
$u_late_head_exit:
stq_u t1, 0(a0) # e0 : the null was in the high-order bits
addq a0, 8, a0 # .. e1 :
extql t2, a1, t1 # e0 :
cmpbge zero, t1, t7 # .. e1 :
/* Take care of a final (probably partial) result word.
On entry to this basic block:
t1 == assembled source word
t7 == cmpbge mask that found the null. */
$u_final:
negq t7, t6 # e0 : isolate low bit set
and t6, t7, t8 # e1 :
and t8, 0x80, t6 # e0 : avoid dest word load if we can
bne t6, 1f # .. e1 (zdb)
ldq_u t0, 0(a0) # e0 :
subq t8, 1, t6 # .. e1 :
or t6, t8, t7 # e0 :
zapnot t1, t6, t1 # .. e1 : kill source bytes >= null
zap t0, t7, t0 # e0 : kill dest bytes <= null
or t0, t1, t1 # e1 :
1: stq_u t1, 0(a0) # e0 :
ret (t9) # .. e1 :
/* Unaligned copy entry point. */
.align 3
$unaligned:
ldq_u t1, 0(a1) # e0 : load first source word
and a0, 7, t4 # .. e1 : find dest misalignment
and a1, 7, t5 # e0 : find src misalignment
/* Conditionally load the first destination word and a bytemask
with 0xff indicating that the destination byte is sacrosanct. */
mov zero, t0 # .. e1 :
mov zero, t6 # e0 :
beq t4, 1f # .. e1 :
ldq_u t0, 0(a0) # e0 :
lda t6, -1 # .. e1 :
mskql t6, a0, t6 # e0 :
1:
subq a1, t4, a1 # .. e1 : sub dest misalignment from src addr
/* If source misalignment is larger than dest misalignment, we need
extra startup checks to avoid SEGV. */
cmplt t4, t5, t8 # e0 :
beq t8, $u_head # .. e1 (zdb)
lda t2, -1 # e1 : mask out leading garbage in source
mskqh t2, t5, t2 # e0 :
nop # e0 :
ornot t1, t2, t3 # .. e1 :
cmpbge zero, t3, t7 # e0 : is there a zero?
beq t7, $u_head # .. e1 (zdb)
/* At this point we've found a zero in the first partial word of
the source. We need to isolate the valid source data and mask
it into the original destination data. (Incidentally, we know
that we'll need at least one byte of that original dest word.) */
ldq_u t0, 0(a0) # e0 :
negq t7, t6 # .. e1 : build bitmask of bytes <= zero
and t6, t7, t8 # e0 :
and a1, 7, t5 # .. e1 :
subq t8, 1, t6 # e0 :
or t6, t8, t7 # e1 :
srl t8, t5, t8 # e0 : adjust final null return value
zapnot t2, t7, t2 # .. e1 : prepare source word; mirror changes
and t1, t2, t1 # e1 : to source validity mask
extql t2, a1, t2 # .. e0 :
extql t1, a1, t1 # e0 :
andnot t0, t2, t0 # .. e1 : zero place for source to reside
or t0, t1, t1 # e1 : and put it there
stq_u t1, 0(a0) # .. e0 :
ret (t9) # e1 :
.end __stxcpy
......@@ -59,8 +59,12 @@ asmlinkage struct pt_regs * save_v86_state(struct vm86_regs * regs)
do_exit(SIGSEGV);
}
set_flags(regs->eflags, VEFLAGS, VIF_MASK | current->tss.v86mask);
copy_to_user(&current->tss.vm86_info->regs,regs,sizeof(*regs));
put_user(current->tss.screen_bitmap,&current->tss.vm86_info->screen_bitmap);
tmp = copy_to_user(&current->tss.vm86_info->regs,regs,sizeof(*regs));
tmp += put_user(current->tss.screen_bitmap,&current->tss.vm86_info->screen_bitmap);
if (tmp) {
printk("vm86: could not access userspace vm86_info\n");
do_exit(SIGSEGV);
}
tmp = current->tss.esp0;
current->tss.esp0 = current->saved_kernel_stack;
current->saved_kernel_stack = 0;
......@@ -104,22 +108,21 @@ asmlinkage int sys_vm86(struct vm86_struct * v86)
struct vm86_struct info;
struct task_struct *tsk = current;
struct pt_regs * pt_regs = (struct pt_regs *) &v86;
int error;
if (tsk->saved_kernel_stack)
return -EPERM;
/* v86 must be readable (now) and writable (for save_v86_state) */
error = verify_area(VERIFY_WRITE,v86,sizeof(*v86));
if (error)
return error;
copy_from_user(&info,v86,sizeof(info));
if (copy_from_user(&info,v86,sizeof(info)))
return -EFAULT;
/*
* make sure the vm86() system call doesn't try to do anything silly
*/
info.regs.__null_ds = 0;
info.regs.__null_es = 0;
info.regs.__null_fs = 0;
info.regs.__null_gs = 0;
/* we are clearing fs,gs later just before "jmp ret_from_sys_call",
* because starting with Linux 2.1.x they are no longer saved/restored
*/
/*
* The eflags register is also special: we cannot trust that the user
* has set it up safely, so this makes sure interrupt etc flags are
......@@ -156,10 +159,12 @@ asmlinkage int sys_vm86(struct vm86_struct * v86)
tsk->tss.screen_bitmap = info.screen_bitmap;
if (info.flags & VM86_SCREEN_BITMAP)
mark_screen_rdonly(tsk);
__asm__ __volatile__("movl %0,%%esp\n\t"
__asm__ __volatile__(
"xorl %%eax,%%eax; mov %%ax,%%fs; mov %%ax,%%gs\n\t"
"movl %0,%%esp\n\t"
"jmp ret_from_sys_call"
: /* no outputs */
:"r" (&info.regs), "b" (tsk));
:"r" (&info.regs), "b" (tsk) : "ax");
return 0;
}
......@@ -218,9 +223,10 @@ static inline unsigned long get_vflags(struct vm86_regs * regs)
static inline int is_revectored(int nr, struct revectored_struct * bitmap)
{
if (verify_area(VERIFY_READ, bitmap, 256/8) < 0)
unsigned long map;
if (get_user(map, bitmap->__map + (nr >> 5)))
return 1;
return test_bit(nr, bitmap);
return test_bit(nr & ((1 << 5)-1), &map);
}
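The replacement fetches just the 32-bit word that holds bit nr and tests within it, which both drops the verify_area call and touches only one user word. The indexing, spelled out (hypothetical harness; unsigned long is 32 bits on i386, hence >> 5 and & 31):

static int test_bitmap_bit_model(const unsigned long *map256, int nr)
{
	unsigned long word = map256[nr >> 5];   /* get_user(map, __map + (nr >> 5)) */
	return (word >> (nr & 31)) & 1;         /* test_bit(nr & 31, &map)          */
}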
/*
......@@ -298,26 +304,25 @@ __res; })
static void do_int(struct vm86_regs *regs, int i, unsigned char * ssp, unsigned long sp)
{
unsigned short *intr_ptr, seg;
unsigned long *intr_ptr, segoffs;
if (regs->cs == BIOSSEG)
goto cannot_handle;
if (is_revectored(i, &current->tss.vm86_info->int_revectored))
goto cannot_handle;
if (i==0x21 && is_revectored(AH(regs),&current->tss.vm86_info->int21_revectored))
goto cannot_handle;
intr_ptr = (unsigned short *) (i << 2);
if (verify_area(VERIFY_READ, intr_ptr, 4) < 0)
intr_ptr = (unsigned long *) (i << 2);
if (get_user(segoffs, intr_ptr))
goto cannot_handle;
get_user(seg, intr_ptr+1);
if (seg == BIOSSEG)
if ((segoffs >> 16) == BIOSSEG)
goto cannot_handle;
pushw(ssp, sp, get_vflags(regs));
pushw(ssp, sp, regs->cs);
pushw(ssp, sp, IP(regs));
regs->cs = seg;
regs->cs = segoffs >> 16;
SP(regs) -= 6;
get_user(IP(regs), intr_ptr+0);
IP(regs) = segoffs & 0xffff;
clear_TF(regs);
clear_IF(regs);
return;
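A real-mode IVT slot is 4 bytes at linear address i*4, offset in the low word and segment in the high word; reading it with a single 32-bit get_user replaces the old verify_area plus two 16-bit reads, and the split falls out as below (sketch):

#include <stdint.h>

static void split_ivt_entry(uint32_t segoffs, uint16_t *seg, uint16_t *off)
{
	*seg = (uint16_t)(segoffs >> 16);      /* regs->cs = segoffs >> 16    */
	*off = (uint16_t)(segoffs & 0xffff);   /* IP(regs) = segoffs & 0xffff */
}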
......
......@@ -26,13 +26,6 @@
#include <asm/pgtable.h>
#include <asm/dma.h>
/*
* The SMP kernel can't handle the 4MB page table optimizations yet
*/
#ifdef __SMP__
#undef USE_PENTIUM_MM
#endif
extern void die_if_kernel(char *,struct pt_regs *,long);
extern void show_net_buffers(void);
......@@ -154,7 +147,12 @@ unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
/* unmap the original low memory mappings */
pgd_val(pg_dir[0]) = 0;
while (address < end_mem) {
#ifdef USE_PENTIUM_MM
/*
* The following code enabled 4MB page tables for the
* Intel Pentium cpu, unfortunately the SMP kernel can't
* handle the 4MB page table optimizations yet
*/
#ifndef __SMP__
/*
* This will create page tables that
* span up to the next 4MB virtual
......
......@@ -130,7 +130,7 @@ static int allowed_drive_mask = 0x33;
#include <linux/fd.h>
#include <linux/hdreg.h>
#define OLDFDRAWCMD 0x020d /* send a raw command to the FDC */
......@@ -918,6 +918,13 @@ static void empty(void)
static struct tq_struct floppy_tq =
{ 0, 0, 0, 0 };
static void schedule_bh( void (*handler)(void*) )
{
floppy_tq.routine = (void *)(void *) handler;
queue_task_irq(&floppy_tq, &tq_immediate);
mark_bh(IMMEDIATE_BH);
}
static struct timer_list fd_timer ={ NULL, NULL, 0, 0, 0 };
static void cancel_activity(void)
......@@ -1685,12 +1692,9 @@ void floppy_interrupt(int irq, void *dev_id, struct pt_regs * regs)
} while ((ST0 & 0x83) != UNIT(current_drive) && inr == 2);
}
if (handler) {
if(intr_count >= 2) {
/* expected interrupt */
floppy_tq.routine = (void *)(void *) handler;
queue_task_irq(&floppy_tq, &tq_immediate);
mark_bh(IMMEDIATE_BH);
} else
if(intr_count >= 2)
schedule_bh( (void *)(void *) handler);
else
handler();
} else
FDCS->reset = 1;
......@@ -1928,9 +1932,7 @@ static int wait_til_done(void (*handler)(void), int interruptible)
int ret;
unsigned long flags;
floppy_tq.routine = (void *)(void *) handler;
queue_task(&floppy_tq, &tq_immediate);
mark_bh(IMMEDIATE_BH);
schedule_bh((void *)(void *)handler);
INT_OFF;
while(command_status < 2 && NO_SIGNAL){
is_alive("wait_til_done");
......@@ -2737,9 +2739,7 @@ static void redo_fd_request(void)
if (TESTF(FD_NEED_TWADDLE))
twaddle();
floppy_tq.routine = (void *)(void *) floppy_start;
queue_task(&floppy_tq, &tq_immediate);
mark_bh(IMMEDIATE_BH);
schedule_bh( (void *)(void *) floppy_start);
#ifdef DEBUGT
debugt("queue fd request");
#endif
......@@ -2754,18 +2754,19 @@ static struct cont_t rw_cont={
bad_flp_intr,
request_done };
static struct tq_struct request_tq =
{ 0, 0, (void *) (void *) redo_fd_request, 0 };
static void process_fd_request(void)
{
cont = &rw_cont;
queue_task(&request_tq, &tq_immediate);
mark_bh(IMMEDIATE_BH);
schedule_bh( (void *)(void *) redo_fd_request);
}
static void do_fd_request(void)
{
if(usage_count == 0) {
printk("warning: usage count=0, CURRENT=%p exiting\n", CURRENT);
printk("sect=%ld cmd=%d\n", CURRENT->sector, CURRENT->cmd);
return;
}
sti();
if (fdc_busy){
/* fdc busy, this new request will be treated when the
......@@ -2839,22 +2840,19 @@ static int user_reset_fdc(int drive, int arg, int interruptible)
* Misc Ioctl's and support
* ========================
*/
static int fd_copyout(void *param, const void *address, int size)
static inline int fd_copyout(void *param, const void *address, int size)
{
int ret;
ECALL(verify_area(VERIFY_WRITE,param,size));
copy_to_user(param,(void *) address, size);
return 0;
return copy_to_user(param,address, size) ? -EFAULT : 0;
}
static int fd_copyin(void *param, void *address, int size)
static inline int fd_copyin(void *param, void *address, int size)
{
int ret;
return copy_from_user(address, param, size) ? -EFAULT : 0;
}
ECALL(verify_area(VERIFY_READ,param,size));
copy_from_user((void *) address, param, size);
return 0;
static inline int write_user_long(unsigned long useraddr, unsigned long value)
{
return put_user(value, (unsigned long *)useraddr) ? -EFAULT : 0;
}
#define COPYOUT(x) ECALL(fd_copyout((void *)param, &(x), sizeof(x)))
......@@ -3259,6 +3257,21 @@ static inline int xlate_0x00xx_ioctl(int *cmd, int *size)
return -EINVAL;
}
static int get_floppy_geometry(int drive, int type, struct floppy_struct **g)
{
if (type)
*g = &floppy_type[type];
else {
LOCK_FDC(drive,0);
CALL(poll_drive(0,0));
process_fd_request();
*g = current_type[drive];
}
if(!*g)
return -ENODEV;
return 0;
}
static int fd_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long param)
{
......@@ -3297,6 +3310,43 @@ static int fd_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
cmd = FDEJECT;
}
/* generic block device ioctls */
switch(cmd) {
/* the following have been inspired by the corresponding
* code for other block devices. */
struct floppy_struct *g;
struct hd_geometry *loc;
case HDIO_GETGEO:
loc = (struct hd_geometry *) param;
ECALL(get_floppy_geometry(drive, type, &g));
ECALL(verify_area(VERIFY_WRITE, loc, sizeof(*loc)));
put_user(g->head, &loc->heads);
put_user(g->sect, &loc->sectors);
put_user(g->track, &loc->cylinders);
put_user(0,&loc->start);
return 0;
case BLKRASET:
if(!suser()) return -EACCES;
if(param > 0xff) return -EINVAL;
read_ahead[MAJOR(inode->i_rdev)] = param;
return 0;
case BLKRAGET:
return write_user_long(param,
read_ahead[MAJOR(inode->i_rdev)]);
case BLKFLSBUF:
if(!suser()) return -EACCES;
fsync_dev(inode->i_rdev);
invalidate_buffers(inode->i_rdev);
return 0;
case BLKGETSIZE:
ECALL(get_floppy_geometry(drive, type, &g));
return write_user_long(param, g->size);
/* BLKRRPART is not defined as floppies don't have
* partition tables */
}
/* convert the old style command into a new style command */
if ((cmd & 0xff00) == 0x0200) {
ECALL(normalize_0x02xx_ioctl(&cmd, &size));
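From user space, the new generic ioctls behave like those of other block devices; e.g. a minimal HDIO_GETGEO probe might look like this (sketch, error handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	struct hd_geometry g;
	int fd = open("/dev/fd0", O_RDONLY | O_NONBLOCK);

	if (fd >= 0 && ioctl(fd, HDIO_GETGEO, &g) == 0)
		printf("heads=%d sectors=%d cylinders=%d\n",
		       g.heads, g.sectors, g.cylinders);
	if (fd >= 0)
		close(fd);
	return 0;
}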
......@@ -3345,15 +3395,9 @@ static int fd_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
return set_geometry(cmd, & inparam.g,
drive, type, device);
case FDGETPRM:
LOCK_FDC(drive,1);
CALL(poll_drive(1,0));
process_fd_request();
if (type)
outparam = (char *) &floppy_type[type];
else
outparam = (char *) current_type[drive];
if(!outparam)
return -ENODEV;
ECALL(get_floppy_geometry(drive, type,
(struct floppy_struct**)
&outparam));
break;
case FDMSGON:
......@@ -4174,7 +4218,7 @@ static void mod_setup(char *pattern, void (*setup)(char *, int *))
j=1;
for (i=current->mm->env_start; i< current->mm->env_end; i ++){
c= get_fs_byte(i);
get_user(c, (char *)i);
if (match){
if (j==99)
c='\0';
......
......@@ -455,14 +455,11 @@ static int lo_ioctl(struct inode * inode, struct file * file,
case BLKGETSIZE: /* Return device size */
if (!lo->lo_inode)
return -ENXIO;
if (!arg) return -EINVAL;
err = verify_area(VERIFY_WRITE, (long *) arg, sizeof(long));
if (err)
return err;
put_fs_long(loop_sizes[lo->lo_number] << 1, (long *) arg);
return 0;
default:
if (!arg)
return -EINVAL;
return put_user(loop_sizes[lo->lo_number] << 1, (int *) arg);
default:
return -EINVAL;
}
return 0;
}
......
......@@ -106,6 +106,10 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*
* Credits:
* Heiko Eissfeldt <heiko@colossus.escape.de>
For finding a bug in the return of the track numbers.
*/
/*
......@@ -2482,8 +2486,8 @@ static int scd_ioctl(struct inode *inode,
i=verify_area(VERIFY_WRITE, hdr, sizeof(*hdr));
if(i<0)
return i;
loc_hdr.cdth_trk0 = bcd_to_int(sony_toc.first_track_num);
loc_hdr.cdth_trk1 = bcd_to_int(sony_toc.last_track_num);
loc_hdr.cdth_trk0 = sony_toc.first_track_num;
loc_hdr.cdth_trk1 = sony_toc.last_track_num;
copy_to_user(hdr, &loc_hdr, sizeof(*hdr));
}
return 0;
......@@ -2583,7 +2587,7 @@ static int scd_ioctl(struct inode *inode,
* If we want to stop after the last track, use the lead-out
* MSF to do that.
*/
if (ti.cdti_trk1 >= bcd_to_int(sony_toc.last_track_num))
if (ti.cdti_trk1 >= sony_toc.last_track_num)
{
log_to_msf(msf_to_log(sony_toc.lead_out_start_msf)-1,
&(params[4]));
......
......@@ -670,7 +670,7 @@ int _ftape_ioctl(unsigned int command, void *arg)
TRACE_EXIT;
return error;
}
memcpy_fromfs(&krnl_arg.mtop, arg, arg_size);
copy_from_user(&krnl_arg.mtop, arg, arg_size);
}
TRACEx1(5, "called with ioctl command: 0x%08x", command);
switch (command) {
......@@ -830,7 +830,7 @@ int _ftape_ioctl(unsigned int command, void *arg)
TRACE_EXIT;
return error;
}
memcpy_tofs(arg, &krnl_arg, arg_size);
copy_to_user(arg, &krnl_arg, arg_size);
}
TRACE_EXIT;
return result;
......
......@@ -656,7 +656,7 @@ int _ftape_read(char *buff, int req_len)
TRACE_EXIT;
return -EIO;
}
memcpy_tofs(buff, deblock_buffer + buf_pos_rd, cnt);
copy_to_user(buff, deblock_buffer + buf_pos_rd, cnt);
buff += cnt;
to_do -= cnt; /* what's left from req_len */
remaining -= cnt; /* what remains on this tape */
......
......@@ -639,7 +639,7 @@ int _ftape_write(const char *buff, int req_len)
TRACE_EXIT;
return result;
}
memcpy_fromfs(deblock_buffer + buf_pos_wr, buff, cnt);
copy_from_user(deblock_buffer + buf_pos_wr, buff, cnt);
buff += cnt;
req_len -= cnt;
buf_pos_wr += cnt;
......
......@@ -2039,7 +2039,7 @@ static long qic02_tape_read(struct inode * inode, struct file * filp,
}
/* copy buffer to user-space in one go */
if (bytes_done>0)
copy_to_user( (void *) buf, (void *) buffaddr, bytes_done);
copy_to_user( (void *) buf, (void *) bus_to_virt(buffaddr), bytes_done);
#if 1
/* Checks Ton's patch below */
if ((return_read_eof == NO) && (status_eof_detected == YES)) {
......@@ -2167,7 +2167,7 @@ static long qic02_tape_write(struct inode * inode, struct file * filp,
/* copy from user to DMA buffer and initiate transfer. */
if (bytes_todo>0) {
copy_from_user( (void *) buffaddr, (const void *) buf, bytes_todo);
copy_from_user( (void *) bus_to_virt(buffaddr), (const void *) buf, bytes_todo);
/****************** similar problem with read() at FM could happen here at EOT.
******************/
......@@ -2590,7 +2590,7 @@ static int qic02_tape_ioctl(struct inode * inode, struct file * filp,
return -EPERM;
error = verify_area(VERIFY_READ, (int *) ioarg, sizeof(int));
if (error) return error;
c = get_user((int *) ioarg);
c = get_user(sizeof(int), (int *) ioarg);
if (c==0) {
QIC02_TAPE_DEBUG = 0;
return 0;
......@@ -2648,8 +2648,7 @@ static int qic02_tape_ioctl(struct inode * inode, struct file * filp,
/* copy struct from user space to kernel space */
stp = (char *) &qic02_tape_dynconf;
argp = (char *) ioarg;
for (i=0; i<sizeof(qic02_tape_dynconf); i++)
*stp++ = get_user(argp++);
copy_from_user(stp, argp, sizeof(qic02_tape_dynconf));
if (status_zombie==NO)
qic02_release_resources(); /* and go zombie */
if (update_ifc_masks(qic02_tape_dynconf.ifc_type))
......@@ -2680,8 +2679,7 @@ static int qic02_tape_ioctl(struct inode * inode, struct file * filp,
/* copy mtop struct from user space to kernel space */
stp = (char *) &operation;
argp = (char *) ioarg;
for (i=0; i<sizeof(operation); i++)
*stp++ = get_user(argp++);
copy_from_user(stp, argp, sizeof(operation));
/* ---note: mt_count is signed, negative seeks must be
* --- translated to seeks in opposite direction!
......@@ -2920,7 +2918,7 @@ int qic02_tape_init(void)
* This assumes a one-to-one identity mapping between
* kernel addresses and physical memory.
*/
buffaddr = align_buffer((unsigned long) &qic02_tape_buf, TAPE_BLKSIZE);
buffaddr = align_buffer(virt_to_bus(qic02_tape_buf), TAPE_BLKSIZE);
printk(", at address 0x%lx (0x%lx)\n", buffaddr, (unsigned long) &qic02_tape_buf);
#ifndef CONFIG_MAX_16M
......
......@@ -21,6 +21,7 @@
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/segment.h>
#include "vt_kern.h"
#include "selection.h"
......
......@@ -2250,7 +2250,7 @@ ppp_tty_ioctl (struct tty_struct *tty, struct file * file,
error = verify_area (VERIFY_READ, (void *) param3,
sizeof (temp_i));
if (error == 0) {
temp_i = get_user ((int *) param3);
get_user (temp_i, (int *) param3);
if (ppp->flags & SC_DEBUG)
printk (KERN_INFO
"ppp_tty_ioctl: set mru to %x\n", temp_i);
......@@ -2285,7 +2285,8 @@ ppp_tty_ioctl (struct tty_struct *tty, struct file * file,
error = verify_area (VERIFY_READ, (void *) param3,
sizeof (temp_i));
if (error == 0) {
temp_i = get_user ((int *) param3) & SC_MASK;
get_user (temp_i, (int *) param3);
temp_i &= SC_MASK;
temp_i |= (ppp->flags & ~SC_MASK);
if ((ppp->flags & SC_CCP_OPEN) &&
......@@ -2328,7 +2329,7 @@ ppp_tty_ioctl (struct tty_struct *tty, struct file * file,
error = verify_area (VERIFY_READ, (void *) param3,
sizeof (temp_i));
if (error == 0) {
ppp->xmit_async_map[0] = get_user ((int *) param3);
get_user (ppp->xmit_async_map[0],(int *) param3);
if (ppp->flags & SC_DEBUG)
printk (KERN_INFO
"ppp_tty_ioctl: set xmit asyncmap %x\n",
......@@ -2342,7 +2343,7 @@ ppp_tty_ioctl (struct tty_struct *tty, struct file * file,
error = verify_area (VERIFY_READ, (void *) param3,
sizeof (temp_i));
if (error == 0) {
ppp->recv_async_map = get_user ((int *) param3);
get_user (ppp->recv_async_map,(int *) param3);
if (ppp->flags & SC_DEBUG)
printk (KERN_INFO
"ppp_tty_ioctl: set rcv asyncmap %x\n",
......@@ -2370,7 +2371,8 @@ ppp_tty_ioctl (struct tty_struct *tty, struct file * file,
error = verify_area (VERIFY_READ, (void *) param3,
sizeof (temp_i));
if (error == 0) {
temp_i = (get_user ((int *) param3) & 0x1F) << 16;
get_user (temp_i, (int *) param3);
temp_i = (temp_i & 0x1F) << 16;
temp_i |= (ppp->flags & ~0x1F0000);
if ((ppp->flags | temp_i) & SC_DEBUG)
......@@ -2469,7 +2471,8 @@ ppp_tty_ioctl (struct tty_struct *tty, struct file * file,
error = verify_area (VERIFY_READ, (void *) param3,
sizeof (temp_i));
if (error == 0) {
temp_i = get_user ((int *) param3) + 1;
get_user (temp_i, (int *) param3);
++temp_i;
if (ppp->flags & SC_DEBUG)
printk (KERN_INFO
"ppp_tty_ioctl: set maxcid to %d\n",
......
......@@ -1069,7 +1069,7 @@ slip_ioctl(struct tty_struct *tty, void *file, int cmd, void *arg)
if (err) {
return err;
}
tmp = get_user((int *)arg);
get_user(tmp,(int *)arg);
#ifndef SL_INCLUDE_CSLIP
if (tmp & (SL_MODE_CSLIP|SL_MODE_ADAPTIVE)) {
return -EINVAL;
......@@ -1124,7 +1124,7 @@ slip_ioctl(struct tty_struct *tty, void *file, int cmd, void *arg)
if (err) {
return -err;
}
tmp = get_user((int *)arg);
get_user(tmp,(int *)arg);
if (tmp > 255) /* max for unchar */
return -EINVAL;
if ((sl->keepalive = (unchar) tmp) != 0) {
......@@ -1149,9 +1149,9 @@ slip_ioctl(struct tty_struct *tty, void *file, int cmd, void *arg)
if (err) {
return -err;
}
tmp = get_user((int *)arg);
get_user(tmp,(int *)arg);
if (tmp > 255) /* max for unchar */
return -EINVAL;
if ((sl->outfill = (unchar) tmp) != 0){
sl->outfill_timer.expires=jiffies+sl->outfill*HZ;
add_timer(&sl->outfill_timer);
......
......@@ -84,22 +84,11 @@ static void set_brk(unsigned long start, unsigned long end)
static void padzero(unsigned long elf_bss)
{
unsigned long nbyte;
char * fpnt;
nbyte = elf_bss & (PAGE_SIZE-1);
if (nbyte) {
nbyte = PAGE_SIZE - nbyte;
/* FIXME: someone should investigate, why a bad binary
is allowed to bring a wrong elf_bss until here,
and how to react. Suffice the plain return?
rossius@hrz.tu-chemnitz.de */
if (verify_area(VERIFY_WRITE, (void *) elf_bss, nbyte)) {
return;
}
fpnt = (char *) elf_bss;
do {
put_user(0, fpnt++);
} while (--nbyte);
clear_user((void *) elf_bss, nbyte);
}
}
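A worked check of the padding arithmetic above, with a hypothetical bss end address (PAGE_SIZE = 4096); as before, faults inside the clear are deliberately swallowed:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long elf_bss = 0x0804a123UL;              /* hypothetical */
	unsigned long nbyte = elf_bss & (PAGE_SIZE - 1);   /* 0x123 */

	if (nbyte)
		nbyte = PAGE_SIZE - nbyte;                 /* 0xedd */
	printf("clear %#lx bytes at %#lx\n", nbyte, elf_bss);
	return 0;
}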
......
......@@ -210,7 +210,31 @@ static int sync_buffers(kdev_t dev, int wait)
next->b_count--;
retry = 1;
}
repeat2:
bh = lru_list[BUF_LOCKED];
if (!bh)
break;
for (i = nr_buffers_type[BUF_LOCKED]*2 ; i-- > 0 ; bh = next) {
if (bh->b_list != BUF_LOCKED)
goto repeat2;
next = bh->b_next_free;
if (!lru_list[BUF_LOCKED])
break;
if (dev && bh->b_dev != dev)
continue;
if (buffer_locked(bh)) {
/* Buffer is locked; skip it unless wait is
requested AND pass > 0. */
if (!wait || !pass) {
retry = 1;
continue;
}
wait_on_buffer (bh);
goto repeat2;
}
}
/* If we are waiting for the sync to succeed, and if any dirty
blocks were written, then repeat; on the second pass, only
wait for buffers being written (do not pass to write any
......
......@@ -21,6 +21,7 @@
#include <linux/fs.h>
#include <linux/ext2_fs.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/stat.h>
static int ext2_readlink (struct inode *, char *, int);
......
......@@ -20,30 +20,6 @@
#define ACC_MODE(x) ("\000\004\002\006"[(x)&O_ACCMODE])
/*
* How long a filename can we get from user space?
* -EFAULT if invalid area
* 0 if ok (ENAMETOOLONG before EFAULT)
* >0 EFAULT after xx bytes
*/
static inline int get_max_filename(unsigned long address)
{
struct vm_area_struct * vma;
if (get_fs() == KERNEL_DS)
return 0;
vma = find_vma(current->mm, address);
if (!vma || vma->vm_start > address || !(vma->vm_flags & VM_READ))
return -EFAULT;
address = vma->vm_end - address;
if (address > PAGE_SIZE)
return 0;
if (vma->vm_next && vma->vm_next->vm_start == vma->vm_end &&
(vma->vm_next->vm_flags & VM_READ))
return 0;
return address;
}
/*
* In order to reduce some races, while at the same time doing additional
* checking and hopefully speeding things up, we copy filenames to the
......@@ -51,36 +27,41 @@ static inline int get_max_filename(unsigned long address)
*
* POSIX.1 2.4: an empty pathname is invalid (ENOENT).
*/
static inline int do_getname(const char *filename, char *page)
{
int retval;
unsigned long len = PAGE_SIZE;
if ((unsigned long) filename >= TASK_SIZE) {
if (get_fs() != KERNEL_DS)
return -EFAULT;
} else if (TASK_SIZE - (unsigned long) filename < PAGE_SIZE)
len = TASK_SIZE - (unsigned long) filename;
retval = strncpy_from_user((char *)page, filename, len);
if (retval > 0) {
if (retval < len)
return 0;
return -ENAMETOOLONG;
} else if (!retval)
retval = -ENOENT;
return retval;
}
int getname(const char * filename, char **result)
{
int i, error;
unsigned long page;
char * tmp, c;
i = get_max_filename((unsigned long) filename);
if (i < 0)
return i;
error = -EFAULT;
if (!i) {
error = -ENAMETOOLONG;
i = PAGE_SIZE;
}
get_user(c, filename++);
if (!c)
return -ENOENT;
if(!(page = __get_free_page(GFP_KERNEL)))
return -ENOMEM;
*result = tmp = (char *) page;
while (--i) {
*(tmp++) = c;
get_user(c, filename++);
if (!c) {
*tmp = '\0';
return 0;
}
int retval;
page = __get_free_page(GFP_KERNEL);
retval = -ENOMEM;
if (page) {
*result = (char *)page;
retval = do_getname(filename, (char *) page);
if (retval < 0)
free_page(page);
}
free_page(page);
return error;
return retval;
}
void putname(char * name)
......
......@@ -27,6 +27,12 @@ extern void * __memcpy(void *, const void *, size_t);
__constant_c_memset((s),(0x0101010101010101UL*(unsigned char)c),(count)) : \
__memset((s),(c),(count)))
#define __HAVE_ARCH_STRCPY
#define __HAVE_ARCH_STRNCPY
#define __HAVE_ARCH_STRCAT
#define __HAVE_ARCH_STRNCAT
#define __HAVE_ARCH_STRCHR
#define __HAVE_ARCH_STRRCHR
#define __HAVE_ARCH_STRLEN
#endif /* __KERNEL__ */
......
......@@ -9,7 +9,7 @@
typedef unsigned long elf_greg_t;
#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_i387_struct elf_fpregset_t;
......@@ -38,4 +38,29 @@ typedef struct user_i387_struct elf_fpregset_t;
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* Wow, the "main" arch needs arch dependent functions too.. :) */
/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
now struct_user_regs, they are different) */
#define ELF_CORE_COPY_REGS(pr_reg, regs) \
pr_reg[0] = regs->ebx; \
pr_reg[1] = regs->ecx; \
pr_reg[2] = regs->edx; \
pr_reg[3] = regs->esi; \
pr_reg[4] = regs->edi; \
pr_reg[5] = regs->ebp; \
pr_reg[6] = regs->eax; \
pr_reg[7] = regs->xds; \
pr_reg[8] = regs->xes; \
/* fake once used fs and gs selectors? */ \
pr_reg[9] = regs->xds; /* was fs and __fs */ \
pr_reg[10] = regs->xds; /* was gs and __gs */ \
pr_reg[11] = regs->orig_eax; \
pr_reg[12] = regs->eip; \
pr_reg[13] = regs->xcs; \
pr_reg[14] = regs->eflags; \
pr_reg[15] = regs->esp; \
pr_reg[16] = regs->xss;
#endif
......@@ -3,12 +3,6 @@
#include <linux/config.h>
/*
* Define USE_PENTIUM_MM if you want the 4MB page table optimizations.
* This works only on an intel Pentium.
*/
#define USE_PENTIUM_MM 1
/*
* The Linux memory management assumes a three-level page table setup. On
* the i386, we use that, but "fold" the mid level into the top-level page
......
......@@ -36,7 +36,7 @@ extern int __verify_write(const void *, unsigned long);
#else
#define __access_ok(type,addr,size) \
(__kernel_ok || (__user_ok(addr,size) && \
((type) == VERIFY_READ || __verify_write((void *)(addr),(size)))))
((type) == VERIFY_READ || wp_works_ok || __verify_write((void *)(addr),(size)))))
#endif /* CPU */
#define access_ok(type,addr,size) \
......@@ -140,7 +140,7 @@ __asm__ __volatile__( \
"decl %2\n" \
"3:\tlea 0(%3,%1,4),%0" \
:"=d" (size) \
:"c" (size >> 2), "m" (current->tss.ex), "r" (size & 3), \
:"c" (size >> 2), "m" (current->tss.ex), "q" (size & 3), \
"D" (to), "S" (from), "0" (size) \
:"cx","di","si","memory");
......@@ -158,6 +158,66 @@ if (__cu_size && __access_ok(VERIFY_READ, __cu_from, __cu_size)) \
__copy_user(to,__cu_from,__cu_size); \
__cu_size; })
#define __clear_user(addr,size) \
__asm__ __volatile__( \
"movl $3f,%0\n\t" \
"incl %2\n\t" \
"rep; stosl\n\t" \
"testl $2,%3\n\t" \
"je 1f\n\t" \
"stosw\n\t" \
"subl $2,%3\n" \
"1:\t" \
"testl $1,%3\n\t" \
"je 2f\n\t" \
"stosb\n\t" \
"decl %3\n" \
"2:\t" \
"decl %2\n" \
"3:\tlea 0(%3,%1,4),%0" \
:"=d" (size) \
:"c" (size >> 2), "m" (current->tss.ex), "r" (size & 3), \
"D" (addr), "0" (size), "a" (0) \
:"cx","di","memory");
#define clear_user(addr,n) ({ \
void * __cl_addr = (addr); \
unsigned long __cl_size = (n); \
if (__cl_size && __access_ok(VERIFY_WRITE, ((unsigned long)(__cl_addr)), __cl_size)) \
__clear_user(__cl_addr, __cl_size); \
__cl_size; })
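Like copy_to_user above, the macro returns the number of bytes left untouched, so callers map any nonzero result to -EFAULT; e.g. (sketch, in kernel context where -EFAULT is defined):

static int zero_user_buffer(void *ubuf, unsigned long len)
{
	return clear_user(ubuf, len) ? -EFAULT : 0;
}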
#define __strncpy_from_user(dst,src,count,res) \
__asm__ __volatile__( \
"cld\n\t" \
"movl $3f,%0\n\t" \
"incl %2\n" \
"1:\tdecl %1\n\t" \
"js 2f\n\t" \
"lodsb\n\t" \
"stosb\n\t" \
"testb %%al,%%al\n\t" \
"jne 1b\n" \
"2:\t" \
"incl %1\n\t" \
"xorl %0,%0\n\t" \
"decl %2\n" \
"3:" \
:"=d" (res), "=r" (count) \
:"m" (current->tss.ex), "1" (count), "S" (src),"D" (dst),"0" (res) \
:"si","di","ax","cx","memory")
#define strncpy_from_user(dest,src,count) ({ \
const void * __sc_src = (src); \
unsigned long __sc_count = (count); \
long __sc_res = -EFAULT; \
if (__access_ok(VERIFY_READ, ((unsigned long)(__sc_src)), __sc_count)) { \
unsigned long __sc_residue = __sc_count; \
__strncpy_from_user(dest,__sc_src,__sc_count,__sc_res); \
if (!__sc_res) __sc_res = __sc_residue - __sc_count; \
} __sc_res; })
#endif /* __ASSEMBLY__ */
#endif /* _ASM_SEGMENT_H */
......@@ -64,8 +64,6 @@ struct vm86_regs {
long eax;
long __null_ds;
long __null_es;
long __null_fs;
long __null_gs;
long orig_eax;
long eip;
unsigned short cs, __csh;
......
......@@ -573,8 +573,10 @@ long generic_file_read(struct inode * inode, struct file * filp,
unsigned long pos, ppos, page_cache;
int reada_ok;
if (!access_ok(VERIFY_WRITE, buf,count))
if (!access_ok(VERIFY_WRITE, buf, count))
return -EFAULT;
if (!count)
return 0;
error = 0;
read = 0;
page_cache = 0;
......
......@@ -673,6 +673,7 @@ static int inet_create(struct socket *sock, int protocol)
sk->rcvbuf = SK_RMEM_MAX;
sk->rto = TCP_TIMEOUT_INIT; /*TCP_WRITE_TIME*/
sk->cong_window = 1; /* start with only sending one packet at a time. */
sk->ssthresh = 0x7fffffff;
sk->priority = 1;
sk->state = TCP_CLOSE;
......