Commit ebaef79a authored by Len Brown

Merge intel.com:/home/lenb/bk/linux-2.6.2

into intel.com:/home/lenb/src/linux-acpi-test-2.6.2
parents c6f1cfe6 c8609416
......@@ -834,11 +834,13 @@ void (*wait_timer_tick)(void) = wait_8254_wraparound;
void __setup_APIC_LVTT(unsigned int clocks)
{
unsigned int lvtt1_value, tmp_value;
unsigned int lvtt_value, tmp_value, ver;
lvtt1_value = SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV) |
APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR;
apic_write_around(APIC_LVTT, lvtt1_value);
ver = GET_APIC_VERSION(apic_read(APIC_LVR));
lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR;
if (!APIC_INTEGRATED(ver))
lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);
apic_write_around(APIC_LVTT, lvtt_value);
/*
* Divide PICLK by 16
......
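
The hunk above makes __setup_APIC_LVTT() write the timer-base divider field only for external (non-integrated) 82489DX APICs, keyed off the APIC version register. A minimal user-space sketch of that decision, using illustrative stand-in values rather than the real <asm/apicdef.h> definitions:

/* Minimal user-space sketch of the LVTT value selection above.
 * Macro values are illustrative stand-ins, not the real <asm/apicdef.h>
 * definitions. */
#include <stdio.h>

#define APIC_LVT_TIMER_PERIODIC (1u << 17)
#define LOCAL_TIMER_VECTOR      0xefu
#define APIC_TIMER_BASE_DIV     0x2u
#define SET_APIC_TIMER_BASE(x)  ((x) << 18)
/* Integrated local APICs report version 0x1x; the 82489DX reports 0x0x. */
#define APIC_INTEGRATED(ver)    ((ver) & 0xF0u)

static unsigned int setup_lvtt(unsigned int version)
{
        unsigned int lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR;

        if (!APIC_INTEGRATED(version))          /* external 82489DX only */
                lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);
        return lvtt_value;
}

int main(void)
{
        printf("82489DX:    %#x\n", setup_lvtt(0x04));
        printf("integrated: %#x\n", setup_lvtt(0x14));
        return 0;
}
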
......@@ -286,6 +286,8 @@ source "drivers/input/Kconfig"
source "drivers/char/Kconfig"
source "drivers/i2c/Kconfig"
source "drivers/media/Kconfig"
source "fs/Kconfig"
......
......@@ -2011,6 +2011,8 @@ static int do_execve32(char * filename, u32 * argv, u32 * envp, struct pt_regs *
int retval;
int i;
sched_balance_exec();
file = open_exec(filename);
retval = PTR_ERR(file);
......
......@@ -1950,6 +1950,8 @@ do_execve32(char * filename, u32 * argv, u32 * envp, struct pt_regs * regs)
int retval;
int i;
sched_balance_exec();
file = open_exec(filename);
retval = PTR_ERR(file);
......
......@@ -359,8 +359,8 @@ static struct timer_list housekeeping;
static unsigned short debug = 0;
static unsigned short vpi_bits = 0;
static unsigned short max_tx_size = 9000;
static unsigned short max_rx_size = 9000;
static int max_tx_size = 9000;
static int max_rx_size = 9000;
static unsigned char pci_lat = 0;
/********** access functions **********/
......@@ -2898,11 +2898,11 @@ static void __init hrz_check_args (void) {
PRINTK (KERN_ERR, "vpi_bits has been limited to %hu",
vpi_bits = HRZ_MAX_VPI);
if (max_tx_size > TX_AAL5_LIMIT)
if (max_tx_size < 0 || max_tx_size > TX_AAL5_LIMIT)
PRINTK (KERN_NOTICE, "max_tx_size has been limited to %hu",
max_tx_size = TX_AAL5_LIMIT);
if (max_rx_size > RX_AAL5_LIMIT)
if (max_rx_size < 0 || max_rx_size > RX_AAL5_LIMIT)
PRINTK (KERN_NOTICE, "max_rx_size has been limited to %hu",
max_rx_size = RX_AAL5_LIMIT);
......@@ -2914,8 +2914,8 @@ MODULE_DESCRIPTION(description_string);
MODULE_LICENSE("GPL");
MODULE_PARM(debug, "h");
MODULE_PARM(vpi_bits, "h");
MODULE_PARM(max_tx_size, "h");
MODULE_PARM(max_rx_size, "h");
MODULE_PARM(max_tx_size, "i");
MODULE_PARM(max_rx_size, "i");
MODULE_PARM(pci_lat, "b");
MODULE_PARM_DESC(debug, "debug bitmap, see .h file");
MODULE_PARM_DESC(vpi_bits, "number of bits (0..4) to allocate to VPIs");
......
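
The Horizon driver hunks above widen max_tx_size/max_rx_size to int so the storage type matches the MODULE_PARM type letter ("i" = int, "h" = short) and add a guard against negative values. A small user-space sketch of the resulting clamping rule, with an assumed stand-in value for TX_AAL5_LIMIT/RX_AAL5_LIMIT:

/* User-space sketch of the clamping logic above.  The limit value is an
 * illustrative stand-in for TX_AAL5_LIMIT / RX_AAL5_LIMIT. */
#include <stdio.h>

#define AAL5_LIMIT 65535        /* assumed maximum PDU size */

static int clamp_size(int requested)
{
        /* Now that the parameter is a signed int, both a negative value
         * and an oversized value must be rejected. */
        if (requested < 0 || requested > AAL5_LIMIT)
                return AAL5_LIMIT;
        return requested;
}

int main(void)
{
        printf("%d\n", clamp_size(-1));      /* clamped to the limit */
        printf("%d\n", clamp_size(9000));    /* passed through       */
        return 0;
}
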
......@@ -124,12 +124,7 @@ config MD_RAID6
RAID-5, RAID-6 distributes the syndromes across the drives
in one of the available parity distribution methods.
RAID-6 currently requires a specially patched version of
mdadm; the patch is available at:
ftp://ftp.kernel.org/pub/linux/kernel/people/hpa/
... and the mdadm source code at ...
RAID-6 requires mdadm-1.5.0 or later, available at:
ftp://ftp.kernel.org/pub/linux/utils/raid/mdadm/
......@@ -137,7 +132,6 @@ config MD_RAID6
this code as a module, choose M here: the module will be
called raid6.
If unsure, say N.
config MD_MULTIPATH
......
......@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
......@@ -45,11 +46,15 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
#else /* ! __KERNEL__ */
/* Used for testing in user space */
#include <stddef.h>
#include <sys/types.h>
#include <inttypes.h>
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stddef.h>
#include <sys/mman.h>
#include <sys/types.h>
/* Not standard, but glibc defines it */
#define BITS_PER_LONG __WORDSIZE
typedef uint8_t u8;
typedef uint16_t u16;
......@@ -63,27 +68,13 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
#define __init
#define __exit
#define __attribute_const__ __attribute__((const))
#define preempt_enable()
#define preempt_disable()
#endif /* __KERNEL__ */
/* Change this from BITS_PER_LONG if there is something better... */
#if BITS_PER_LONG == 64
# define NBYTES(x) ((x) * 0x0101010101010101UL)
# define NSIZE 8
# define NSHIFT 3
# define NSTRING "64"
typedef u64 unative_t;
#else
# define NBYTES(x) ((x) * 0x01010101U)
# define NSIZE 4
# define NSHIFT 2
# define NSTRING "32"
typedef u32 unative_t;
#endif
/* Routine choices */
struct raid6_calls {
void (*gen_syndrome)(int, size_t, void **);
......
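
The raid6.h hunk above moves the NBYTES()/unative_t word-size selection out of the header (it reappears in raid6int.uc below). NBYTES() broadcasts one byte value into every byte lane of a machine word so byte-wise masks can be applied a whole word at a time; a user-space sketch with fixed-width stand-ins for u32/u64:

/* Sketch of what NBYTES() does: replicate one byte into every byte lane
 * of a machine word. */
#include <stdio.h>
#include <stdint.h>

#define NBYTES64(x) ((uint64_t)(x) * UINT64_C(0x0101010101010101))
#define NBYTES32(x) ((uint32_t)(x) * UINT32_C(0x01010101))

int main(void)
{
        /* 0xfe replicated into every byte lane of the word */
        printf("NBYTES32(0xfe) = %#010x\n",   NBYTES32(0xfe));
        printf("NBYTES64(0xfe) = %#018llx\n", (unsigned long long)NBYTES64(0xfe));
        return 0;
}
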
......@@ -46,7 +46,7 @@ const struct raid6_calls * const raid6_algos[] = {
&raid6_intx16,
&raid6_intx32,
#endif
#if defined(__i386__) || defined(__x86_64__)
#if defined(__i386__)
&raid6_mmxx1,
&raid6_mmxx2,
&raid6_sse1x1,
......@@ -55,6 +55,8 @@ const struct raid6_calls * const raid6_algos[] = {
&raid6_sse2x2,
#endif
#if defined(__x86_64__)
&raid6_sse2x1,
&raid6_sse2x2,
&raid6_sse2x4,
#endif
NULL
......
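
The raid6algos.c hunk above splits the NULL-terminated raid6_algos[] table so the MMX/SSE1 entries are built only on i386 and the 16-register SSE2 variants only on x86-64. A simplified sketch of walking such a NULL-terminated table of method structs; the struct layout and the valid() criterion here are illustrative, whereas the real raid6_select_algo() benchmarks every candidate and keeps the fastest:

/* Simplified sketch of iterating a NULL-terminated algorithm table like
 * raid6_algos[].  Picks the first entry whose valid() hook passes. */
#include <stdio.h>
#include <stddef.h>

struct calls {
        const char *name;
        int (*valid)(void);     /* returns nonzero if usable on this CPU */
};

static int always(void) { return 1; }
static int never(void)  { return 0; }

static const struct calls c_int32 = { "int32", always };
static const struct calls c_sse2  = { "sse2",  never  };

static const struct calls *const algos[] = { &c_sse2, &c_int32, NULL };

int main(void)
{
        for (const struct calls *const *a = algos; *a; a++)
                if ((*a)->valid()) {
                        printf("using %s\n", (*a)->name);
                        break;
                }
        return 0;
}
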
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright 2002 H. Peter Anvin - All Rights Reserved
* Copyright 2002-2004 H. Peter Anvin - All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
......@@ -20,12 +20,64 @@
#include "raid6.h"
/*
* This is the C data type to use
*/
/* Change this from BITS_PER_LONG if there is something better... */
#if BITS_PER_LONG == 64
# define NBYTES(x) ((x) * 0x0101010101010101UL)
# define NSIZE 8
# define NSHIFT 3
# define NSTRING "64"
typedef u64 unative_t;
#else
# define NBYTES(x) ((x) * 0x01010101U)
# define NSIZE 4
# define NSHIFT 2
# define NSTRING "32"
typedef u32 unative_t;
#endif
/*
* IA-64 wants insane amounts of unrolling. On other architectures that
* is just a waste of space.
*/
#if ($# <= 8) || defined(__ia64__)
/*
* These sub-operations are separate inlines since they can sometimes be
* specially optimized using architecture-specific hacks.
*/
/*
* The SHLBYTE() operation shifts each byte left by 1, *not*
* rolling over into the next byte
*/
static inline __attribute_const__ unative_t SHLBYTE(unative_t v)
{
unative_t vv;
vv = (v << 1) & NBYTES(0xfe);
return vv;
}
/*
* The MASK() operation returns 0xFF in any byte for which the high
* bit is 1, 0x00 for any byte for which the high bit is 0.
*/
static inline __attribute_const__ unative_t MASK(unative_t v)
{
unative_t vv;
vv = v & NBYTES(0x80);
vv = (vv << 1) - (vv >> 7); /* Overflow on the top bit is OK */
return vv;
}
#if ($# <= 8) || defined(__ia64__)
static void raid6_int$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
......@@ -44,9 +96,8 @@ static void raid6_int$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
for ( z = z0-1 ; z >= 0 ; z-- ) {
wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
wp$$ ^= wd$$;
w2$$ = wq$$ & NBYTES(0x80);
w1$$ = (wq$$ << 1) & NBYTES(0xfe);
w2$$ = (w2$$ << 1) - (w2$$ >> 7);
w2$$ = MASK(wq$$);
w1$$ = SHLBYTE(wq$$);
w2$$ &= NBYTES(0x1d);
w1$$ ^= w2$$;
wq$$ = w1$$ ^ wd$$;
......
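
The new SHLBYTE()/MASK() helpers above implement multiplication by 2 in GF(2^8) on every byte of a word at once: MASK() flags the bytes whose top bit is set, SHLBYTE() shifts each byte left without carrying into its neighbour, and the 0x1d reduction polynomial is folded into the flagged bytes. A user-space sketch of that packed multiply, with a 32-bit word standing in for unative_t:

/* User-space sketch of the packed GF(2^8) multiply-by-2 that the unrolled
 * loop above performs per data disk (Q = 2*Q + D).  A 32-bit word stands
 * in for unative_t. */
#include <stdio.h>
#include <stdint.h>

#define NBYTES(x) ((uint32_t)(x) * UINT32_C(0x01010101))

static uint32_t SHLBYTE(uint32_t v)     /* per-byte <<1, no carry between bytes */
{
        return (v << 1) & NBYTES(0xfe);
}

static uint32_t MASK(uint32_t v)        /* 0xff in each byte whose top bit was set */
{
        uint32_t vv = v & NBYTES(0x80);
        return (vv << 1) - (vv >> 7);   /* overflow on the top bit is OK */
}

static uint32_t gf_mul2(uint32_t wq)    /* multiply every byte by x in GF(2^8) */
{
        return SHLBYTE(wq) ^ (MASK(wq) & NBYTES(0x1d));
}

int main(void)
{
        /* 0x80 * 2 = 0x1d under the 0x11d polynomial; 0x01 * 2 = 0x02 */
        printf("%#010x\n", gf_mul2(0x80808001));
        return 0;
}
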
......@@ -16,7 +16,7 @@
* MMX implementation of RAID-6 syndrome functions
*/
#if defined(__i386__) || defined(__x86_64__)
#if defined(__i386__)
#include "raid6.h"
#include "raid6x86.h"
......
......@@ -117,7 +117,7 @@ void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, void **ptrs
} else {
/* data+Q failure. Reconstruct data from P,
then rebuild syndrome. */
/* FIX */
/* NOT IMPLEMENTED - equivalent to RAID-5 */
}
} else {
if ( failb == disks-2 ) {
......
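
The comment change above records that a data+Q failure needs no RAID-6-specific math: the lost data block is rebuilt from P by plain XOR (ordinary RAID-5 recovery) and Q is then regenerated. A toy user-space sketch of that two-step idea; the array sizes are arbitrary and the final Q regeneration is left to the real gen_syndrome() routine:

/* Sketch of the data+Q recovery path: rebuild the lost data block from P
 * by XOR, then Q would simply be recomputed from scratch. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define NDATA 3
#define BYTES 4

static void recover_data_from_p(uint8_t d[NDATA][BYTES], int lost,
                                const uint8_t p[BYTES])
{
        memcpy(d[lost], p, BYTES);                     /* start from parity ...   */
        for (int i = 0; i < NDATA; i++)
                if (i != lost)
                        for (int b = 0; b < BYTES; b++)
                                d[lost][b] ^= d[i][b]; /* ... XOR the survivors   */
}

int main(void)
{
        uint8_t d[NDATA][BYTES] = { {1,2,3,4}, {5,6,7,8}, {9,10,11,12} };
        uint8_t p[BYTES];

        for (int b = 0; b < BYTES; b++)
                p[b] = d[0][b] ^ d[1][b] ^ d[2][b];

        memset(d[1], 0, BYTES);                        /* lose a data block */
        recover_data_from_p(d, 1, p);
        printf("recovered: %u %u %u %u\n", d[1][0], d[1][1], d[1][2], d[1][3]);
        /* ... then regenerate Q with gen_syndrome(). */
        return 0;
}
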
......@@ -21,7 +21,7 @@
* worthwhile as a separate implementation.
*/
#if defined(__i386__) || defined(__x86_64__)
#if defined(__i386__)
#include "raid6.h"
#include "raid6x86.h"
......
......@@ -3,10 +3,11 @@
# from userspace.
#
CC = gcc
CFLAGS = -I.. -O2 -g -march=i686
LD = ld
PERL = perl
CC = gcc
OPTFLAGS = -O2 # Adjust as desired
CFLAGS = -I.. -g $(OPTFLAGS)
LD = ld
PERL = perl
.c.o:
$(CC) $(CFLAGS) -c -o $@ $<
......@@ -17,12 +18,10 @@ PERL = perl
%.uc: ../%.uc
cp -f $< $@
%.pl: ../%.pl
cp -f $< $@
all: raid6.o raid6test
raid6.o: raid6int1.o raid6int2.o raid6int4.o raid6int8.o raid6int16.o \
raid6int32.o \
raid6mmx.o raid6sse1.o raid6sse2.o \
raid6recov.o raid6algos.o \
raid6tables.o
......@@ -31,20 +30,23 @@ raid6.o: raid6int1.o raid6int2.o raid6int4.o raid6int8.o raid6int16.o \
raid6test: raid6.o test.c
$(CC) $(CFLAGS) -o raid6test $^
raid6int1.c: raid6int.uc unroller.pl
$(PERL) ./unroller.pl 1 < raid6int.uc > $@
raid6int1.c: raid6int.uc ../unroll.pl
$(PERL) ../unroll.pl 1 < raid6int.uc > $@
raid6int2.c: raid6int.uc ../unroll.pl
$(PERL) ../unroll.pl 2 < raid6int.uc > $@
raid6int2.c: raid6int.uc unroller.pl
$(PERL) ./unroller.pl 2 < raid6int.uc > $@
raid6int4.c: raid6int.uc ../unroll.pl
$(PERL) ../unroll.pl 4 < raid6int.uc > $@
raid6int4.c: raid6int.uc unroller.pl
$(PERL) ./unroller.pl 4 < raid6int.uc > $@
raid6int8.c: raid6int.uc ../unroll.pl
$(PERL) ../unroll.pl 8 < raid6int.uc > $@
raid6int8.c: raid6int.uc unroller.pl
$(PERL) ./unroller.pl 8 < raid6int.uc > $@
raid6int16.c: raid6int.uc ../unroll.pl
$(PERL) ../unroll.pl 16 < raid6int.uc > $@
raid6int16.c: raid6int.uc unroller.pl
$(PERL) ./unroller.pl 16 < raid6int.uc > $@
raid6int32.c: raid6int.uc ../unroll.pl
$(PERL) ../unroll.pl 32 < raid6int.uc > $@
raid6tables.c: mktables
./mktables > raid6tables.c
......
......@@ -73,14 +73,19 @@ int main(int argc, char *argv[])
erra = memcmp(data[i], recovi, PAGE_SIZE);
errb = memcmp(data[j], recovj, PAGE_SIZE);
printf("algo=%-8s faila=%3d(%c) failb=%3d(%c) %s\n",
raid6_call.name,
i, (i==NDISKS-2)?'P':'D',
j, (j==NDISKS-1)?'Q':(j==NDISKS-2)?'P':'D',
(!erra && !errb) ? "OK" :
!erra ? "ERRB" :
!errb ? "ERRA" :
"ERRAB");
if ( i < NDISKS-2 && j == NDISKS-1 ) {
/* We don't implement the DQ failure scenario, since it's
equivalent to a RAID-5 failure (XOR, then recompute Q) */
} else {
printf("algo=%-8s faila=%3d(%c) failb=%3d(%c) %s\n",
raid6_call.name,
i, (i==NDISKS-2)?'P':'D',
j, (j==NDISKS-1)?'Q':(j==NDISKS-2)?'P':'D',
(!erra && !errb) ? "OK" :
!erra ? "ERRB" :
!errb ? "ERRA" :
"ERRAB");
}
dataptrs[i] = data[i];
dataptrs[j] = data[j];
......
#ident "$Id: raid6x86.h,v 1.3 2002/12/12 22:41:27 hpa Exp $"
/* ----------------------------------------------------------------------- *
*
* Copyright 2002 H. Peter Anvin - All Rights Reserved
* Copyright 2002-2004 H. Peter Anvin - All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
......@@ -22,54 +22,75 @@
#if defined(__i386__) || defined(__x86_64__)
#ifdef __x86_64__
typedef struct {
unsigned int fsave[27];
unsigned int cr0;
} raid6_mmx_save_t;
unsigned long cr0;
} raid6_mmx_save_t __attribute__((aligned(16)));
/* N.B.: For SSE we only save %xmm0-%xmm7 even for x86-64, since
the code doesn't know about the additional x86-64 registers */
/* The +3 is so we can make sure the area is aligned properly */
typedef struct {
unsigned int sarea[8*4+3];
unsigned int sarea[8*4];
unsigned int cr0;
} raid6_sse_save_t __attribute__((aligned(16)));
#ifdef __x86_64__
/* This is for x86-64-specific code which uses all 16 XMM registers */
typedef struct {
unsigned int sarea[16*4+3];
unsigned int cr0;
unsigned int sarea[16*4];
unsigned long cr0;
} raid6_sse16_save_t __attribute__((aligned(16)));
/* On x86-64 the stack is 16-byte aligned */
#define SAREA(x) (x->sarea)
#else /* __i386__ */
typedef struct {
unsigned int fsave[27];
unsigned long cr0;
} raid6_mmx_save_t;
/* On i386, the stack is only 8-byte aligned, but SSE requires 16-byte
alignment. The +3 is so we have the slack space to manually align
a properly-sized area correctly. */
typedef struct {
unsigned int sarea[8*4+3];
unsigned long cr0;
} raid6_sse_save_t;
#define SAREA(x) ((unsigned int *)((((unsigned long)&(x)->sarea)+15) & ~15))
#endif
#ifdef __KERNEL__ /* Real code */
static inline u32 raid6_get_fpu(void)
/* Note: %cr0 is 32 bits on i386 and 64 bits on x86-64 */
static inline unsigned long raid6_get_fpu(void)
{
u32 cr0;
unsigned long cr0;
preempt_disable();
asm volatile("movl %%cr0,%0 ; clts" : "=r" (cr0));
asm volatile("mov %%cr0,%0 ; clts" : "=r" (cr0));
return cr0;
}
static inline void raid6_put_fpu(u32 cr0)
static inline void raid6_put_fpu(unsigned long cr0)
{
asm volatile("movl %0,%%cr0" : : "r" (cr0));
asm volatile("mov %0,%%cr0" : : "r" (cr0));
preempt_enable();
}
#else /* Dummy code for user space testing */
static inline u32 raid6_get_fpu(void)
static inline unsigned long raid6_get_fpu(void)
{
return 0xf00ba6;
}
static inline void raid6_put_fpu(u32 cr0)
static inline void raid6_put_fpu(unsigned long cr0)
{
(void)cr0;
}
......@@ -90,13 +111,8 @@ static inline void raid6_after_mmx(raid6_mmx_save_t *s)
static inline void raid6_before_sse(raid6_sse_save_t *s)
{
#ifdef __x86_64__
unsigned int *rsa = s->sarea;
#else
/* On i386 the save area may not be aligned */
unsigned int *rsa =
(unsigned int *)((((unsigned long)&s->sarea)+15) & ~15);
#endif
unsigned int *rsa = SAREA(s);
s->cr0 = raid6_get_fpu();
asm volatile("movaps %%xmm0,%0" : "=m" (rsa[0]));
......@@ -111,13 +127,8 @@ static inline void raid6_before_sse(raid6_sse_save_t *s)
static inline void raid6_after_sse(raid6_sse_save_t *s)
{
#ifdef __x86_64__
unsigned int *rsa = s->sarea;
#else
/* On i386 the save area may not be aligned */
unsigned int *rsa =
(unsigned int *)((((unsigned long)&s->sarea)+15) & ~15);
#endif
unsigned int *rsa = SAREA(s);
asm volatile("movaps %0,%%xmm0" : : "m" (rsa[0]));
asm volatile("movaps %0,%%xmm1" : : "m" (rsa[4]));
asm volatile("movaps %0,%%xmm2" : : "m" (rsa[8]));
......@@ -132,13 +143,8 @@ static inline void raid6_after_sse(raid6_sse_save_t *s)
static inline void raid6_before_sse2(raid6_sse_save_t *s)
{
#ifdef __x86_64__
unsigned int *rsa = &s->sarea;
#else
/* On i386 the save area may not be aligned */
unsigned int *rsa =
(unsigned int *)((((unsigned long)&s->sarea)+15) & ~15);
#endif
unsigned int *rsa = SAREA(s);
s->cr0 = raid6_get_fpu();
asm volatile("movdqa %%xmm0,%0" : "=m" (rsa[0]));
......@@ -153,13 +159,8 @@ static inline void raid6_before_sse2(raid6_sse_save_t *s)
static inline void raid6_after_sse2(raid6_sse_save_t *s)
{
#ifdef __x86_64__
unsigned int *rsa = s->sarea;
#else
/* On i386 the save area may not be aligned */
unsigned int *rsa =
(unsigned int *)((((unsigned long)&s->sarea)+15) & ~15);
#endif
unsigned int *rsa = SAREA(s);
asm volatile("movdqa %0,%%xmm0" : : "m" (rsa[0]));
asm volatile("movdqa %0,%%xmm1" : : "m" (rsa[4]));
asm volatile("movdqa %0,%%xmm2" : : "m" (rsa[8]));
......@@ -174,9 +175,9 @@ static inline void raid6_after_sse2(raid6_sse_save_t *s)
#ifdef __x86_64__
static inline raid6_before_sse16(raid6_sse16_save_t *s)
static inline void raid6_before_sse16(raid6_sse16_save_t *s)
{
unsigned int *rsa = s->sarea;
unsigned int *rsa = SAREA(s);
s->cr0 = raid6_get_fpu();
......@@ -198,9 +199,9 @@ static inline raid6_before_sse16(raid6_sse16_save_t *s)
asm volatile("movdqa %%xmm15,%0" : "=m" (rsa[60]));
}
static inline raid6_after_sse16(raid6_sse16_save_t *s)
static inline void raid6_after_sse16(raid6_sse16_save_t *s)
{
unsigned int *rsa = s->sarea;
unsigned int *rsa = SAREA(s);
asm volatile("movdqa %0,%%xmm0" : : "m" (rsa[0]));
asm volatile("movdqa %0,%%xmm1" : : "m" (rsa[4]));
......
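
The raid6x86.h hunks above widen the saved %cr0 to unsigned long and hide the 16-byte alignment of the XMM save area behind the SAREA() macro, since the i386 stack is only 8-byte aligned and the area must be aligned by hand there. A user-space sketch of that manual round-up trick:

/* User-space sketch of the manual 16-byte alignment behind the i386
 * SAREA() macro above: over-allocate by 3 extra 32-bit words and round
 * the address up to the next 16-byte boundary. */
#include <stdio.h>
#include <stdint.h>

struct sse_save {
        unsigned int sarea[8 * 4 + 3];  /* 8 XMM regs + slack for alignment */
        unsigned long cr0;
};

static unsigned int *sarea_aligned(struct sse_save *s)
{
        return (unsigned int *)(((uintptr_t)&s->sarea + 15) & ~(uintptr_t)15);
}

int main(void)
{
        struct sse_save s;
        unsigned int *rsa = sarea_aligned(&s);

        printf("raw=%p aligned=%p (mod 16 = %lu)\n",
               (void *)s.sarea, (void *)rsa,
               (unsigned long)((uintptr_t)rsa % 16));
        return 0;
}
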
......@@ -112,7 +112,7 @@ static int __init dummy_init_one(int index)
return err;
}
static void __exit dummy_free_one(int index)
static void dummy_free_one(int index)
{
unregister_netdev(dummies[index]);
free_netdev(dummies[index]);
......
......@@ -51,6 +51,8 @@
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <pcmcia/version.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
......@@ -95,10 +97,12 @@ typedef struct user_info_t {
int event_head, event_tail;
event_t event[MAX_EVENTS];
struct user_info_t *next;
struct pcmcia_bus_socket *socket;
} user_info_t;
/* Socket state information */
struct pcmcia_bus_socket {
atomic_t refcount;
client_handle_t handle;
int state;
user_info_t *user;
......@@ -106,13 +110,13 @@ struct pcmcia_bus_socket {
wait_queue_head_t queue, request;
struct work_struct removal;
socket_bind_t *bind;
struct device *socket_dev;
struct pcmcia_socket *parent;
};
#define SOCKET_PRESENT 0x01
#define SOCKET_BUSY 0x02
#define SOCKET_REMOVAL_PENDING 0x10
#define SOCKET_DEAD 0x80
/*====================================================================*/
......@@ -137,6 +141,24 @@ EXPORT_SYMBOL(cs_error);
static struct pcmcia_driver * get_pcmcia_driver (dev_info_t *dev_info);
static struct pcmcia_bus_socket * get_socket_info_by_nr(unsigned int nr);
static void pcmcia_put_bus_socket(struct pcmcia_bus_socket *s)
{
if (atomic_dec_and_test(&s->refcount))
kfree(s);
}
static struct pcmcia_bus_socket *pcmcia_get_bus_socket(int nr)
{
struct pcmcia_bus_socket *s;
s = get_socket_info_by_nr(nr);
if (s) {
WARN_ON(atomic_read(&s->refcount) == 0);
atomic_inc(&s->refcount);
}
return s;
}
/**
* pcmcia_register_driver - register a PCMCIA driver with the bus core
*
......@@ -230,13 +252,10 @@ static int handle_request(struct pcmcia_bus_socket *s, event_t event)
if (s->state & SOCKET_BUSY)
s->req_pending = 1;
handle_event(s, event);
if (s->req_pending > 0) {
interruptible_sleep_on(&s->request);
if (signal_pending(current))
return CS_IN_USE;
else
return s->req_result;
}
if (wait_event_interruptible(s->request, s->req_pending <= 0))
return CS_IN_USE;
if (s->state & SOCKET_BUSY)
return s->req_result;
return CS_SUCCESS;
}
......@@ -501,7 +520,7 @@ static int ds_open(struct inode *inode, struct file *file)
DEBUG(0, "ds_open(socket %d)\n", i);
s = get_socket_info_by_nr(i);
s = pcmcia_get_bus_socket(i);
if (!s)
return -ENODEV;
......@@ -517,6 +536,7 @@ static int ds_open(struct inode *inode, struct file *file)
user->event_tail = user->event_head = 0;
user->next = s->user;
user->user_magic = USER_MAGIC;
user->socket = s;
s->user = user;
file->private_data = user;
......@@ -529,23 +549,23 @@ static int ds_open(struct inode *inode, struct file *file)
static int ds_release(struct inode *inode, struct file *file)
{
socket_t i = iminor(inode);
struct pcmcia_bus_socket *s;
user_info_t *user, **link;
DEBUG(0, "ds_release(socket %d)\n", i);
s = get_socket_info_by_nr(i);
if (!s)
return 0;
DEBUG(0, "ds_release(socket %d)\n", iminor(inode));
user = file->private_data;
if (CHECK_USER(user))
goto out;
s = user->socket;
/* Unlink user data structure */
if ((file->f_flags & O_ACCMODE) != O_RDONLY)
if ((file->f_flags & O_ACCMODE) != O_RDONLY) {
s->state &= ~SOCKET_BUSY;
s->req_pending = 0;
wake_up_interruptible(&s->request);
}
file->private_data = NULL;
for (link = &s->user; *link; link = &(*link)->next)
if (*link == user) break;
......@@ -554,6 +574,7 @@ static int ds_release(struct inode *inode, struct file *file)
*link = user->next;
user->user_magic = 0;
kfree(user);
pcmcia_put_bus_socket(s);
out:
return 0;
} /* ds_release */
......@@ -563,30 +584,28 @@ static int ds_release(struct inode *inode, struct file *file)
static ssize_t ds_read(struct file *file, char *buf,
size_t count, loff_t *ppos)
{
socket_t i = iminor(file->f_dentry->d_inode);
struct pcmcia_bus_socket *s;
user_info_t *user;
int ret;
DEBUG(2, "ds_read(socket %d)\n", i);
DEBUG(2, "ds_read(socket %d)\n", iminor(inode));
if (count < 4)
return -EINVAL;
s = get_socket_info_by_nr(i);
if (!s)
return -ENODEV;
user = file->private_data;
if (CHECK_USER(user))
return -EIO;
if (queue_empty(user)) {
interruptible_sleep_on(&s->queue);
if (signal_pending(current))
return -EINTR;
}
s = user->socket;
if (s->state & SOCKET_DEAD)
return -EIO;
ret = wait_event_interruptible(s->queue, !queue_empty(user));
if (ret == 0)
ret = put_user(get_queued_event(user), (int *)buf) ? -EFAULT : 4;
return put_user(get_queued_event(user), (int *)buf) ? -EFAULT : 4;
return ret;
} /* ds_read */
/*====================================================================*/
......@@ -594,25 +613,24 @@ static ssize_t ds_read(struct file *file, char *buf,
static ssize_t ds_write(struct file *file, const char *buf,
size_t count, loff_t *ppos)
{
socket_t i = iminor(file->f_dentry->d_inode);
struct pcmcia_bus_socket *s;
user_info_t *user;
DEBUG(2, "ds_write(socket %d)\n", i);
DEBUG(2, "ds_write(socket %d)\n", iminor(inode));
if (count != 4)
return -EINVAL;
if ((file->f_flags & O_ACCMODE) == O_RDONLY)
return -EBADF;
s = get_socket_info_by_nr(i);
if (!s)
return -ENODEV;
user = file->private_data;
if (CHECK_USER(user))
return -EIO;
s = user->socket;
if (s->state & SOCKET_DEAD)
return -EIO;
if (s->req_pending) {
s->req_pending--;
get_user(s->req_result, (int *)buf);
......@@ -629,19 +647,19 @@ static ssize_t ds_write(struct file *file, const char *buf,
/* No kernel lock - fine */
static u_int ds_poll(struct file *file, poll_table *wait)
{
socket_t i = iminor(file->f_dentry->d_inode);
struct pcmcia_bus_socket *s;
user_info_t *user;
DEBUG(2, "ds_poll(socket %d)\n", i);
DEBUG(2, "ds_poll(socket %d)\n", iminor(inode));
s = get_socket_info_by_nr(i);
if (!s)
return POLLERR;
user = file->private_data;
if (CHECK_USER(user))
return POLLERR;
s = user->socket;
/*
* We don't check for a dead socket here since that
* will send cardmgr into an endless spin.
*/
poll_wait(file, &s->queue, wait);
if (!queue_empty(user))
return POLLIN | POLLRDNORM;
......@@ -653,17 +671,21 @@ static u_int ds_poll(struct file *file, poll_table *wait)
static int ds_ioctl(struct inode * inode, struct file * file,
u_int cmd, u_long arg)
{
socket_t i = iminor(inode);
struct pcmcia_bus_socket *s;
u_int size;
int ret, err;
ds_ioctl_arg_t buf;
user_info_t *user;
DEBUG(2, "ds_ioctl(socket %d, %#x, %#lx)\n", i, cmd, arg);
DEBUG(2, "ds_ioctl(socket %d, %#x, %#lx)\n", iminor(inode), cmd, arg);
s = get_socket_info_by_nr(i);
if (!s)
return -ENODEV;
user = file->private_data;
if (CHECK_USER(user))
return -EIO;
s = user->socket;
if (s->state & SOCKET_DEAD)
return -EIO;
size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
if (size > sizeof(ds_ioctl_arg_t)) return -EINVAL;
......@@ -833,6 +855,7 @@ static int __devinit pcmcia_bus_add_socket(struct class_device *class_dev)
if(!s)
return -ENOMEM;
memset(s, 0, sizeof(struct pcmcia_bus_socket));
atomic_set(&s->refcount, 1);
/*
* Ugly. But we want to wait for the socket threads to have started up.
......@@ -845,7 +868,6 @@ static int __devinit pcmcia_bus_add_socket(struct class_device *class_dev)
init_waitqueue_head(&s->request);
/* initialize data */
s->socket_dev = socket->dev.dev;
INIT_WORK(&s->removal, handle_removal, s);
s->parent = socket;
......@@ -894,7 +916,8 @@ static void pcmcia_bus_remove_socket(struct class_device *class_dev)
pcmcia_deregister_client(socket->pcmcia->handle);
kfree(socket->pcmcia);
socket->pcmcia->state |= SOCKET_DEAD;
pcmcia_put_bus_socket(socket->pcmcia);
socket->pcmcia = NULL;
return;
......
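
The ds.c changes above add a reference count to struct pcmcia_bus_socket (plus a SOCKET_DEAD state bit) so the structure outlives card removal for as long as a file descriptor still points at it, and replace interruptible_sleep_on() with wait_event_interruptible() to avoid the missed-wakeup race inherent in sleep_on. A user-space sketch of the get/put refcounting pattern, with C11 atomics standing in for the kernel's atomic_t:

/* User-space sketch of the get/put pattern: the creator holds one
 * reference, every open file takes another, and the memory is freed only
 * when the last reference is dropped. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct bus_socket {
        atomic_int refcount;
        int dead;               /* analogous to the SOCKET_DEAD state bit */
};

static struct bus_socket *socket_get(struct bus_socket *s)
{
        if (s)
                atomic_fetch_add(&s->refcount, 1);
        return s;
}

static void socket_put(struct bus_socket *s)
{
        if (atomic_fetch_sub(&s->refcount, 1) == 1)
                free(s);        /* last reference gone */
}

int main(void)
{
        struct bus_socket *s = calloc(1, sizeof(*s));
        atomic_init(&s->refcount, 1);   /* creator's reference */

        socket_get(s);                  /* ds_open()-style reference */
        s->dead = 1;                    /* device removed ...        */
        socket_put(s);                  /* ... creator drops its ref */
        socket_put(s);                  /* ds_release(): freed here  */
        return 0;
}
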
......@@ -57,9 +57,11 @@ extern int addrconf_del_ifaddr(void *arg);
extern int addrconf_set_dstaddr(void *arg);
extern int ipv6_chk_addr(struct in6_addr *addr,
struct net_device *dev);
struct net_device *dev,
int strict);
extern struct inet6_ifaddr * ipv6_get_ifaddr(struct in6_addr *addr,
struct net_device *dev);
struct net_device *dev,
int strict);
extern int ipv6_get_saddr(struct dst_entry *dst,
struct in6_addr *daddr,
struct in6_addr *saddr);
......
......@@ -910,7 +910,7 @@ static int ipv6_count_addresses(struct inet6_dev *idev)
return cnt;
}
int ipv6_chk_addr(struct in6_addr *addr, struct net_device *dev)
int ipv6_chk_addr(struct in6_addr *addr, struct net_device *dev, int strict)
{
struct inet6_ifaddr * ifp;
u8 hash = ipv6_addr_hash(addr);
......@@ -920,7 +920,7 @@ int ipv6_chk_addr(struct in6_addr *addr, struct net_device *dev)
if (ipv6_addr_cmp(&ifp->addr, addr) == 0 &&
!(ifp->flags&IFA_F_TENTATIVE)) {
if (dev == NULL || ifp->idev->dev == dev ||
!(ifp->scope&(IFA_LINK|IFA_HOST)))
!(ifp->scope&(IFA_LINK|IFA_HOST) || strict))
break;
}
}
......@@ -945,7 +945,7 @@ int ipv6_chk_same_addr(const struct in6_addr *addr, struct net_device *dev)
return ifp != NULL;
}
struct inet6_ifaddr * ipv6_get_ifaddr(struct in6_addr *addr, struct net_device *dev)
struct inet6_ifaddr * ipv6_get_ifaddr(struct in6_addr *addr, struct net_device *dev, int strict)
{
struct inet6_ifaddr * ifp;
u8 hash = ipv6_addr_hash(addr);
......@@ -954,7 +954,7 @@ struct inet6_ifaddr * ipv6_get_ifaddr(struct in6_addr *addr, struct net_device *
for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) {
if (ipv6_addr_cmp(&ifp->addr, addr) == 0) {
if (dev == NULL || ifp->idev->dev == dev ||
!(ifp->scope&(IFA_LINK|IFA_HOST))) {
!(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
in6_ifa_hold(ifp);
break;
}
......@@ -1393,7 +1393,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
ok:
ifp = ipv6_get_ifaddr(&addr, dev);
ifp = ipv6_get_ifaddr(&addr, dev, 1);
if (ifp == NULL && valid_lft) {
int max_addresses = in6_dev->cnf.max_addresses;
......@@ -2952,7 +2952,7 @@ static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
if (!ipv6_addr_any(&addr))
ipv6_dev_ac_dec(ifp->idev->dev, &addr);
}
if (!ipv6_chk_addr(&ifp->addr, NULL))
if (!ipv6_chk_addr(&ifp->addr, ifp->idev->dev, 1))
ip6_rt_addr_del(&ifp->addr, ifp->idev->dev);
break;
}
......@@ -3191,6 +3191,9 @@ static struct addrconf_sysctl_table
.mode = 0555,
.child = addrconf_sysctl.addrconf_vars,
},
{
.ctl_name = 0, /* sentinel */
}
},
.addrconf_conf_dir = {
{
......@@ -3199,6 +3202,9 @@ static struct addrconf_sysctl_table
.mode = 0555,
.child = addrconf_sysctl.addrconf_dev,
},
{
.ctl_name = 0, /* sentinel */
}
},
.addrconf_proto_dir = {
{
......@@ -3207,6 +3213,9 @@ static struct addrconf_sysctl_table
.mode = 0555,
.child = addrconf_sysctl.addrconf_conf_dir,
},
{
.ctl_name = 0, /* sentinel */
}
},
.addrconf_root_dir = {
{
......@@ -3215,6 +3224,9 @@ static struct addrconf_sysctl_table
.mode = 0555,
.child = addrconf_sysctl.addrconf_proto_dir,
},
{
.ctl_name = 0, /* sentinel */
}
},
};
......
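
The sysctl hunks above append an empty { .ctl_name = 0 } entry to each table so the registration code can find the end of the array. A user-space sketch of sentinel-terminated table walking; the struct is a simplified stand-in, not the real ctl_table:

/* Sketch of why the empty { .ctl_name = 0 } entries are needed: each
 * table is walked until an all-zero sentinel, so a missing terminator
 * lets the walker run off the end of the array. */
#include <stdio.h>

struct table_entry {
        int ctl_name;           /* 0 terminates the array */
        const char *procname;
};

static void walk(const struct table_entry *t)
{
        for (; t->ctl_name; t++)
                printf("entry %d: %s\n", t->ctl_name, t->procname);
}

int main(void)
{
        static const struct table_entry dir[] = {
                { 1, "forwarding" },
                { 2, "hop_limit"  },
                { 0, NULL }     /* sentinel */
        };
        walk(dir);
        return 0;
}
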
......@@ -355,7 +355,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
*/
v4addr = LOOPBACK4_IPV6;
if (!(addr_type & IPV6_ADDR_MULTICAST)) {
if (!ipv6_chk_addr(&addr->sin6_addr, dev)) {
if (!ipv6_chk_addr(&addr->sin6_addr, dev, 0)) {
if (dev)
dev_put(dev);
err = -EADDRNOTAVAIL;
......
......@@ -113,7 +113,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, struct in6_addr *addr)
return -EPERM;
if (ipv6_addr_type(addr) & IPV6_ADDR_MULTICAST)
return -EINVAL;
if (ipv6_chk_addr(addr, NULL))
if (ipv6_chk_addr(addr, NULL, 0))
return -EINVAL;
pac = sock_kmalloc(sk, sizeof(struct ipv6_ac_socklist), GFP_KERNEL);
......
......@@ -295,7 +295,7 @@ int datagram_send_ctl(struct msghdr *msg, struct flowi *fl,
addr_type = ipv6_addr_type(&src_info->ipi6_addr);
if (ipv6_addr_type == IPV6_ADDR_ANY)
if (addr_type == IPV6_ADDR_ANY)
break;
if (addr_type & IPV6_ADDR_LINKLOCAL) {
......@@ -307,7 +307,7 @@ int datagram_send_ctl(struct msghdr *msg, struct flowi *fl,
return -ENODEV;
}
}
if (!ipv6_chk_addr(&src_info->ipi6_addr, dev)) {
if (!ipv6_chk_addr(&src_info->ipi6_addr, dev, 0)) {
if (dev)
dev_put(dev);
err = -EINVAL;
......
......@@ -298,7 +298,7 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
*/
addr_type = ipv6_addr_type(&hdr->daddr);
if (ipv6_chk_addr(&hdr->daddr, skb->dev))
if (ipv6_chk_addr(&hdr->daddr, skb->dev, 0))
saddr = &hdr->daddr;
/*
......
......@@ -725,6 +725,7 @@ int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
int err = 0;
*dst = NULL;
if (sk) {
struct ipv6_pinfo *np = inet6_sk(sk);
......
......@@ -777,10 +777,10 @@ static void ip6_tnl_set_cap(struct ip6_tnl *t)
if (p->link)
ldev = dev_get_by_index(p->link);
if ((ltype&IPV6_ADDR_UNICAST) && !ipv6_chk_addr(laddr, ldev))
if (ltype&IPV6_ADDR_UNICAST && !ipv6_chk_addr(laddr, ldev, 0))
l_ok = 0;
if ((rtype&IPV6_ADDR_UNICAST) && ipv6_chk_addr(raddr, NULL))
if (rtype&IPV6_ADDR_UNICAST && ipv6_chk_addr(raddr, NULL, 0))
r_ok = 0;
if (l_ok && r_ok) {
......
......@@ -434,7 +434,7 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
/* for anycast or proxy, solicited_addr != src_addr */
ifp = ipv6_get_ifaddr(solicited_addr, dev);
ifp = ipv6_get_ifaddr(solicited_addr, dev, 1);
if (ifp) {
src_addr = solicited_addr;
in6_ifa_put(ifp);
......@@ -680,7 +680,7 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
struct in6_addr *target = (struct in6_addr *)&neigh->primary_key;
int probes = atomic_read(&neigh->probes);
if (skb && ipv6_chk_addr(&skb->nh.ipv6h->saddr, dev))
if (skb && ipv6_chk_addr(&skb->nh.ipv6h->saddr, dev, 1))
saddr = &skb->nh.ipv6h->saddr;
if ((probes -= neigh->parms->ucast_probes) < 0) {
......@@ -758,7 +758,7 @@ static void ndisc_recv_ns(struct sk_buff *skb)
}
}
if ((ifp = ipv6_get_ifaddr(&msg->target, dev)) != NULL) {
if ((ifp = ipv6_get_ifaddr(&msg->target, dev, 1)) != NULL) {
if (ifp->flags & IFA_F_TENTATIVE) {
/* Address is tentative. If the source
is unspecified address, it is someone
......@@ -955,7 +955,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
return;
}
}
if ((ifp = ipv6_get_ifaddr(&msg->target, dev))) {
if ((ifp = ipv6_get_ifaddr(&msg->target, dev, 1))) {
if (ifp->flags & IFA_F_TENTATIVE) {
addrconf_dad_failure(ifp);
return;
......
......@@ -227,7 +227,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
v4addr = LOOPBACK4_IPV6;
if (!(addr_type & IPV6_ADDR_MULTICAST)) {
err = -EADDRNOTAVAIL;
if (!ipv6_chk_addr(&addr->sin6_addr, dev)) {
if (!ipv6_chk_addr(&addr->sin6_addr, dev, 0)) {
if (dev)
dev_put(dev);
goto out;
......
......@@ -510,7 +510,7 @@ static int sctp_v6_available(union sctp_addr *addr, struct sctp_opt *sp)
if (!(type & IPV6_ADDR_UNICAST))
return 0;
return ipv6_chk_addr(in6, NULL);
return ipv6_chk_addr(in6, NULL, 0);
}
/* This function checks if the address is a valid address to be used for
......
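
The new strict argument threaded through ipv6_chk_addr()/ipv6_get_ifaddr() above forces an exact device match; without it, only link- and host-scoped addresses were tied to a particular device. A user-space sketch of the resulting predicate, with plain ints standing in for device pointers and scope flags:

/* Sketch of the matching rule after the change above: with strict == 0 a
 * non-scoped address may match regardless of device, with strict == 1 it
 * must be configured on the given device (unless no device is given). */
#include <stdio.h>

#define IFA_HOST 0x01
#define IFA_LINK 0x02

static int addr_matches(int addr_dev, int query_dev, int scope, int strict)
{
        return query_dev == 0 ||                  /* no device given    */
               addr_dev == query_dev ||           /* exact device match */
               !((scope & (IFA_LINK | IFA_HOST)) || strict);
}

int main(void)
{
        /* address configured on dev 1, queried against dev 2 */
        printf("link-local, loose: %d\n", addr_matches(1, 2, IFA_LINK, 0)); /* 0 */
        printf("global, strict:    %d\n", addr_matches(1, 2, 0, 1));        /* 0 */
        printf("global, loose:     %d\n", addr_matches(1, 2, 0, 0));        /* 1 */
        return 0;
}
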