Commit c53373b8 authored by David S. Miller's avatar David S. Miller

Merge nuts.davemloft.net:/disk1/BK/acme-2.6

into nuts.davemloft.net:/disk1/BK/net-2.6
parents edbf7825 40fe4936
......@@ -28,6 +28,8 @@ ifdef CONFIG_SUN3
LDFLAGS_vmlinux = -N
endif
CHECK := $(CHECK) -D__mc68000__=1 -I$(shell $(CC) -print-file-name=include)
# without -fno-strength-reduce the 53c7xx.c driver fails ;-(
CFLAGS += -pipe -fno-strength-reduce -ffixed-a2
......
......@@ -196,14 +196,57 @@ _060_real_cas2:
| Expected outputs:
| d0 = 0 -> success; non-zero -> failure
|
| Linux/68k: As long as ints are disabled, no swapping out should
| occur (hopefully...)
| Linux/m68k: Make sure the page is properly paged in, so we use
| plpaw and handle any exception here. The kernel must not be
| preempted until _060_unlock_page(), so that the page stays mapped.
|
.global _060_real_lock_page
_060_real_lock_page:
clr.l %d0
move.l %d2,-(%sp)
| load sfc/dfc
moveq #5,%d0
tst.b %d0
jne 1f
moveq #1,%d0
1: movec.l %dfc,%d2
movec.l %d0,%dfc
movec.l %d0,%sfc
clr.l %d0
| prefetch address
.chip 68060
move.l %a0,%a1
1: plpaw (%a1)
addq.w #1,%a0
tst.b %d1
jeq 2f
addq.w #2,%a0
2: plpaw (%a0)
3: .chip 68k
| restore sfc/dfc
movec.l %d2,%dfc
movec.l %d2,%sfc
move.l (%sp)+,%d2
rts
.section __ex_table,"a"
.align 4
.long 1b,11f
.long 2b,21f
.previous
.section .fixup,"ax"
.even
11: move.l #0x020003c0,%d0
or.l %d2,%d0
swap %d0
jra 3b
21: move.l #0x02000bc0,%d0
or.l %d2,%d0
swap %d0
jra 3b
.previous
|
| _060_unlock_page():
|
......@@ -216,8 +259,7 @@ _060_real_lock_page:
| d0 = `xxxxxxff -> supervisor; `xxxxxx00 -> user
| d1 = `xxxxxxff -> longword; `xxxxxx00 -> word
|
| Linux/68k: As we do no special locking operation, also no unlocking
| is needed...
| Linux/m68k: perhaps reenable preemption here...
.global _060_real_unlock_page
_060_real_unlock_page:
......
......@@ -228,8 +228,8 @@ static inline int restore_fpu_state(struct sigcontext *sc)
goto out;
__asm__ volatile (".chip 68k/68881\n\t"
"fmovemx %0,%/fp0-%/fp1\n\t"
"fmoveml %1,%/fpcr/%/fpsr/%/fpiar\n\t"
"fmovemx %0,%%fp0-%%fp1\n\t"
"fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
".chip 68k"
: /* no outputs */
: "m" (*sc->sc_fpregs), "m" (*sc->sc_fpcntl));
......@@ -258,7 +258,7 @@ static inline int rt_restore_fpu_state(struct ucontext *uc)
if (FPU_IS_EMU) {
/* restore fpu control register */
if (__copy_from_user(current->thread.fpcntl,
&uc->uc_mcontext.fpregs.f_pcr, 12))
uc->uc_mcontext.fpregs.f_fpcntl, 12))
goto out;
/* restore all other fpu register */
if (__copy_from_user(current->thread.fp,
......@@ -298,12 +298,12 @@ static inline int rt_restore_fpu_state(struct ucontext *uc)
sizeof(fpregs)))
goto out;
__asm__ volatile (".chip 68k/68881\n\t"
"fmovemx %0,%/fp0-%/fp7\n\t"
"fmoveml %1,%/fpcr/%/fpsr/%/fpiar\n\t"
"fmovemx %0,%%fp0-%%fp7\n\t"
"fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
".chip 68k"
: /* no outputs */
: "m" (*fpregs.f_fpregs),
"m" (fpregs.f_pcr));
"m" (*fpregs.f_fpcntl));
}
if (context_size &&
__copy_from_user(fpstate + 4, (long *)&uc->uc_fpstate + 1,
......@@ -586,12 +586,12 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
sc->sc_fpstate[0x38] |= 1 << 3;
}
__asm__ volatile (".chip 68k/68881\n\t"
"fmovemx %/fp0-%/fp1,%0\n\t"
"fmoveml %/fpcr/%/fpsr/%/fpiar,%1\n\t"
"fmovemx %%fp0-%%fp1,%0\n\t"
"fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
".chip 68k"
: /* no outputs */
: "m" (*sc->sc_fpregs),
"m" (*sc->sc_fpcntl)
: "=m" (*sc->sc_fpregs),
"=m" (*sc->sc_fpcntl)
: /* no inputs */
: "memory");
}
}
......@@ -604,7 +604,7 @@ static inline int rt_save_fpu_state(struct ucontext *uc, struct pt_regs *regs)
if (FPU_IS_EMU) {
/* save fpu control register */
err |= copy_to_user(&uc->uc_mcontext.fpregs.f_pcr,
err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
current->thread.fpcntl, 12);
/* save all other fpu register */
err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
......@@ -631,12 +631,12 @@ static inline int rt_save_fpu_state(struct ucontext *uc, struct pt_regs *regs)
fpstate[0x38] |= 1 << 3;
}
__asm__ volatile (".chip 68k/68881\n\t"
"fmovemx %/fp0-%/fp7,%0\n\t"
"fmoveml %/fpcr/%/fpsr/%/fpiar,%1\n\t"
"fmovemx %%fp0-%%fp7,%0\n\t"
"fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
".chip 68k"
: /* no outputs */
: "m" (*fpregs.f_fpregs),
"m" (fpregs.f_pcr)
: "=m" (*fpregs.f_fpregs),
"=m" (*fpregs.f_fpcntl)
: /* no inputs */
: "memory");
err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
sizeof(fpregs));
......
......@@ -329,7 +329,8 @@ static inline void access_error060 (struct frame *fp)
* fault during mem_read/mem_write in ifpsp060/os.S
*/
send_fault_sig(&fp->ptregs);
} else {
} else if (!(fslw & (MMU060_RE|MMU060_WE)) ||
send_fault_sig(&fp->ptregs) > 0) {
printk("pc=%#lx, fa=%#lx\n", fp->ptregs.pc, fp->un.fmt4.effaddr);
printk( "68060 access error, fslw=%lx\n", fslw );
trap_c( fp );
......@@ -517,7 +518,7 @@ static inline void access_error040(struct frame *fp)
if (fp->un.fmt7.wb2a == fp->un.fmt7.faddr)
fp->un.fmt7.wb2s &= ~WBV_040;
}
} else {
} else if (send_fault_sig(&fp->ptregs) > 0) {
printk("68040 access error, ssw=%x\n", ssw);
trap_c(fp);
}
......@@ -732,7 +733,7 @@ static inline void bus_error030 (struct frame *fp)
return;
} else if (!(mmusr & MMU_I)) {
/* probably a 020 cas fault */
if (!(ssw & RM))
if (!(ssw & RM) && send_fault_sig(&fp->ptregs) > 0)
printk("unexpected bus error (%#x,%#x)\n", ssw, mmusr);
} else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
printk("invalid %s access at %#lx from pc %#lx\n",
......
......@@ -261,7 +261,7 @@ void __init iop_preinit(void)
} else {
iop_base[IOP_NUM_ISM] = (struct mac_iop *) ISM_IOP_BASE_QUADRA;
}
iop_base[IOP_NUM_SCC]->status_ctrl = 0;
iop_base[IOP_NUM_ISM]->status_ctrl = 0;
iop_ism_present = 1;
} else {
iop_base[IOP_NUM_ISM] = NULL;
......
......@@ -54,7 +54,7 @@ void __init init_pointer_table(unsigned long ptable)
/* unreserve the page so it's possible to free that page */
PD_PAGE(dp)->flags &= ~(1 << PG_reserved);
atomic_set(&PD_PAGE(dp)->count, 1);
set_page_count(PD_PAGE(dp), 1);
return;
}
......
......@@ -612,7 +612,6 @@ static struct net_device *dev_macsonic;
MODULE_PARM(sonic_debug, "i");
MODULE_PARM_DESC(sonic_debug, "macsonic debug level (1-4)");
MODULE_LICENSE("GPL");
int
init_module(void)
......
......@@ -672,6 +672,25 @@ config VFAT_FS
To compile this as a module, choose M here: the module will be called
vfat.
config FAT_DEFAULT_CODEPAGE
int "Default codepage for FAT"
depends on MSDOS_FS || VFAT_FS
default 437
help
This option should be set to the codepage of your FAT filesystems.
It can be overridden with the 'codepage' mount option.
config FAT_DEFAULT_IOCHARSET
string "Default iocharset for FAT"
depends on VFAT_FS
default "iso8859-1"
help
Set this to the default I/O character set you'd like FAT to use.
It should probably match the character set that most of your
FAT filesystems use, and can be overridden with the 'iocharset'
mount option for FAT filesystems. Note that UTF8 is *not* a
supported charset for FAT filesystems.
config UMSDOS_FS
#dep_tristate ' UMSDOS: Unix-like file system on top of standard MSDOS fs' CONFIG_UMSDOS_FS $CONFIG_MSDOS_FS
# UMSDOS is temporarily broken
......@@ -1562,7 +1581,7 @@ config SMB_NLS_REMOTE
smbmount from samba 2.2.0 or later supports this.
config CIFS
tristate "CIFS support (advanced network filesystem for Samba, Windows and other CIFS compliant servers)(EXPERIMENTAL)"
tristate "CIFS support (advanced network filesystem for Samba, Windows and other CIFS compliant servers)"
depends on INET
select NLS
help
......@@ -1592,7 +1611,16 @@ config CIFS_STATS
depends on CIFS
help
Enabling this option will cause statistics for each server share
mounted by the cifs client to be displayed in /proc/fs/cifs/DebugData
mounted by the cifs client to be displayed in /proc/fs/cifs/Stats
config CIFS_POSIX
bool "CIFS POSIX Extensions (EXPERIMENTAL)"
depends on CIFS
help
Enabling this option will cause the cifs client to attempt to
negotiate a newer dialect with servers, such as Samba 3.0.5
or later, that optionally can handle more POSIX like (rather
than Windows like) file behavior. If unsure, say N.
config NCP_FS
tristate "NCP file system support (to mount NetWare volumes)"
......
......@@ -458,7 +458,6 @@ affs_error(struct super_block *sb, const char *function, const char *fmt, ...)
if (!(sb->s_flags & MS_RDONLY))
printk(KERN_WARNING "AFFS: Remounting filesystem read-only\n");
sb->s_flags |= MS_RDONLY;
AFFS_SB(sb)->s_flags |= SF_READONLY; /* Don't allow to remount rw */
}
void
......
......@@ -272,8 +272,7 @@ affs_alloc_block(struct inode *inode, u32 goal)
return 0;
}
int
affs_init_bitmap(struct super_block *sb)
int affs_init_bitmap(struct super_block *sb, int *flags)
{
struct affs_bm_info *bm;
struct buffer_head *bmap_bh = NULL, *bh = NULL;
......@@ -282,13 +281,13 @@ affs_init_bitmap(struct super_block *sb)
int i, res = 0;
struct affs_sb_info *sbi = AFFS_SB(sb);
if (sb->s_flags & MS_RDONLY)
if (*flags & MS_RDONLY)
return 0;
if (!AFFS_ROOT_TAIL(sb, sbi->s_root_bh)->bm_flag) {
printk(KERN_NOTICE "AFFS: Bitmap invalid - mounting %s read only\n",
sb->s_id);
sb->s_flags |= MS_RDONLY;
*flags |= MS_RDONLY;
return 0;
}
......@@ -301,7 +300,7 @@ affs_init_bitmap(struct super_block *sb)
bm = sbi->s_bitmap = kmalloc(size, GFP_KERNEL);
if (!sbi->s_bitmap) {
printk(KERN_ERR "AFFS: Bitmap allocation failed\n");
return 1;
return -ENOMEM;
}
memset(sbi->s_bitmap, 0, size);
......@@ -316,13 +315,13 @@ affs_init_bitmap(struct super_block *sb)
bh = affs_bread(sb, bm->bm_key);
if (!bh) {
printk(KERN_ERR "AFFS: Cannot read bitmap\n");
res = 1;
res = -EIO;
goto out;
}
if (affs_checksum_block(sb, bh)) {
printk(KERN_WARNING "AFFS: Bitmap %u invalid - mounting %s read only.\n",
bm->bm_key, sb->s_id);
sb->s_flags |= MS_RDONLY;
*flags |= MS_RDONLY;
goto out;
}
pr_debug("AFFS: read bitmap block %d: %d\n", blk, bm->bm_key);
......@@ -338,7 +337,7 @@ affs_init_bitmap(struct super_block *sb)
bmap_bh = affs_bread(sb, be32_to_cpu(bmap_blk[blk]));
if (!bmap_bh) {
printk(KERN_ERR "AFFS: Cannot read bitmap extension\n");
res = 1;
res = -EIO;
goto out;
}
bmap_blk = (u32 *)bmap_bh->b_data;
......@@ -383,3 +382,17 @@ affs_init_bitmap(struct super_block *sb)
affs_brelse(bmap_bh);
return res;
}
/*
 * Release the in-memory AFFS block-allocation bitmap for @sb.
 *
 * Safe to call even when no bitmap was ever allocated (e.g. on
 * read-only mounts, where affs_init_bitmap() returns without
 * allocating): it bails out immediately if s_bitmap is NULL.
 * Pointers are reset to NULL so a subsequent call (put_super after
 * a remount has already freed the bitmap) is harmless.
 */
void affs_free_bitmap(struct super_block *sb)
{
struct affs_sb_info *sbi = AFFS_SB(sb);
/* Nothing to do if the bitmap was never set up. */
if (!sbi->s_bitmap)
return;
affs_brelse(sbi->s_bmap_bh); /* drop the cached bitmap buffer */
sbi->s_bmap_bh = NULL;
sbi->s_last_bmap = ~0; /* presumably invalidates the cached bitmap index — TODO confirm against lookup code */
kfree(sbi->s_bitmap);
sbi->s_bitmap = NULL; /* mark bitmap as freed for repeat callers */
}
......@@ -51,10 +51,9 @@ affs_put_super(struct super_block *sb)
mark_buffer_dirty(sbi->s_root_bh);
}
affs_brelse(sbi->s_bmap_bh);
if (sbi->s_prefix)
kfree(sbi->s_prefix);
kfree(sbi->s_bitmap);
affs_free_bitmap(sb);
affs_brelse(sbi->s_root_bh);
kfree(sbi);
sb->s_fs_info = NULL;
......@@ -288,6 +287,7 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
gid_t gid;
int reserved;
unsigned long mount_flags;
int tmp_flags; /* fix remount prototype... */
pr_debug("AFFS: read_super(%s)\n",data ? (const char *)data : "no options");
......@@ -399,7 +399,6 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
printk(KERN_NOTICE "AFFS: Dircache FS - mounting %s read only\n",
sb->s_id);
sb->s_flags |= MS_RDONLY;
sbi->s_flags |= SF_READONLY;
}
switch (chksum) {
case MUFS_FS:
......@@ -455,8 +454,10 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_root_bh = root_bh;
/* N.B. after this point s_root_bh must be released */
if (affs_init_bitmap(sb))
tmp_flags = sb->s_flags;
if (affs_init_bitmap(sb, &tmp_flags))
goto out_error;
sb->s_flags = tmp_flags;
/* set up enough so that it can read an inode */
......@@ -498,7 +499,7 @@ affs_remount(struct super_block *sb, int *flags, char *data)
int reserved;
int root_block;
unsigned long mount_flags;
unsigned long read_only = sbi->s_flags & SF_READONLY;
int res = 0;
pr_debug("AFFS: remount(flags=0x%x,opts=\"%s\")\n",*flags,data);
......@@ -507,7 +508,7 @@ affs_remount(struct super_block *sb, int *flags, char *data)
if (!parse_options(data,&uid,&gid,&mode,&reserved,&root_block,
&blocksize,&sbi->s_prefix,sbi->s_volume,&mount_flags))
return -EINVAL;
sbi->s_flags = mount_flags | read_only;
sbi->s_flags = mount_flags;
sbi->s_mode = mode;
sbi->s_uid = uid;
sbi->s_gid = gid;
......@@ -518,14 +519,11 @@ affs_remount(struct super_block *sb, int *flags, char *data)
sb->s_dirt = 1;
while (sb->s_dirt)
affs_write_super(sb);
sb->s_flags |= MS_RDONLY;
} else if (!(sbi->s_flags & SF_READONLY)) {
sb->s_flags &= ~MS_RDONLY;
} else {
affs_warning(sb,"remount","Cannot remount fs read/write because of errors");
return -EINVAL;
}
return 0;
affs_free_bitmap(sb);
} else
res = affs_init_bitmap(sb, flags);
return res;
}
static int
......
......@@ -31,7 +31,7 @@ Thanks to those in the community who have submitted detailed bug reports
and debug of problems they have found: Jochen Dolze, David Blaine,
Rene Scharfe, Martin Josefsson, Alexander Wild, Anthony Liguori,
Lars Muller, Urban Widmark, Massimiliano Ferrero, Howard Owen,
Olaf Kirch, Kieron Briggs and others.
Olaf Kirch, Kieron Briggs, Nick Millington and others.
And thanks to the IBM LTC and Power test teams and SuSE testers for
finding multiple bugs during excellent stress test runs.
Version 1.19
------------
Fix /proc/fs/cifs/Stats and DebugData display to handle larger
amounts of return data. Properly limit requests to MAX_REQ (50
is the usual maximum active multiplex SMB/CIFS requests per server).
Do not kill cifsd (and thus hurt the other SMB session) when more than one
session to the same server (but with different userids) exists and one
of the two user's smb sessions is being removed while leaving the other.
Version 1.18
------------
Do not rename hardlinked files (since that should be a noop). Flush
cached write behind data when reopening a file after session abend,
except when already in write. Grab per socket sem during reconnect
to avoid oops in sendmsg if overlapping with reconnect.
to avoid oops in sendmsg if overlapping with reconnect. Do not
reset cached inode file size on readdir for files open for write on
client.
Version 1.17
......
......@@ -62,12 +62,17 @@ cifs_debug_data_read(char *buf, char **beginBuffer, off_t offset,
int count, int *eof, void *data)
{
struct list_head *tmp;
struct list_head *tmp1;
struct mid_q_entry * mid_entry;
struct cifsSesInfo *ses;
struct cifsTconInfo *tcon;
int i;
int length = 0;
char *buf_start = buf;
char * original_buf = buf;
*beginBuffer = buf + offset;
length =
sprintf(buf,
"Display Internal CIFS Data Structures for Debugging\n"
......@@ -94,7 +99,7 @@ cifs_debug_data_read(char *buf, char **beginBuffer, off_t offset,
ses->server->secMode,
atomic_read(&ses->server->inFlight));
/* length = sprintf(buf, "\nMIDs: \n");
length = sprintf(buf, "\nMIDs: \n");
buf += length;
spin_lock(&GlobalMid_Lock);
......@@ -103,11 +108,11 @@ cifs_debug_data_read(char *buf, char **beginBuffer, off_t offset,
mid_q_entry,
qhead);
if(mid_entry) {
length = sprintf(buf,"State: %d com: %d pid: %d tsk: %p\n",mid_entry->midState,mid_entry->command,mid_entry->pid,mid_entry->tsk);
length = sprintf(buf,"State: %d com: %d pid: %d tsk: %p mid %d\n",mid_entry->midState,mid_entry->command,mid_entry->pid,mid_entry->tsk,mid_entry->mid);
buf += length;
}
}
spin_unlock(&GlobalMid_Lock); */
spin_unlock(&GlobalMid_Lock);
}
}
......@@ -152,19 +157,22 @@ cifs_debug_data_read(char *buf, char **beginBuffer, off_t offset,
length = sprintf(buf, "\n");
buf += length;
*eof = 1;
/* BB add code to dump additional info such as TCP session info now */
/*
if (offset >= (buf - buf_start))
{
*beginBuffer = buf;
return 0;
}
*beginBuffer = buf + offset;
if ((buf - buf_start - offset) > count)
return count;
else */
return (buf - buf_start - offset);
/* Now calculate total size of returned data */
length = buf - original_buf;
if(offset + count >= length)
*eof = 1;
if(length < offset) {
*eof = 1;
return 0;
} else {
length = length - offset;
}
if (length > count)
length = count;
return length;
}
int
......@@ -183,12 +191,14 @@ cifs_total_xid_read(char *buf, char **beginBuffer, off_t offset,
#ifdef CONFIG_CIFS_STATS
int
cifs_stats_read(char *buf, char **beginBuffer, off_t offset,
int length, int *eof, void *data)
int count, int *eof, void *data)
{
int item_length,i;
int item_length,i,length;
struct list_head *tmp;
struct cifsTconInfo *tcon;
*beginBuffer = buf + offset;
length = sprintf(buf,
"Resources in use\nCIFS Session: %d\n",
sesInfoAllocCount.counter);
......@@ -235,10 +245,12 @@ cifs_stats_read(char *buf, char **beginBuffer, off_t offset,
atomic_read(&tcon->num_reads),
(long long)(tcon->bytes_read));
buf += item_length;
length += item_length;
item_length = sprintf(buf,"\nWrites: %d Bytes: %lld",
atomic_read(&tcon->num_writes),
(long long)(tcon->bytes_written));
buf += item_length;
length += item_length;
item_length = sprintf(buf,
"\nOpens: %d Deletes: %d\nMkdirs: %d Rmdirs: %d",
atomic_read(&tcon->num_opens),
......@@ -247,10 +259,29 @@ cifs_stats_read(char *buf, char **beginBuffer, off_t offset,
atomic_read(&tcon->num_rmdirs));
buf += item_length;
length += item_length;
item_length = sprintf(buf,
"\nRenames: %d T2 Renames %d",
atomic_read(&tcon->num_renames),
atomic_read(&tcon->num_t2renames));
buf += item_length;
length += item_length;
}
read_unlock(&GlobalSMBSeslock);
buf += sprintf(buf,"\n");
length++;
if(offset + count >= length)
*eof = 1;
if(length < offset) {
*eof = 1;
return 0;
} else {
length = length - offset;
}
if (length > count)
length = count;
return length;
}
#endif
......
......@@ -93,5 +93,5 @@ extern int cifs_setxattr(struct dentry *, const char *, const void *,
size_t, int);
extern ssize_t cifs_getxattr(struct dentry *, const char *, void *, size_t);
extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
#define CIFS_VERSION "1.18"
#define CIFS_VERSION "1.19"
#endif /* _CIFSFS_H */
......@@ -211,6 +211,8 @@ struct cifsTconInfo {
atomic_t num_deletes;
atomic_t num_mkdirs;
atomic_t num_rmdirs;
atomic_t num_renames;
atomic_t num_t2renames;
__u64 bytes_read;
__u64 bytes_written;
spinlock_t stat_lock;
......
......@@ -387,6 +387,7 @@ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
spin_lock(&GlobalMid_Lock);
ses->server->tcpStatus = CifsExiting;
spin_unlock(&GlobalMid_Lock);
rc = -ESHUTDOWN;
}
}
if (pSMB)
......@@ -819,14 +820,20 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
pSMB->AndXCommand = 0xFF; /* none */
pSMB->Fid = smb_file_id; /* netfid stays le */
pSMB->Locks[0].Pid = cpu_to_le16(current->tgid);
temp = cpu_to_le64(len);
pSMB->Locks[0].LengthLow = (__u32)(len & 0xFFFFFFFF);
pSMB->Locks[0].LengthHigh = (__u32)(len>>32);
temp = cpu_to_le64(offset);
pSMB->Locks[0].OffsetLow = (__u32)(offset & 0xFFFFFFFF);
pSMB->Locks[0].OffsetHigh = (__u32)(offset>>32);
pSMB->ByteCount = sizeof (LOCKING_ANDX_RANGE);
if(numLock != 0) {
pSMB->Locks[0].Pid = cpu_to_le16(current->tgid);
/* BB where to store pid high? */
temp = cpu_to_le64(len);
pSMB->Locks[0].LengthLow = (__u32)(temp & 0xFFFFFFFF);
pSMB->Locks[0].LengthHigh = (__u32)(temp>>32);
temp = cpu_to_le64(offset);
pSMB->Locks[0].OffsetLow = (__u32)(temp & 0xFFFFFFFF);
pSMB->Locks[0].OffsetHigh = (__u32)(temp>>32);
pSMB->ByteCount = sizeof (LOCKING_ANDX_RANGE);
} else {
/* oplock break */
pSMB->ByteCount = 0;
}
pSMB->hdr.smb_buf_length += pSMB->ByteCount;
pSMB->ByteCount = cpu_to_le16(pSMB->ByteCount);
......@@ -941,7 +948,14 @@ CIFSSMBRename(const int xid, struct cifsTconInfo *tcon,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, ("Send error in rename = %d", rc));
}
#ifdef CONFIG_CIFS_STATS
else {
atomic_inc(&tcon->num_renames);
}
#endif
if (pSMB)
cifs_buf_release(pSMB);
......@@ -1017,7 +1031,11 @@ int CIFSSMBRenameOpenFile(const int xid,struct cifsTconInfo *pTcon,
if (rc) {
cFYI(1,("Send error in Rename (by file handle) = %d", rc));
}
#ifdef CONFIG_CIFS_STATS
else {
atomic_inc(&pTcon->num_t2renames);
}
#endif
if (pSMB)
cifs_buf_release(pSMB);
......
......@@ -175,7 +175,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
if(server->tcpStatus != CifsExiting)
server->tcpStatus = CifsGood;
spin_unlock(&GlobalMid_Lock);
atomic_set(&server->inFlight,0);
/* atomic_set(&server->inFlight,0);*/
wake_up(&server->response_q);
}
}
......@@ -453,7 +453,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
spin_unlock(&GlobalMid_Lock);
read_unlock(&GlobalSMBSeslock);
set_current_state(TASK_INTERRUPTIBLE);
/* 1/8th of sec should be more than enough time for them to exit */
/* 1/8th of sec is more than enough time for them to exit */
schedule_timeout(HZ/8);
}
......@@ -468,7 +468,8 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
}
kfree(server);
cFYI(1, ("About to exit from demultiplex thread"));
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ/4);
return 0;
}
......@@ -2769,6 +2770,7 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
int rc = 0;
int xid;
struct cifsSesInfo *ses = NULL;
struct task_struct *cifsd_task;
xid = GetXid();
......@@ -2781,19 +2783,25 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
}
tconInfoFree(cifs_sb->tcon);
if ((ses) && (ses->server)) {
/* save off task so we do not refer to ses later */
cifsd_task = ses->server->tsk;
cFYI(1, ("About to do SMBLogoff "));
rc = CIFSSMBLogoff(xid, ses);
if (rc == -EBUSY) {
FreeXid(xid);
return 0;
}
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ / 4); /* give captive thread time to exit */
if((ses->server) && (ses->server->ssocket)) {
cFYI(1,("Waking up socket by sending it signal "));
send_sig(SIGKILL,ses->server->tsk,1);
}
} else if (rc == -ESHUTDOWN) {
/* should we add wake_up_all(&server->request_q);
and add a check in the check inFlight loop
for the session ending */
set_current_state(TASK_INTERRUPTIBLE);
/* give captive thread time to exit */
schedule_timeout(HZ / 4);
cFYI(1,("Waking up socket by sending it signal"));
send_sig(SIGKILL,cifsd_task,1);
rc = 0;
} /* else - we have an smb session
left on this socket do not kill cifsd */
} else
cFYI(1, ("No session or bad tcon"));
}
......
......@@ -588,9 +588,10 @@ cifs_write(struct file * file, const char *write_data,
if(file->f_dentry == NULL)
return -EBADF;
xid = GetXid();
cifs_sb = CIFS_SB(file->f_dentry->d_sb);
if(cifs_sb == NULL) {
return -EBADF;
}
pTcon = cifs_sb->tcon;
/*cFYI(1,
......@@ -598,11 +599,12 @@ cifs_write(struct file * file, const char *write_data,
*poffset, file->f_dentry->d_name.name)); */
if (file->private_data == NULL) {
FreeXid(xid);
return -EBADF;
} else {
open_file = (struct cifsFileInfo *) file->private_data;
}
open_file = (struct cifsFileInfo *) file->private_data;
xid = GetXid();
if(file->f_dentry->d_inode == NULL) {
FreeXid(xid);
return -EBADF;
......@@ -620,10 +622,22 @@ cifs_write(struct file * file, const char *write_data,
if(file->private_data == NULL) {
/* file has been closed on us */
FreeXid(xid);
/* if we have gotten here we have written some data
and blocked, and the file has been freed on us
while we blocked so return what we managed to write */
return total_written;
}
open_file = (struct cifsFileInfo *) file->private_data;
if(open_file->closePend) {
FreeXid(xid);
if(total_written)
return total_written;
else
return -EBADF;
}
if ((open_file->invalidHandle) && (!open_file->closePend)) {
if((file->f_dentry == NULL) || (file->f_dentry->d_inode == NULL)) {
if (open_file->invalidHandle) {
if((file->f_dentry == NULL) ||
(file->f_dentry->d_inode == NULL)) {
FreeXid(xid);
return total_written;
}
......
......@@ -200,23 +200,35 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
}
/* Ensure that we do not send more than 50 overlapping requests
to the same server. We may make this configurable later or
use ses->maxReq */
/* can not count locking commands against the total since
they are allowed to block on server */
if(long_op < 3) {
/* update # of requests on the wire to this server */
atomic_inc(&ses->server->inFlight);
}
if(atomic_read(&ses->server->inFlight) > CIFS_MAX_REQ) {
wait_event(ses->server->request_q,atomic_read(&ses->server->inFlight) <= CIFS_MAX_REQ);
to the same server. We may make this configurable later or
use ses->maxReq */
if(long_op == -1) {
/* oplock breaks must not be held up */
atomic_inc(&ses->server->inFlight);
} else {
spin_lock(&GlobalMid_Lock);
while(1) {
if(atomic_read(&ses->server->inFlight) >= CIFS_MAX_REQ){
spin_unlock(&GlobalMid_Lock);
wait_event(ses->server->request_q,
atomic_read(&ses->server->inFlight)
< CIFS_MAX_REQ);
spin_lock(&GlobalMid_Lock);
} else {
/* can not count locking commands against total since
they are allowed to block on server */
if(long_op < 3) {
/* update # of requests on the wire to server */
atomic_inc(&ses->server->inFlight);
}
spin_unlock(&GlobalMid_Lock);
break;
}
}
}
/* make sure that we sign in the same order that we send on this socket
and avoid races inside tcp sendmsg code that could cause corruption
of smb data */
and avoid races inside tcp sendmsg code that could cause corruption
of smb data */
down(&ses->server->tcpSem);
......
......@@ -23,6 +23,14 @@
#include <linux/parser.h>
#include <asm/unaligned.h>
#ifndef CONFIG_FAT_DEFAULT_IOCHARSET
/* if the user doesn't select VFAT, this is undefined. */
#define CONFIG_FAT_DEFAULT_IOCHARSET ""
#endif
static int fat_default_codepage = CONFIG_FAT_DEFAULT_CODEPAGE;
static char fat_default_iocharset[] = CONFIG_FAT_DEFAULT_IOCHARSET;
/*
* New FAT inode stuff. We do the following:
* a) i_ino is constant and has nothing with on-disk location.
......@@ -166,20 +174,17 @@ void fat_put_super(struct super_block *sb)
if (sbi->nls_disk) {
unload_nls(sbi->nls_disk);
sbi->nls_disk = NULL;
sbi->options.codepage = 0;
sbi->options.codepage = fat_default_codepage;
}
if (sbi->nls_io) {
unload_nls(sbi->nls_io);
sbi->nls_io = NULL;
}
/*
* Note: the iocharset option might have been specified
* without enabling nls_io, so check for it here.
*/
if (sbi->options.iocharset) {
if (sbi->options.iocharset != fat_default_iocharset) {
kfree(sbi->options.iocharset);
sbi->options.iocharset = NULL;
sbi->options.iocharset = fat_default_iocharset;
}
sb->s_fs_info = NULL;
kfree(sbi);
}
......@@ -196,11 +201,11 @@ static int fat_show_options(struct seq_file *m, struct vfsmount *mnt)
seq_printf(m, ",gid=%u", opts->fs_gid);
seq_printf(m, ",fmask=%04o", opts->fs_fmask);
seq_printf(m, ",dmask=%04o", opts->fs_dmask);
if (sbi->nls_disk)
if (sbi->nls_disk && opts->codepage != fat_default_codepage)
seq_printf(m, ",codepage=%s", sbi->nls_disk->charset);
if (isvfat) {
if (sbi->nls_io
&& strcmp(sbi->nls_io->charset, CONFIG_NLS_DEFAULT))
if (sbi->nls_io &&
strcmp(opts->iocharset, fat_default_iocharset))
seq_printf(m, ",iocharset=%s", sbi->nls_io->charset);
switch (opts->shortname) {
......@@ -331,14 +336,15 @@ static int parse_options(char *options, int is_vfat, int *debug,
char *p;
substring_t args[MAX_OPT_ARGS];
int option;
char *iocharset;
opts->isvfat = is_vfat;
opts->fs_uid = current->uid;
opts->fs_gid = current->gid;
opts->fs_fmask = opts->fs_dmask = current->fs->umask;
opts->codepage = 0;
opts->iocharset = NULL;
opts->codepage = fat_default_codepage;
opts->iocharset = fat_default_iocharset;
if (is_vfat)
opts->shortname = VFAT_SFN_DISPLAY_LOWER|VFAT_SFN_CREATE_WIN95;
else
......@@ -351,7 +357,7 @@ static int parse_options(char *options, int is_vfat, int *debug,
*debug = 0;
if (!options)
return 1;
return 0;
while ((p = strsep(&options, ",")) != NULL) {
int token;
......@@ -437,10 +443,12 @@ static int parse_options(char *options, int is_vfat, int *debug,
/* vfat specific */
case Opt_charset:
kfree(opts->iocharset);
opts->iocharset = match_strdup(&args[0]);
if (!opts->iocharset)
return 0;
if (opts->iocharset != fat_default_iocharset)
kfree(opts->iocharset);
iocharset = match_strdup(&args[0]);
if (!iocharset)
return -ENOMEM;
opts->iocharset = iocharset;
break;
case Opt_shortname_lower:
opts->shortname = VFAT_SFN_DISPLAY_LOWER
......@@ -486,14 +494,20 @@ static int parse_options(char *options, int is_vfat, int *debug,
default:
printk(KERN_ERR "FAT: Unrecognized mount option \"%s\" "
"or missing value\n", p);
return 0;
return -EINVAL;
}
}
/* UTF8 doesn't provide FAT semantics */
if (!strcmp(opts->iocharset, "utf8")) {
printk(KERN_ERR "FAT: utf8 is not a valid IO charset"
" for FAT filesystems\n");
return -EINVAL;
}
if (opts->unicode_xlate)
opts->utf8 = 0;
return 1;
return 0;
}
static int fat_calc_dir_size(struct inode *inode)
......@@ -784,7 +798,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent,
struct msdos_sb_info *sbi;
u16 logical_sector_size;
u32 total_sectors, total_clusters, fat_clusters, rootdir_sectors;
int debug, cp, first;
int debug, first;
unsigned int media;
long error;
char buf[50];
......@@ -801,8 +815,8 @@ int fat_fill_super(struct super_block *sb, void *data, int silent,
sb->s_export_op = &fat_export_ops;
sbi->dir_ops = fs_dir_inode_ops;
error = -EINVAL;
if (!parse_options(data, isvfat, &debug, &sbi->options))
error = parse_options(data, isvfat, &debug, &sbi->options);
if (error)
goto out_fail;
fat_cache_init(sb);
......@@ -1009,31 +1023,21 @@ int fat_fill_super(struct super_block *sb, void *data, int silent,
}
error = -EINVAL;
cp = sbi->options.codepage ? sbi->options.codepage : 437;
sprintf(buf, "cp%d", cp);
sprintf(buf, "cp%d", sbi->options.codepage);
sbi->nls_disk = load_nls(buf);
if (!sbi->nls_disk) {
/* Fail only if explicit charset specified */
if (sbi->options.codepage != 0) {
printk(KERN_ERR "FAT: codepage %s not found\n", buf);
goto out_fail;
}
sbi->options.codepage = 0; /* already 0?? */
sbi->nls_disk = load_nls_default();
printk(KERN_ERR "FAT: codepage %s not found\n", buf);
goto out_fail;
}
/* FIXME: utf8 is using iocharset for upper/lower conversion */
if (sbi->options.isvfat) {
if (sbi->options.iocharset != NULL) {
sbi->nls_io = load_nls(sbi->options.iocharset);
if (!sbi->nls_io) {
printk(KERN_ERR
"FAT: IO charset %s not found\n",
sbi->options.iocharset);
goto out_fail;
}
} else
sbi->nls_io = load_nls_default();
sbi->nls_io = load_nls(sbi->options.iocharset);
if (!sbi->nls_io) {
printk(KERN_ERR "FAT: IO charset %s not found\n",
sbi->options.iocharset);
goto out_fail;
}
}
error = -ENOMEM;
......@@ -1068,7 +1072,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent,
unload_nls(sbi->nls_io);
if (sbi->nls_disk)
unload_nls(sbi->nls_disk);
if (sbi->options.iocharset)
if (sbi->options.iocharset != fat_default_iocharset)
kfree(sbi->options.iocharset);
sb->s_fs_info = NULL;
kfree(sbi);
......
#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__
#include <asm/system.h> /* local_irq_XXX() */
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
......@@ -16,38 +18,124 @@ typedef struct { int counter; } atomic_t;
#define atomic_read(v) ((v)->counter)
#define atomic_set(v, i) (((v)->counter) = i)
static __inline__ void atomic_add(int i, atomic_t *v)
static inline void atomic_add(int i, atomic_t *v)
{
__asm__ __volatile__("addl %1,%0" : "+m" (*v) : "id" (i));
}
static inline void atomic_sub(int i, atomic_t *v)
{
__asm__ __volatile__("subl %1,%0" : "+m" (*v) : "id" (i));
}
static inline void atomic_inc(atomic_t *v)
{
__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}
static inline void atomic_dec(atomic_t *v)
{
__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}
static inline int atomic_dec_and_test(atomic_t *v)
{
char c;
__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
return c != 0;
}
static inline int atomic_inc_and_test(atomic_t *v)
{
char c;
__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
return c != 0;
}
#ifdef CONFIG_RMW_INSNS
static inline int atomic_add_return(int i, atomic_t *v)
{
__asm__ __volatile__("addl %1,%0" : "=m" (*v) : "id" (i), "0" (*v));
int t, tmp;
__asm__ __volatile__(
"1: movel %2,%1\n"
" addl %3,%1\n"
" casl %2,%1,%0\n"
" jne 1b"
: "+m" (*v), "=&d" (t), "=&d" (tmp)
: "g" (i), "2" (atomic_read(v)));
return t;
}
/*
 * Atomically subtract i from *v and return the new value.
 * CONFIG_RMW_INSNS variant: load / sub / "casl" retry loop.
 */
static inline int atomic_sub_return(int i, atomic_t *v)
{
	int t, tmp;

	__asm__ __volatile__(
			"1:	movel %2,%1\n"
			"	subl %3,%1\n"
			"	casl %2,%1,%0\n"
			"	jne 1b"
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}
#else /* !CONFIG_RMW_INSNS */
/*
 * Non-RMW variant (no cas instruction available): make add-and-return
 * atomic by masking local interrupts around a plain read/modify/write.
 */
static inline int atomic_add_return(int i, atomic_t * v)
{
	unsigned long flags;
	int t;
	local_irq_save(flags);
	t = atomic_read(v);
	t += i;
	atomic_set(v, t);
	local_irq_restore(flags);
	return t;
}
/*
 * Non-RMW variant: atomic subtract-and-return by masking local
 * interrupts around a plain read/modify/write of the counter.
 */
static inline int atomic_sub_return(int i, atomic_t * v)
{
	unsigned long flags;
	int t;

	local_irq_save(flags);
	t = atomic_read(v);
	t -= i;
	atomic_set(v, t);
	local_irq_restore(flags);
	return t;
}
#endif /* !CONFIG_RMW_INSNS */
#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v))
/* Subtract i from v->counter and return true iff the result is zero. */
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;

	__asm__ __volatile__("subl %2,%1; seq %0" : "=d" (c), "+m" (*v): "g" (i));
	return c != 0;
}
/*
 * Add i to v->counter and return true iff the result is negative
 * ("smi" records the N condition flag of the add).
 */
static inline int atomic_add_negative(int i, atomic_t *v)
{
	char c;

	__asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "+m" (*v): "g" (i));
	return c != 0;
}
/* Clear the bits of "mask" in the word at *v (single "andl" insn). */
static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
}
/* Set the bits of "mask" in the word at *v (single "orl" insn). */
static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
}
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
......
......@@ -23,25 +23,24 @@
#define __test_and_set_bit(nr,vaddr) test_and_set_bit(nr,vaddr)
/*
 * Constant-nr variant: address the single byte holding the bit —
 * byte (nr ^ 31) / 8 of the big-endian 32-bit word — and "bset"
 * bit (nr & 7); "sne" records whether the bit was already set.
 */
static inline int __constant_test_and_set_bit(int nr, unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bset %2,%1; sne %0"
			: "=d" (retval), "+m" (*p)
			: "di" (nr & 7));

	return retval;
}
/*
 * Generic variant for non-constant nr: one-bit "bfset" at bitfield
 * offset nr ^ 31 (bitfield insns count from the MSB); "sne" records
 * the previous bit value.
 */
static inline int __generic_test_and_set_bit(int nr, unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
			: "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");

	return retval;
}
......@@ -53,16 +52,17 @@ static inline int __generic_test_and_set_bit(int nr,
#define __set_bit(nr,vaddr) set_bit(nr,vaddr)
/* Constant-nr variant: "bset" on the byte that contains the bit. */
static inline void __constant_set_bit(int nr, unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bset %1,%0"
			: "+m" (*p) : "di" (nr & 7));
}
/* Generic variant: one-bit "bfset" at bitfield offset nr ^ 31. */
static inline void __generic_set_bit(int nr, unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfset %1{%0:#1}"
			: : "d" (nr^31), "o" (*vaddr) : "memory");
}
#define test_and_clear_bit(nr,vaddr) \
......@@ -72,25 +72,24 @@ static inline void __generic_set_bit(int nr, volatile unsigned long *vaddr)
#define __test_and_clear_bit(nr,vaddr) test_and_clear_bit(nr,vaddr)
/*
 * Constant-nr variant: "bclr" on the byte holding the bit; "sne"
 * records whether the bit was previously set.
 */
static inline int __constant_test_and_clear_bit(int nr, unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bclr %2,%1; sne %0"
			: "=d" (retval), "+m" (*p)
			: "di" (nr & 7));

	return retval;
}
/* Generic variant: one-bit "bfclr" at bitfield offset nr ^ 31. */
static inline int __generic_test_and_clear_bit(int nr, unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
			: "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");

	return retval;
}
......@@ -107,16 +106,17 @@ static inline int __generic_test_and_clear_bit(int nr,
__generic_clear_bit(nr, vaddr))
#define __clear_bit(nr,vaddr) clear_bit(nr,vaddr)
/* Constant-nr variant: "bclr" on the byte that contains the bit. */
static inline void __constant_clear_bit(int nr, unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bclr %1,%0"
			: "+m" (*p) : "di" (nr & 7));
}
/* Generic variant: one-bit "bfclr" at bitfield offset nr ^ 31. */
static inline void __generic_clear_bit(int nr, unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfclr %1{%0:#1}"
			: : "d" (nr^31), "o" (*vaddr) : "memory");
}
#define test_and_change_bit(nr,vaddr) \
......@@ -127,25 +127,24 @@ static inline void __generic_clear_bit(int nr, volatile unsigned long *vaddr)
#define __test_and_change_bit(nr,vaddr) test_and_change_bit(nr,vaddr)
#define __change_bit(nr,vaddr) change_bit(nr,vaddr)
/*
 * Constant-nr variant: "bchg" toggles the bit in its containing byte;
 * "sne" records whether it was previously set.
 */
static inline int __constant_test_and_change_bit(int nr, unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bchg %2,%1; sne %0"
			: "=d" (retval), "+m" (*p)
			: "di" (nr & 7));

	return retval;
}
/* Generic variant: one-bit "bfchg" at bitfield offset nr ^ 31. */
static inline int __generic_test_and_change_bit(int nr, unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
			: "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");

	return retval;
}
......@@ -155,21 +154,22 @@ static inline int __generic_test_and_change_bit(int nr,
__constant_change_bit(nr, vaddr) : \
__generic_change_bit(nr, vaddr))
/* Constant-nr variant: toggle the bit with "bchg" on its byte. */
static inline void __constant_change_bit(int nr, unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bchg %1,%0"
			: "+m" (*p) : "di" (nr & 7));
}
/* Generic variant: one-bit "bfchg" at bitfield offset nr ^ 31. */
static inline void __generic_change_bit(int nr, unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfchg %1{%0:#1}"
			: : "d" (nr^31), "o" (*vaddr) : "memory");
}
/* Non-atomic read of bit nr in the bitmap, indexed as 32-bit words. */
static inline int test_bit(int nr, const unsigned long *vaddr)
{
	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
}
static inline int find_first_zero_bit(const unsigned long *vaddr,
......@@ -364,76 +364,27 @@ static inline int minix_find_first_zero_bit(const void *vaddr, unsigned size)
return ((p - addr) << 4) + (res ^ 31);
}
static inline int minix_test_and_set_bit(int nr, volatile void *vaddr)
{
char retval;
__asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
: "=d" (retval) : "d" (nr^15), "m" (*(volatile char *)vaddr) : "memory");
return retval;
}
#define minix_set_bit(nr,addr) ((void)minix_test_and_set_bit(nr,addr))
#define minix_test_and_set_bit(nr, addr) test_and_set_bit((nr) ^ 16, (unsigned long *)(addr))
#define minix_set_bit(nr,addr) set_bit((nr) ^ 16, (unsigned long *)(addr))
#define minix_test_and_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 16, (unsigned long *)(addr))
/*
 * Minix bitmaps are streams of little-endian 16-bit words: index the
 * buffer as unsigned shorts and test bit (nr & 15) of word nr >> 4.
 */
static inline int minix_test_bit(int nr, const void *vaddr)
{
	const unsigned short *p = vaddr;

	return (p[nr >> 4] & (1U << (nr & 15))) != 0;
}
/* Bitmap functions for the ext2 filesystem. */
static inline int ext2_set_bit(int nr, volatile void *vaddr)
{
char retval;
__asm__ __volatile__ ("bfset %2{%1,#1}; sne %0"
: "=d" (retval) : "d" (nr^7), "m" (*(volatile char *) vaddr) : "memory");
return retval;
}
static inline int ext2_clear_bit(int nr, volatile void *vaddr)
{
char retval;
__asm__ __volatile__ ("bfclr %2{%1,#1}; sne %0"
: "=d" (retval) : "d" (nr^7), "m" (*(volatile char *) vaddr) : "memory");
return retval;
}
#define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 24, (unsigned long *)(addr))
#define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 24, (unsigned long *)(addr))
#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr))
#define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr))
#define ext2_set_bit_atomic(lock, nr, addr) \
({ \
int ret; \
spin_lock(lock); \
ret = ext2_set_bit((nr), (addr)); \
spin_unlock(lock); \
ret; \
})
#define ext2_clear_bit_atomic(lock, nr, addr) \
({ \
int ret; \
spin_lock(lock); \
ret = ext2_clear_bit((nr), (addr)); \
spin_unlock(lock); \
ret; \
})
/*
 * Ext2 bitmaps are little-endian byte streams: test bit (nr & 7)
 * of byte nr >> 3.
 */
static inline int ext2_test_bit(int nr, const void *vaddr)
{
	const unsigned char *p = vaddr;

	return (p[nr >> 3] & (1U << (nr & 7))) != 0;
}
static inline int ext2_find_first_zero_bit(const void *vaddr, unsigned size)
......
......@@ -120,7 +120,7 @@ extern int isa_sex;
* be compiled in so the case statement will be optimised away
*/
static inline u8 *isa_itb(long addr)
static inline u8 *isa_itb(unsigned long addr)
{
switch(ISA_TYPE)
{
......@@ -136,7 +136,7 @@ static inline u8 *isa_itb(long addr)
default: return 0; /* avoid warnings, just in case */
}
}
static inline u16 *isa_itw(long addr)
static inline u16 *isa_itw(unsigned long addr)
{
switch(ISA_TYPE)
{
......@@ -152,7 +152,7 @@ static inline u16 *isa_itw(long addr)
default: return 0; /* avoid warnings, just in case */
}
}
static inline u8 *isa_mtb(long addr)
static inline u8 *isa_mtb(unsigned long addr)
{
switch(ISA_TYPE)
{
......@@ -168,7 +168,7 @@ static inline u8 *isa_mtb(long addr)
default: return 0; /* avoid warnings, just in case */
}
}
static inline u16 *isa_mtw(long addr)
static inline u16 *isa_mtw(unsigned long addr)
{
switch(ISA_TYPE)
{
......@@ -191,10 +191,14 @@ static inline u16 *isa_mtw(long addr)
#define isa_outb(val,port) out_8(isa_itb(port),(val))
#define isa_outw(val,port) (ISA_SEX ? out_be16(isa_itw(port),(val)) : out_le16(isa_itw(port),(val)))
#define isa_readb(p) in_8(isa_mtb(p))
#define isa_readw(p) (ISA_SEX ? in_be16(isa_mtw(p)) : in_le16(isa_mtw(p)))
#define isa_writeb(val,p) out_8(isa_mtb(p),(val))
#define isa_writew(val,p) (ISA_SEX ? out_be16(isa_mtw(p),(val)) : out_le16(isa_mtw(p),(val)))
#define isa_readb(p) in_8(isa_mtb((unsigned long)(p)))
#define isa_readw(p) \
(ISA_SEX ? in_be16(isa_mtw((unsigned long)(p))) \
: in_le16(isa_mtw((unsigned long)(p))))
#define isa_writeb(val,p) out_8(isa_mtb((unsigned long)(p)),(val))
#define isa_writew(val,p) \
(ISA_SEX ? out_be16(isa_mtw((unsigned long)(p)),(val)) \
: out_le16(isa_mtw((unsigned long)(p)),(val)))
static inline void isa_delay(void)
{
......@@ -215,17 +219,21 @@ static inline void isa_delay(void)
#define isa_inb_p(p) ({u8 v=isa_inb(p);isa_delay();v;})
#define isa_outb_p(v,p) ({isa_outb((v),(p));isa_delay();})
#define isa_inw_p(p) ({u16 v=isa_inw(p);isa_delay();v;})
#define isa_outw_p(v,p) ({isa_outw((v),(p));isa_delay();})
#define isa_inl_p(p) ({u32 v=isa_inl(p);isa_delay();v;})
#define isa_outl_p(v,p) ({isa_outl((v),(p));isa_delay();})
#define isa_insb(port, buf, nr) raw_insb(isa_itb(port), (buf), (nr))
#define isa_outsb(port, buf, nr) raw_outsb(isa_itb(port), (buf), (nr))
#define isa_insb(port, buf, nr) raw_insb(isa_itb(port), (u8 *)(buf), (nr))
#define isa_outsb(port, buf, nr) raw_outsb(isa_itb(port), (u8 *)(buf), (nr))
#define isa_insw(port, buf, nr) \
(ISA_SEX ? raw_insw(isa_itw(port), (buf), (nr)) : \
raw_insw_swapw(isa_itw(port), (buf), (nr)))
(ISA_SEX ? raw_insw(isa_itw(port), (u16 *)(buf), (nr)) : \
raw_insw_swapw(isa_itw(port), (u16 *)(buf), (nr)))
#define isa_outsw(port, buf, nr) \
(ISA_SEX ? raw_outsw(isa_itw(port), (buf), (nr)) : \
raw_outsw_swapw(isa_itw(port), (buf), (nr)))
(ISA_SEX ? raw_outsw(isa_itw(port), (u16 *)(buf), (nr)) : \
raw_outsw_swapw(isa_itw(port), (u16 *)(buf), (nr)))
#endif /* CONFIG_ISA */
......@@ -235,9 +243,13 @@ static inline void isa_delay(void)
#define outb isa_outb
#define outb_p isa_outb_p
#define inw isa_inw
#define inw_p isa_inw_p
#define outw isa_outw
#define outw_p isa_outw_p
#define inl isa_inw
#define inl_p isa_inw_p
#define outl isa_outw
#define outl_p isa_outw_p
#define insb isa_insb
#define insw isa_insw
#define outsb isa_outsb
......@@ -281,10 +293,16 @@ static inline void isa_delay(void)
#define inb(port) ((port)<1024 ? isa_inb(port) : in_8(port))
#define inb_p(port) ((port)<1024 ? isa_inb_p(port) : in_8(port))
#define inw(port) ((port)<1024 ? isa_inw(port) : in_le16(port))
#define inw_p(port) ((port)<1024 ? isa_inw_p(port) : in_le16(port))
#define inl(port) ((port)<1024 ? isa_inl(port) : in_le32(port))
#define inl_p(port) ((port)<1024 ? isa_inl_p(port) : in_le32(port))
#define outb(val,port) ((port)<1024 ? isa_outb((val),(port)) : out_8((port),(val)))
#define outb_p(val,port) ((port)<1024 ? isa_outb_p((val),(port)) : out_8((port),(val)))
#define outw(val,port) ((port)<1024 ? isa_outw((val),(port)) : out_le16((port),(val)))
#define outw_p(val,port) ((port)<1024 ? isa_outw_p((val),(port)) : out_le16((port),(val)))
#define outl(val,port) ((port)<1024 ? isa_outl((val),(port)) : out_le32((port),(val)))
#define outl_p(val,port) ((port)<1024 ? isa_outl_p((val),(port)) : out_le32((port),(val)))
#endif
#endif /* CONFIG_PCI */
......
......@@ -52,15 +52,13 @@ static inline void copy_page(void *to, void *from)
static inline void clear_page(void *page)
{
unsigned long data, tmp;
void *sp = page;
unsigned long tmp;
unsigned long *sp = page;
data = 0;
*((unsigned long *)(page))++ = 0;
*((unsigned long *)(page))++ = 0;
*((unsigned long *)(page))++ = 0;
*((unsigned long *)(page))++ = 0;
*sp++ = 0;
*sp++ = 0;
*sp++ = 0;
*sp++ = 0;
__asm__ __volatile__("1:\t"
".chip 68040\n\t"
......@@ -69,8 +67,8 @@ static inline void clear_page(void *page)
"subqw #8,%2\n\t"
"subqw #8,%2\n\t"
"dbra %1,1b\n\t"
: "=a" (page), "=d" (tmp)
: "a" (sp), "0" (page),
: "=a" (sp), "=d" (tmp)
: "a" (page), "0" (sp),
"1" ((PAGE_SIZE - 16) / 16 - 1));
}
......
......@@ -290,9 +290,7 @@ static inline void * __memset_g(void * s, int c, size_t count)
static inline void * __memset_page(void * s,int c,size_t count)
{
unsigned long data, tmp;
void *xs, *sp;
xs = sp = s;
void *xs = s;
c = c & 255;
data = c | (c << 8);
......@@ -303,10 +301,11 @@ static inline void * __memset_page(void * s,int c,size_t count)
if (((unsigned long) s) & 0x0f)
__memset_g(s, c, count);
else{
*((unsigned long *)(s))++ = data;
*((unsigned long *)(s))++ = data;
*((unsigned long *)(s))++ = data;
*((unsigned long *)(s))++ = data;
unsigned long *sp = s;
*sp++ = data;
*sp++ = data;
*sp++ = data;
*sp++ = data;
__asm__ __volatile__("1:\t"
".chip 68040\n\t"
......@@ -315,8 +314,8 @@ static inline void * __memset_page(void * s,int c,size_t count)
"subqw #8,%2\n\t"
"subqw #8,%2\n\t"
"dbra %1,1b\n\t"
: "=a" (s), "=d" (tmp)
: "a" (sp), "0" (s), "1" ((count - 16) / 16 - 1)
: "=a" (sp), "=d" (tmp)
: "a" (s), "0" (sp), "1" ((count - 16) / 16 - 1)
);
}
......
......@@ -6,10 +6,8 @@ typedef int greg_t;
typedef greg_t gregset_t[NGREG];
typedef struct fpregset {
int f_pcr;
int f_psr;
int f_fpiaddr;
int f_fpregs[8][3];
int f_fpcntl[3];
int f_fpregs[8*3];
} fpregset_t;
struct mcontext {
......
......@@ -36,7 +36,8 @@ extern u32 affs_count_free_bits(u32 blocksize, const void *data);
extern u32 affs_count_free_blocks(struct super_block *s);
extern void affs_free_block(struct super_block *sb, u32 block);
extern u32 affs_alloc_block(struct inode *inode, u32 goal);
extern int affs_init_bitmap(struct super_block *sb);
extern int affs_init_bitmap(struct super_block *sb, int *flags);
extern void affs_free_bitmap(struct super_block *sb);
/* namei.c */
......
......@@ -47,7 +47,6 @@ struct affs_sb_info {
#define SF_OFS 0x0200 /* Old filesystem */
#define SF_PREFIX 0x0400 /* Buffer for prefix is allocated */
#define SF_VERBOSE 0x0800 /* Talk about fs when mounting */
#define SF_READONLY 0x1000 /* Don't allow to remount rw */
/* short cut to get to the affs specific sb data */
static inline struct affs_sb_info *AFFS_SB(struct super_block *sb)
......
......@@ -22,6 +22,10 @@ struct sock_extended_err
#ifdef __KERNEL__
#include <linux/config.h>
#include <net/ip.h>
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
#include <linux/ipv6.h>
#endif
#define SKB_EXT_ERR(skb) ((struct sock_exterr_skb *) ((skb)->cb))
......
......@@ -27,6 +27,7 @@
#include <linux/highmem.h>
#include <linux/poll.h>
#include <linux/net.h>
#include <net/checksum.h>
#define HAVE_ALLOC_SKB /* For the drivers to know */
#define HAVE_ALIGNABLE_SKB /* Ditto 8) */
......@@ -995,6 +996,39 @@ static inline struct sk_buff *skb_padto(struct sk_buff *skb, unsigned int len)
return skb_pad(skb, len-size);
}
/*
 * Append @copy bytes of user data to the tail of @skb.
 *
 * When the skb is accumulating a software checksum (CHECKSUM_NONE),
 * copy and checksum in a single pass and fold the partial sum into
 * skb->csum; otherwise do a plain copy_from_user().  If the user copy
 * fails, the skb is trimmed back to its original length and -EFAULT
 * is returned; returns 0 on success.
 */
static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;	/* length before the append, for csum offset / rollback */
	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		unsigned int csum = csum_and_copy_from_user(from,
							    skb_put(skb, copy),
							    copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;
	/* User copy faulted: undo the skb_put() before reporting. */
	__skb_trim(skb, off);
	return -EFAULT;
}
/*
 * Return true when (page, off) starts exactly where fragment i - 1 of
 * @skb ends within the same page, i.e. new data can be merged into
 * that fragment instead of consuming a new frag slot.  Always false
 * for i == 0 (no previous fragment).
 */
static inline int skb_can_coalesce(struct sk_buff *skb, int i,
				   struct page *page, int off)
{
	if (i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
		return page == frag->page &&
		       off == frag->page_offset + frag->size;
	}
	return 0;
}
/**
* skb_linearize - convert paged skb to linear one
* @skb: buffer to linarize
......@@ -1058,6 +1092,8 @@ extern unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb,
int offset, u8 *to, int len,
unsigned int csum);
extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void skb_split(struct sk_buff *skb,
struct sk_buff *skb1, const u32 len);
extern void skb_init(void);
extern void skb_add_mtu(int mtu);
......
......@@ -16,83 +16,15 @@
* 2 of the License, or (at your option) any later version.
*/
/*
* Fixes:
*
* Ralf Baechle : generic ipv6 checksum
* <ralf@waldorf-gmbh.de>
*/
#ifndef _CHECKSUM_H
#define _CHECKSUM_H
#include <linux/errno.h>
#include <asm/types.h>
#include <asm/byteorder.h>
#include <net/ip.h>
#include <linux/in6.h>
#include <asm/uaccess.h>
#include <asm/checksum.h>
#ifndef _HAVE_ARCH_IPV6_CSUM
static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
struct in6_addr *daddr,
__u16 len,
unsigned short proto,
unsigned int csum)
{
int carry;
__u32 ulen;
__u32 uproto;
csum += saddr->s6_addr32[0];
carry = (csum < saddr->s6_addr32[0]);
csum += carry;
csum += saddr->s6_addr32[1];
carry = (csum < saddr->s6_addr32[1]);
csum += carry;
csum += saddr->s6_addr32[2];
carry = (csum < saddr->s6_addr32[2]);
csum += carry;
csum += saddr->s6_addr32[3];
carry = (csum < saddr->s6_addr32[3]);
csum += carry;
csum += daddr->s6_addr32[0];
carry = (csum < daddr->s6_addr32[0]);
csum += carry;
csum += daddr->s6_addr32[1];
carry = (csum < daddr->s6_addr32[1]);
csum += carry;
csum += daddr->s6_addr32[2];
carry = (csum < daddr->s6_addr32[2]);
csum += carry;
csum += daddr->s6_addr32[3];
carry = (csum < daddr->s6_addr32[3]);
csum += carry;
ulen = htonl((__u32) len);
csum += ulen;
carry = (csum < ulen);
csum += carry;
uproto = htonl(proto);
csum += uproto;
carry = (csum < uproto);
csum += carry;
return csum_fold(csum);
}
#endif
#ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static inline
unsigned int csum_and_copy_from_user (const char __user *src, char *dst,
......
......@@ -37,7 +37,7 @@
#include <net/snmp.h>
#endif
#include <net/sock.h> /* struct sock */
struct sock;
struct inet_skb_parm
{
......
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Checksumming functions for IPv6
*
* Authors: Jorge Cwik, <jorge@laser.satlink.net>
* Arnt Gulbrandsen, <agulbra@nvg.unit.no>
* Borrows very liberally from tcp.c and ip.c, see those
* files for more names.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
/*
* Fixes:
*
* Ralf Baechle : generic ipv6 checksum
* <ralf@waldorf-gmbh.de>
*/
#ifndef _CHECKSUM_IPV6_H
#define _CHECKSUM_IPV6_H
#include <asm/types.h>
#include <asm/byteorder.h>
#include <net/ip.h>
#include <asm/checksum.h>
#include <linux/in6.h>
#ifndef _HAVE_ARCH_IPV6_CSUM
static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
						     struct in6_addr *daddr,
						     __u16 len,
						     unsigned short proto,
						     unsigned int csum)
{
	/*
	 * Fold the IPv6 pseudo-header — source address, destination
	 * address, payload length and next-header protocol — into the
	 * running checksum with end-around carry, then fold to 16 bits.
	 */
	__u32 ulen;
	__u32 uproto;
	int i;

	for (i = 0; i < 4; i++) {
		csum += saddr->s6_addr32[i];
		csum += (csum < saddr->s6_addr32[i]);	/* propagate carry */
	}

	for (i = 0; i < 4; i++) {
		csum += daddr->s6_addr32[i];
		csum += (csum < daddr->s6_addr32[i]);
	}

	ulen = htonl((__u32) len);
	csum += ulen;
	csum += (csum < ulen);

	uproto = htonl(proto);
	csum += uproto;
	csum += (csum < uproto);

	return csum_fold(csum);
}
#endif
......@@ -53,6 +53,7 @@
#include <asm/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>
/*
* This structure really needs to be cleaned up.
......@@ -923,6 +924,29 @@ static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb)
sk->sk_forward_alloc -= skb->truesize;
}
/*
 * Copy @copy bytes of user data into @page at @off and charge the
 * bytes to @skb and the socket: len/data_len/truesize grow, and the
 * socket's wmem_queued / forward_alloc budgets are adjusted.  With
 * CHECKSUM_NONE the copy also folds a partial checksum into skb->csum.
 * Returns 0 on success or a negative errno from the user copy.
 *
 * NOTE(review): this helper only copies and accounts; the caller
 * appears to be responsible for linking @page into the skb's frag
 * array itself — verify against callers.
 */
static inline int skb_copy_to_page(struct sock *sk, char __user *from,
				   struct sk_buff *skb, struct page *page,
				   int off, int copy)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		unsigned int csum = csum_and_copy_from_user(from,
						     page_address(page) + off,
							    copy, 0, &err);
		if (err)
			return err;
		skb->csum = csum_block_add(skb->csum, csum, skb->len);
	} else if (copy_from_user(page_address(page) + off, from, copy))
		return -EFAULT;
	skb->len += copy;
	skb->data_len += copy;
	skb->truesize += copy;
	sk->sk_wmem_queued += copy;
	sk->sk_forward_alloc -= copy;
	return 0;
}
/*
* Queue a received datagram if it will fit. Stream and sequenced
* protocols can't normally use this as they need to fit buffers in
......
......@@ -33,6 +33,7 @@
#include <net/checksum.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
#include <linux/ipv6.h>
#endif
......
......@@ -1196,6 +1196,13 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
int ret;
unsigned long flags;
/*
* Make sure legacy kernel users don't send in bad values
* (normal paths check this in check_kill_permission).
*/
if (sig < 0 || sig > _NSIG)
return -EINVAL;
/*
* We need the tasklist lock even for the specific
* thread case (when we don't need to follow the group
......
......@@ -1022,7 +1022,7 @@ void __init numa_policy_init(void)
/* Reset policy of current process to default.
* Assumes fs == KERNEL_DS */
void __init numa_default_policy(void)
void numa_default_policy(void)
{
sys_set_mempolicy(MPOL_DEFAULT, NULL, 0);
}
......@@ -1263,6 +1263,81 @@ void skb_add_mtu(int mtu)
}
#endif
/*
 * Split @skb at @len when the split point lies inside the linear
 * header (len < pos == skb_headlen(skb)): copy the tail of the linear
 * data into @skb1, hand every page fragment over to @skb1 unchanged,
 * and shrink @skb to exactly @len linear bytes.
 */
static void inline skb_split_inside_header(struct sk_buff *skb,
					   struct sk_buff* skb1,
					   const u32 len, const int pos)
{
	int i;
	/* Copy the linear bytes past the split point into skb1. */
	memcpy(skb_put(skb1, pos - len), skb->data + len, pos - len);
	/* And move data appendix as is. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
	skb_shinfo(skb)->nr_frags = 0;
	skb1->data_len = skb->data_len;
	skb1->len += skb1->data_len;
	skb->data_len = 0;
	skb->len = len;
	skb->tail = skb->data + len;
}
/*
 * Split @skb at @len when the split point is at or past the end of the
 * linear header (len >= pos): the header stays with @skb and only page
 * fragments move.  Fragments wholly below @len stay in @skb, fragments
 * wholly above move to @skb1, and a fragment straddling the boundary
 * is split in two, taking an extra reference on its page.
 */
static void inline skb_split_no_header(struct sk_buff *skb,
				       struct sk_buff* skb1,
				       const u32 len, int pos)
{
	int i, k = 0;
	const int nfrags = skb_shinfo(skb)->nr_frags;
	skb_shinfo(skb)->nr_frags = 0;	/* recounted below for kept frags */
	skb1->len = skb1->data_len = skb->len - len;
	skb->len = len;
	skb->data_len = len - pos;	/* paged bytes remaining in skb */
	for (i = 0; i < nfrags; i++) {
		int size = skb_shinfo(skb)->frags[i].size;
		if (pos + size > len) {
			/* This frag extends past the split point: move it. */
			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
			if (pos < len) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move all the frag to the second
				 * part, if it is possible. F.e.
				 * this approach is mandatory for TUX,
				 * where splitting is expensive.
				 * 2. Split is accurately. We make this.
				 */
				get_page(skb_shinfo(skb)->frags[i].page);
				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
				skb_shinfo(skb1)->frags[0].size -= len - pos;
				skb_shinfo(skb)->frags[i].size = len - pos;
				skb_shinfo(skb)->nr_frags++;
			}
			k++;
		} else
			skb_shinfo(skb)->nr_frags++;
		pos += size;
	}
	skb_shinfo(skb1)->nr_frags = k;
}
/**
 * skb_split - Split fragmented skb to two parts at length len.
 * @skb: buffer to split; retains the first @len bytes
 * @skb1: buffer that receives everything past @len (filled via
 *        skb_put() in the inside-header case — presumably a freshly
 *        allocated skb; verify against callers)
 * @len: split offset in bytes
 */
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
	int pos = skb_headlen(skb);
	if (len < pos)	/* Split line is inside header. */
		skb_split_inside_header(skb, skb1, len, pos);
	else		/* Second chunk has no header, nothing to copy. */
		skb_split_no_header(skb, skb1, len, pos);
}
void __init skb_init(void)
{
skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
......@@ -1300,3 +1375,4 @@ EXPORT_SYMBOL(skb_queue_head);
EXPORT_SYMBOL(skb_queue_tail);
EXPORT_SYMBOL(skb_unlink);
EXPORT_SYMBOL(skb_append);
EXPORT_SYMBOL(skb_split);
......@@ -702,17 +702,6 @@ ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk
return 0;
}
static inline int
skb_can_coalesce(struct sk_buff *skb, int i, struct page *page, int off)
{
if (i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
return page == frag->page &&
off == frag->page_offset+frag->size;
}
return 0;
}
static inline unsigned int
csum_page(struct page *page, int offset, int copy)
{
......
......@@ -28,6 +28,7 @@
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
......
......@@ -24,6 +24,7 @@
#include <linux/sysctl.h>
#endif
#include <net/checksum.h>
#include <net/ip.h>
#define ASSERT_READ_LOCK(x) MUST_BE_READ_LOCKED(&ip_conntrack_lock)
#define ASSERT_WRITE_LOCK(x) MUST_BE_WRITE_LOCKED(&ip_conntrack_lock)
......
......@@ -22,6 +22,7 @@
#include <linux/udp.h>
#include <linux/tcp.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <linux/timer.h>
#include <linux/netdevice.h>
#include <linux/if.h>
......
......@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <net/ip.h>
#include <net/checksum.h>
#include <linux/spinlock.h>
......
......@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/checksum.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv4/ip_nat_rule.h>
......
......@@ -737,17 +737,6 @@ static int wait_for_tcp_memory(struct sock *sk, long *timeo)
goto out;
}
static inline int can_coalesce(struct sk_buff *skb, int i, struct page *page,
int off)
{
if (i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
return page == frag->page &&
off == frag->page_offset + frag->size;
}
return 0;
}
static inline void fill_page_desc(struct sk_buff *skb, int i,
struct page *page, int off, int size)
{
......@@ -865,7 +854,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
copy = size;
i = skb_shinfo(skb)->nr_frags;
if (can_coalesce(skb, i, page, offset)) {
if (skb_can_coalesce(skb, i, page, offset)) {
skb_shinfo(skb)->frags[i - 1].size += copy;
} else if (i < MAX_SKB_FRAGS) {
get_page(page);
......@@ -948,53 +937,6 @@ ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
#define TCP_PAGE(sk) (inet_sk(sk)->sndmsg_page)
#define TCP_OFF(sk) (inet_sk(sk)->sndmsg_off)
static inline int tcp_copy_to_page(struct sock *sk, char __user *from,
struct sk_buff *skb, struct page *page,
int off, int copy)
{
int err = 0;
unsigned int csum;
if (skb->ip_summed == CHECKSUM_NONE) {
csum = csum_and_copy_from_user(from, page_address(page) + off,
copy, 0, &err);
if (err) return err;
skb->csum = csum_block_add(skb->csum, csum, skb->len);
} else {
if (copy_from_user(page_address(page) + off, from, copy))
return -EFAULT;
}
skb->len += copy;
skb->data_len += copy;
skb->truesize += copy;
sk->sk_wmem_queued += copy;
sk->sk_forward_alloc -= copy;
return 0;
}
static inline int skb_add_data(struct sk_buff *skb, char __user *from, int copy)
{
int err = 0;
unsigned int csum;
int off = skb->len;
if (skb->ip_summed == CHECKSUM_NONE) {
csum = csum_and_copy_from_user(from, skb_put(skb, copy),
copy, 0, &err);
if (!err) {
skb->csum = csum_block_add(skb->csum, csum, off);
return 0;
}
} else {
if (!copy_from_user(skb_put(skb, copy), from, copy))
return 0;
}
__skb_trim(skb, off);
return -EFAULT;
}
static inline int select_size(struct sock *sk, struct tcp_opt *tp)
{
int tmp = tp->mss_cache_std;
......@@ -1100,7 +1042,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
struct page *page = TCP_PAGE(sk);
int off = TCP_OFF(sk);
if (can_coalesce(skb, i, page, off) &&
if (skb_can_coalesce(skb, i, page, off) &&
off != PAGE_SIZE) {
/* We can extend the last page
* fragment. */
......@@ -1138,7 +1080,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
/* Time to copy data. We are close to
* the end! */
err = tcp_copy_to_page(sk, from, skb, page,
err = skb_copy_to_page(sk, from, skb, page,
off, copy);
if (err) {
/* If this page was new, give it to the
......
......@@ -354,70 +354,6 @@ void tcp_push_one(struct sock *sk, unsigned cur_mss)
}
}
/* Split fragmented skb to two parts at length len. */
/* Split fragmented skb to two parts at length len.
 *
 * On return, skb carries the first len bytes and skb1 the remainder.
 * skb1's frag array is written from index 0 (NOTE(review): it appears to
 * be assumed empty apart from any pre-existing linear data — confirm at
 * the call site).  Page frags are handed over by reference; only a frag
 * that straddles the split point gets an extra get_page(), because both
 * skbs then reference that page.
 */
static void skb_split(struct sk_buff *skb, struct sk_buff *skb1, u32 len)
{
	int i;
	int pos = skb_headlen(skb);	/* bytes in the linear (non-paged) area */

	if (len < pos) {
		/* Split line is inside header: copy the linear tail
		 * [len, pos) into skb1...
		 */
		memcpy(skb_put(skb1, pos-len), skb->data + len, pos-len);

		/* And move data appendix as is: every page frag migrates
		 * unchanged to skb1.
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

		skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
		skb_shinfo(skb)->nr_frags = 0;
		skb1->data_len = skb->data_len;
		skb1->len += skb1->data_len;
		skb->data_len = 0;
		skb->len = len;
		skb->tail = skb->data+len;	/* trim skb's linear area to len */
	} else {
		int k = 0;
		int nfrags = skb_shinfo(skb)->nr_frags;

		/* Second chunk has no header, nothing to copy. */
		skb_shinfo(skb)->nr_frags = 0;	/* re-counted in the loop below */
		skb1->len = skb1->data_len = skb->len - len;
		skb->len = len;
		skb->data_len = len - pos;	/* paged bytes that stay in skb */

		for (i=0; i<nfrags; i++) {
			int size = skb_shinfo(skb)->frags[i].size;

			if (pos + size > len) {
				/* This frag belongs (at least partly) to skb1.
				 * The first frag to take this branch always
				 * lands at k == 0, which is why the split
				 * adjustment below may address frags[0].
				 */
				skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];

				if (pos < len) {
					/* Split frag.
					 * We have two variants in this case:
					 * 1. Move all the frag to the second
					 * part, if it is possible. F.e.
					 * this approach is mandatory for TUX,
					 * where splitting is expensive.
					 * 2. Split is accurately. We make this.
					 */
					/* Both skbs now reference the page. */
					get_page(skb_shinfo(skb)->frags[i].page);
					/* skb1 keeps the tail of the frag... */
					skb_shinfo(skb1)->frags[0].page_offset += (len-pos);
					skb_shinfo(skb1)->frags[0].size -= (len-pos);
					/* ...and skb keeps the leading len-pos bytes. */
					skb_shinfo(skb)->frags[i].size = len-pos;
					skb_shinfo(skb)->nr_frags++;
				}
				k++;
			} else {
				/* Frag lies entirely before the split point:
				 * it stays with skb unchanged.
				 */
				skb_shinfo(skb)->nr_frags++;
			}
			pos += size;
		}
		skb_shinfo(skb1)->nr_frags = k;
	}
}
/* Function to create two new TCP segments. Shrinks the given segment
* to the specified size and appends a new segment with the rest of the
* packet to the list. This won't be called frequently, I hope.
......
......@@ -55,7 +55,7 @@
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/protocol.h>
#include <net/raw.h>
#include <net/rawv6.h>
......
......@@ -60,7 +60,7 @@
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
/* Set to 3 to get tracing... */
#define MCAST_DEBUG 2
......
......@@ -77,7 +77,7 @@
#include <net/icmp.h>
#include <net/flow.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/proc_fs.h>
#include <linux/netfilter.h>
......
......@@ -35,6 +35,7 @@
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <net/ip.h>
#include <net/sock.h>
#include <net/snmp.h>
......@@ -42,6 +43,7 @@
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#include <net/transp_v6.h>
#include <net/udp.h>
......
......@@ -51,6 +51,7 @@
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
......
......@@ -51,7 +51,7 @@
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <linux/proc_fs.h>
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment