Commit 31d714f2 authored by Linus Torvalds

Merge http://gkernel.bkbits.net/net-drivers-2.5

into penguin.transmeta.com:/home/penguin/torvalds/repositories/kernel/linux
parents 985514c9 f31667a3
@@ -19,6 +19,8 @@
#include <asm/system.h>
#include <asm/pci.h>
#include <asm/hwrpb.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>

#define __EXTERN_INLINE inline
#include <asm/io.h>
...
@@ -26,6 +26,7 @@
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/core_cia.h>
+#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"
...
@@ -28,6 +28,7 @@
#include <asm/core_apecs.h>
#include <asm/core_cia.h>
#include <asm/core_lca.h>
+#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"
...
@@ -30,6 +30,7 @@
#include <asm/pgtable.h>
#include <asm/core_tsunami.h>
#include <asm/hwrpb.h>
+#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"
...
@@ -27,6 +27,7 @@
#include <asm/core_apecs.h>
#include <asm/core_lca.h>
#include <asm/hwrpb.h>
+#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"
...
@@ -27,6 +27,7 @@
#include <asm/pgtable.h>
#include <asm/core_tsunami.h>
#include <asm/hwrpb.h>
+#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"
...
@@ -26,6 +26,7 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
+#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"
...
@@ -24,6 +24,7 @@
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_cia.h>
+#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"
...
@@ -26,6 +26,7 @@
#include <asm/pgtable.h>
#include <asm/core_apecs.h>
#include <asm/core_cia.h>
+#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"
...
@@ -43,6 +43,7 @@
#include <asm/pgtable.h>
#include <asm/core_irongate.h>
#include <asm/hwrpb.h>
+#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"
...
@@ -27,6 +27,7 @@
#include <asm/pgtable.h>
#include <asm/core_apecs.h>
#include <asm/core_cia.h>
+#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"
...
@@ -23,6 +23,7 @@
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_mcpcia.h>
+#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"
...
@@ -24,6 +24,7 @@
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_cia.h>
+#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"
...
@@ -24,6 +24,7 @@
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_polaris.h>
+#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"
...
@@ -24,6 +24,7 @@
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_t2.h>
+#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"
...
@@ -29,6 +29,7 @@
#include <asm/pgtable.h>
#include <asm/core_apecs.h>
#include <asm/core_lca.h>
+#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"
...
@@ -25,6 +25,7 @@
#include <asm/pgtable.h>
#include <asm/core_cia.h>
#include <asm/hwrpb.h>
+#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"
...
@@ -23,6 +23,7 @@
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_cia.h>
+#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"
...
@@ -28,6 +28,7 @@
#include <asm/pgtable.h>
#include <asm/core_titan.h>
#include <asm/hwrpb.h>
+#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"
...
@@ -23,6 +23,7 @@
#include <asm/pgtable.h>
#include <asm/core_wildfire.h>
#include <asm/hwrpb.h>
+#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"
...
@@ -12,7 +12,7 @@
#define __EXTERN_INLINE inline
#include <asm/mmu_context.h>
-#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
#undef __EXTERN_INLINE

#include <linux/signal.h>
...
@@ -1313,7 +1313,7 @@ e100_hardware_send_packet(char *buf, int length)
static void
e100_clear_network_leds(unsigned long dummy)
{
-        if (led_active && jiffies > time_after(jiffies, led_next_time)) {
+        if (led_active && time_after(jiffies, led_next_time)) {
                e100_set_network_leds(NO_NETWORK_ACTIVITY);

                /* Set the earliest time we may set the LED */
...
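A note on the fix above: time_after(a, b) already evaluates to the whole comparison, so the old code compared jiffies against a boolean (0 or 1) and fired on almost every tick. A self-contained sketch of why the macro form is also wraparound-safe — the macro matches its classic kernel definition, the surrounding values are illustrative:

#include <stdio.h>

/* Classic kernel definition: true iff a is after b, correct across
 * jiffies wraparound because the subtraction is evaluated as signed. */
#define time_after(a, b)  ((long)(b) - (long)(a) < 0)

int main(void)
{
        unsigned long led_next_time = 0xfffffff0UL; /* just before wrap */
        unsigned long jiffies = 0x10UL;             /* just after wrap  */

        /* Correct usage, as in the fixed driver: */
        printf("time_after     -> %d\n", time_after(jiffies, led_next_time));

        /* The old buggy form compared jiffies against the 0/1 result,
         * so it was true whenever jiffies > 1: */
        printf("old buggy test -> %d\n",
               jiffies > (unsigned long)time_after(jiffies, led_next_time));
        return 0;
}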
@@ -43,6 +43,7 @@
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io_apic.h>
+#include <asm/tlbflush.h>

#define PREFIX			"ACPI: "
...
@@ -12,10 +12,12 @@
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
+#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
+#include <asm/tlbflush.h>

/*
 * Known problems:
...
@@ -37,6 +37,7 @@
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
+#include <asm/tlbflush.h>

mmu_gather_t mmu_gathers[NR_CPUS];
unsigned long highstart_pfn, highend_pfn;
@@ -573,7 +574,8 @@ void si_meminfo(struct sysinfo *val)
}

#if defined(CONFIG_X86_PAE)
-struct kmem_cache_s *pae_pgd_cachep;
+static struct kmem_cache_s *pae_pgd_cachep;
+
void __init pgtable_cache_init(void)
{
        /*
@@ -584,4 +586,96 @@ void __init pgtable_cache_init(void)
        if (!pae_pgd_cachep)
                panic("init_pae(): Cannot alloc pae_pgd SLAB cache");
}
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+        int i;
+        pgd_t *pgd = kmem_cache_alloc(pae_pgd_cachep, GFP_KERNEL);
+
+        if (pgd) {
+                for (i = 0; i < USER_PTRS_PER_PGD; i++) {
+                        unsigned long pmd = __get_free_page(GFP_KERNEL);
+                        if (!pmd)
+                                goto out_oom;
+                        clear_page(pmd);
+                        set_pgd(pgd + i, __pgd(1 + __pa(pmd)));
+                }
+                memcpy(pgd + USER_PTRS_PER_PGD,
+                        swapper_pg_dir + USER_PTRS_PER_PGD,
+                        (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+        }
+        return pgd;
+out_oom:
+        for (i--; i >= 0; i--)
+                free_page((unsigned long)__va(pgd_val(pgd[i])-1));
+        kmem_cache_free(pae_pgd_cachep, pgd);
+        return NULL;
+}
+
+void pgd_free(pgd_t *pgd)
+{
+        int i;
+
+        for (i = 0; i < USER_PTRS_PER_PGD; i++)
+                free_page((unsigned long)__va(pgd_val(pgd[i])-1));
+        kmem_cache_free(pae_pgd_cachep, pgd);
+}
+
+#else
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
+
+        if (pgd) {
+                memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+                memcpy(pgd + USER_PTRS_PER_PGD,
+                        swapper_pg_dir + USER_PTRS_PER_PGD,
+                        (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+        }
+        return pgd;
+}
+
+void pgd_free(pgd_t *pgd)
+{
+        free_page((unsigned long)pgd);
+}
+
#endif /* CONFIG_X86_PAE */
+
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+{
+        int count = 0;
+        pte_t *pte;
+
+        do {
+                pte = (pte_t *) __get_free_page(GFP_KERNEL);
+                if (pte)
+                        clear_page(pte);
+                else {
+                        current->state = TASK_UNINTERRUPTIBLE;
+                        schedule_timeout(HZ);
+                }
+        } while (!pte && (count++ < 10));
+        return pte;
+}
+
+struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+        int count = 0;
+        struct page *pte;
+
+        do {
+#if CONFIG_HIGHPTE
+                pte = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, 0);
+#else
+                pte = alloc_pages(GFP_KERNEL, 0);
+#endif
+                if (pte)
+                        clear_highpage(pte);
+                else {
+                        current->state = TASK_UNINTERRUPTIBLE;
+                        schedule_timeout(HZ);
+                }
+        } while (!pte && (count++ < 10));
+        return pte;
+}
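For readers of the PAE paths above: a pgd entry is built as __pgd(1 + __pa(pmd)), i.e. the pmd page's physical address with the low present bit added, which is why the free paths recover the page with __va(pgd_val(pgd[i]) - 1). A minimal self-contained sketch of that round trip (the type and helper names here are illustrative, not kernel API):

#include <stdio.h>

typedef unsigned long long pgd_entry_t;    /* stand-in for pgd_t */
#define PGD_PRESENT 1ULL                   /* low bit of the entry */

/* Mirrors __pgd(1 + __pa(pmd)): physical address plus present bit. */
static pgd_entry_t encode_pgd(unsigned long long pmd_phys)
{
        return pmd_phys + PGD_PRESENT;
}

/* Mirrors __va(pgd_val(pgd[i]) - 1): strip the bit to get the address. */
static unsigned long long decode_pgd(pgd_entry_t entry)
{
        return entry - PGD_PRESENT;
}

int main(void)
{
        pgd_entry_t e = encode_pgd(0x12345000ULL);  /* page-aligned pmd */
        printf("entry=%#llx, pmd phys=%#llx\n", e, decode_pgd(e));
        return 0;
}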
@@ -12,6 +12,9 @@
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
        unsigned long phys_addr, unsigned long flags)
...
This diff is collapsed.
@@ -713,8 +713,6 @@ static void irtty_write_wakeup(struct tty_struct *tty)
                self->tx_buff.data += actual;
                self->tx_buff.len -= actual;

-                self->stats.tx_packets++;
-
        } else {
                /*
                 * Now serial buffer is almost free & we can start
@@ -722,6 +720,8 @@ static void irtty_write_wakeup(struct tty_struct *tty)
                 */
                IRDA_DEBUG(5, __FUNCTION__ "(), finished with frame!\n");

+                self->stats.tx_packets++;
+
                tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);

                if (self->new_speed) {
...
@@ -88,10 +88,14 @@ static int nsc_ircc_init_338(nsc_chip_t *chip, chipio_t *info);

/* These are the known NSC chips */
static nsc_chip_t chips[] = {
+/*  Name, {cfg registers}, chip id index reg, chip id expected value, revision mask */
        { "PC87108", { 0x150, 0x398, 0xea }, 0x05, 0x10, 0xf0,
          nsc_ircc_probe_108, nsc_ircc_init_108 },
        { "PC87338", { 0x398, 0x15c, 0x2e }, 0x08, 0xb0, 0xf8,
          nsc_ircc_probe_338, nsc_ircc_init_338 },
+        /* Contributed by Kevin Thayer - OmniBook 6100 */
+        { "PC87338?", { 0x2e, 0x15c, 0x398 }, 0x08, 0x00, 0xf8,
+          nsc_ircc_probe_338, nsc_ircc_init_338 },
        { NULL }
};
@@ -698,6 +702,9 @@ static int nsc_ircc_setup(chipio_t *info)
        switch_bank(iobase, BANK3);
        version = inb(iobase+MID);

+        IRDA_DEBUG(2, __FUNCTION__ "() Driver %s Found chip version %02x\n",
+                   driver_name, version);
+
        /* Should be 0x2? */
        if (0x20 != (version & 0xf0)) {
                ERROR("%s, Wrong chip version %02x\n", driver_name, version);
...
@@ -612,7 +612,7 @@ static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
        disable_dma(self->io.dma);
        clear_dma_ff(self->io.dma);
        set_dma_mode(self->io.dma, DMA_MODE_READ);
-        set_dma_addr(self->io.dma, virt_to_bus(self->tx_buff.data));
+        set_dma_addr(self->io.dma, isa_virt_to_bus(self->tx_buff.data));
        set_dma_count(self->io.dma, self->tx_buff.len);
#else
        setup_dma(self->io.dma, self->tx_buff.data, self->tx_buff.len,
@@ -770,7 +770,7 @@ int w83977af_dma_receive(struct w83977af_ir *self)
        disable_dma(self->io.dma);
        clear_dma_ff(self->io.dma);
        set_dma_mode(self->io.dma, DMA_MODE_READ);
-        set_dma_addr(self->io.dma, virt_to_bus(self->rx_buff.data));
+        set_dma_addr(self->io.dma, isa_virt_to_bus(self->rx_buff.data));
        set_dma_count(self->io.dma, self->rx_buff.truesize);
#else
        setup_dma(self->io.dma, self->rx_buff.data, self->rx_buff.truesize,
...
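Background on the rename above (not part of the patch itself): the generic virt_to_bus() was being phased out in 2.5, and drivers that program the legacy ISA DMA controller moved to the isa_ variants; on i386 isa_virt_to_bus() reduces to a plain physical-address conversion. A hypothetical helper sketching the constraint the isa_ prefix documents:

/* Illustrative only: buffers handed to set_dma_addr() must be reachable
 * by the ISA DMA controller, i.e. physically contiguous and below 16MB. */
static int isa_dma_addr_ok(void *buf, unsigned long len)
{
        unsigned long bus = isa_virt_to_bus(buf); /* phys addr on i386 */

        return bus + len <= 16UL * 1024 * 1024;   /* 24-bit ISA limit */
}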
@@ -28,6 +28,7 @@
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
+#include <asm/cacheflush.h>

static int load_aout_binary(struct linux_binprm *, struct pt_regs * regs);
static int load_aout_library(struct file*);
...
@@ -10,7 +10,6 @@ struct ext2_inode_info {
        __u32	i_faddr;
        __u8	i_frag_no;
        __u8	i_frag_size;
-        __u16	i_osync;
        __u32	i_file_acl;
        __u32	i_dir_acl;
        __u32	i_dtime;
...
@@ -393,7 +393,6 @@ struct inode * ext2_new_inode(struct inode * dir, int mode)
        ei->i_faddr = 0;
        ei->i_frag_no = 0;
        ei->i_frag_size = 0;
-        ei->i_osync = 0;
        ei->i_file_acl = 0;
        ei->i_dir_acl = 0;
        ei->i_dtime = 0;
...
@@ -405,10 +405,10 @@ static int ext2_alloc_branch(struct inode *inode,
                mark_buffer_uptodate(bh, 1);
                unlock_buffer(bh);
                mark_buffer_dirty_inode(bh, inode);
-                if (IS_SYNC(inode) || EXT2_I(inode)->i_osync) {
-                        ll_rw_block (WRITE, 1, &bh);
-                        wait_on_buffer (bh);
-                }
+                /* We used to sync bh here if IS_SYNC(inode).
+                 * But we now rely upon generic_osync_inode()
+                 * and b_inode_buffers
+                 */
                parent = nr;
        }
        if (n == num)
@@ -467,18 +467,10 @@ static inline int ext2_splice_branch(struct inode *inode,
        inode->i_ctime = CURRENT_TIME;

        /* had we spliced it onto indirect block? */
-        if (where->bh) {
+        if (where->bh)
                mark_buffer_dirty_inode(where->bh, inode);
-                if (IS_SYNC(inode) || ei->i_osync) {
-                        ll_rw_block (WRITE, 1, &where->bh);
-                        wait_on_buffer(where->bh);
-                }
-        }
-        if (IS_SYNC(inode) || ei->i_osync)
-                ext2_sync_inode (inode);
-        else
-                mark_inode_dirty(inode);
+
+        mark_inode_dirty(inode);

        return 0;

changed:
@@ -833,10 +825,6 @@ void ext2_truncate (struct inode * inode)
                        (u32*)partial->bh->b_data + addr_per_block,
                        (chain+n-1) - partial);
                mark_buffer_dirty_inode(partial->bh, inode);
-                if (IS_SYNC(inode)) {
-                        ll_rw_block (WRITE, 1, &partial->bh);
-                        wait_on_buffer (partial->bh);
-                }
                brelse (partial->bh);
                partial--;
        }
@@ -868,10 +856,12 @@ void ext2_truncate (struct inode * inode)
                        ;
        }
        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-        if (IS_SYNC(inode))
+        if (IS_SYNC(inode)) {
+                fsync_inode_buffers(inode);
                ext2_sync_inode (inode);
-        else
+        } else {
                mark_inode_dirty(inode);
+        }
}

static struct ext2_inode *ext2_get_inode(struct super_block *sb, ino_t ino,
@@ -958,7 +948,6 @@ void ext2_read_inode (struct inode * inode)
        ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
        ei->i_frag_no = raw_inode->i_frag;
        ei->i_frag_size = raw_inode->i_fsize;
-        ei->i_osync = 0;
        ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
        ei->i_dir_acl = 0;
        if (S_ISREG(inode->i_mode))
...
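The pattern behind these ext2 changes: allocation sites stop doing their own synchronous ll_rw_block()/wait_on_buffer() and instead just tag buffers onto the inode with mark_buffer_dirty_inode(); the O_SYNC path then writes and waits for everything in one place (fsync_inode_buffers() plus ext2_sync_inode(), per the hunk above). A self-contained toy model of that batching, with deliberately simplified names:

#include <stdio.h>

#define MAX_DIRTY 16

static int dirty[MAX_DIRTY], ndirty;        /* toy b_inode_buffers list */

/* Allocation sites only enqueue: cheap, no I/O, no waiting. */
static void mark_buffer_dirty_inode_toy(int block)
{
        if (ndirty < MAX_DIRTY)
                dirty[ndirty++] = block;
}

/* The sync path writes and waits for the whole batch at once. */
static void fsync_inode_buffers_toy(void)
{
        for (int i = 0; i < ndirty; i++)
                printf("write+wait block %d\n", dirty[i]);
        ndirty = 0;
}

int main(void)
{
        mark_buffer_dirty_inode_toy(11);
        mark_buffer_dirty_inode_toy(12);
        fsync_inode_buffers_toy();          /* one batched flush */
        return 0;
}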
@@ -542,6 +542,7 @@ int ext3_new_block (handle_t *handle, struct inode * inode,
        int i, j, k, tmp, alloctmp;
        int bitmap_nr;
        int fatal = 0, err;
+        int performed_allocation = 0;
        struct super_block * sb;
        struct ext3_group_desc * gdp;
        struct ext3_super_block * es;
@@ -644,8 +645,7 @@ int ext3_new_block (handle_t *handle, struct inode * inode,
        }

        /* No space left on the device */
-        unlock_super (sb);
-        return 0;
+        goto out;

search_back:
        /*
@@ -694,6 +694,7 @@ int ext3_new_block (handle_t *handle, struct inode * inode,
        J_ASSERT_BH(bh, !ext3_test_bit(j, bh->b_data));
        BUFFER_TRACE(bh, "setting bitmap bit");
        ext3_set_bit(j, bh->b_data);
+        performed_allocation = 1;

#ifdef CONFIG_JBD_DEBUG
        {
@@ -815,6 +816,11 @@ int ext3_new_block (handle_t *handle, struct inode * inode,
                ext3_std_error(sb, fatal);
        }
        unlock_super (sb);
+        /*
+         * Undo the block allocation
+         */
+        if (!performed_allocation)
+                DQUOT_FREE_BLOCK(inode, 1);
        return 0;
}
...
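The performed_allocation logic above follows a charge-early, release-on-failure pattern: the quota block is charged before the bitmap search, every failure path funnels through the common exit, and the charge is released there unless a bit was actually set. A self-contained sketch of the pattern — the two macros stand in for the kernel's quota hooks, under the assumed convention that a nonzero return means the charge was refused:

#include <stdio.h>

static int quota_used, quota_limit = 8;

#define DQUOT_ALLOC_BLOCK(inode, nr) \
        ((quota_used + (nr) > quota_limit) ? 1 : (quota_used += (nr), 0))
#define DQUOT_FREE_BLOCK(inode, nr)  (quota_used -= (nr))

/* Returns a block number, or 0 on failure (as ext3_new_block does). */
static int alloc_one_block(void *inode, int bitmap_full)
{
        int performed_allocation = 0;
        int block = 0;

        if (DQUOT_ALLOC_BLOCK(inode, 1))
                return 0;                   /* over quota: nothing to undo */

        if (bitmap_full)
                goto out;                   /* the search failed */

        performed_allocation = 1;           /* the bitmap bit is now set */
        block = 42;                         /* stand-in block number */
out:
        if (!performed_allocation)
                DQUOT_FREE_BLOCK(inode, 1); /* undo the early charge */
        return block;
}

int main(void)
{
        printf("block=%d used=%d\n", alloc_one_block(NULL, 0), quota_used);
        printf("block=%d used=%d\n", alloc_one_block(NULL, 1), quota_used);
        return 0;
}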
@@ -584,8 +584,6 @@ static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
                        parent = nr;
                }
-                if (IS_SYNC(inode))
-                        handle->h_sync = 1;
        }
        if (n == num)
                return 0;
...
@@ -267,6 +267,7 @@ int kjournald(void *arg)
        journal->j_task = NULL;
        wake_up(&journal->j_wait_done_commit);
        jbd_debug(1, "Journal thread exiting.\n");
+        unlock_kernel();
        return 0;
}
...
#ifndef _ALPHA_CACHEFLUSH_H
#define _ALPHA_CACHEFLUSH_H
#include <linux/config.h>
#include <linux/mm.h>
/* Caches aren't brain-dead on the Alpha. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
/* Note that the following two definitions are _highly_ dependent
on the contexts in which they are used in the kernel. I personally
think it is criminal how loosely defined these macros are. */
/* We need to flush the kernel's icache after loading modules. The
only other use of this macro is in load_aout_interp which is not
used on Alpha.
Note that this definition should *not* be used for userspace
icache flushing. While functional, it is _way_ overkill. The
icache is tagged with ASNs and it suffices to allocate a new ASN
for the process. */
#ifndef CONFIG_SMP
#define flush_icache_range(start, end) imb()
#else
#define flush_icache_range(start, end) smp_imb()
extern void smp_imb(void);
#endif
/* We need to flush the userspace icache after setting breakpoints in
ptrace.
Instead of indiscriminately using imb, take advantage of the fact
that icache entries are tagged with the ASN and load a new mm context. */
/* ??? Ought to use this in arch/alpha/kernel/signal.c too. */
#ifndef CONFIG_SMP
extern void __load_new_mm_context(struct mm_struct *);
static inline void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len)
{
if (vma->vm_flags & VM_EXEC) {
struct mm_struct *mm = vma->vm_mm;
if (current->active_mm == mm)
__load_new_mm_context(mm);
else
mm->context[smp_processor_id()] = 0;
}
}
#else
extern void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page, unsigned long addr, int len);
#endif
/* This is used only in do_no_page and do_swap_page. */
#define flush_icache_page(vma, page) \
flush_icache_user_range((vma), (page), 0, 0)
#endif /* _ALPHA_CACHEFLUSH_H */
@@ -3,228 +3,6 @@
#include <linux/config.h>
#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#define __MMU_EXTERN_INLINE
#endif
extern void __load_new_mm_context(struct mm_struct *);
/* Caches aren't brain-dead on the Alpha. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
/* Note that the following two definitions are _highly_ dependent
on the contexts in which they are used in the kernel. I personally
think it is criminal how loosely defined these macros are. */
/* We need to flush the kernel's icache after loading modules. The
only other use of this macro is in load_aout_interp which is not
used on Alpha.
Note that this definition should *not* be used for userspace
icache flushing. While functional, it is _way_ overkill. The
icache is tagged with ASNs and it suffices to allocate a new ASN
for the process. */
#ifndef CONFIG_SMP
#define flush_icache_range(start, end) imb()
#else
#define flush_icache_range(start, end) smp_imb()
extern void smp_imb(void);
#endif
/*
* Use a few helper functions to hide the ugly broken ASN
* numbers on early Alphas (ev4 and ev45)
*/
__EXTERN_INLINE void
ev4_flush_tlb_current(struct mm_struct *mm)
{
__load_new_mm_context(mm);
tbiap();
}
__EXTERN_INLINE void
ev5_flush_tlb_current(struct mm_struct *mm)
{
__load_new_mm_context(mm);
}
static inline void
flush_tlb_other(struct mm_struct *mm)
{
long * mmc = &mm->context[smp_processor_id()];
/*
* Check it's not zero first to avoid cacheline ping pong when
* possible.
*/
if (*mmc)
*mmc = 0;
}
/* We need to flush the userspace icache after setting breakpoints in
ptrace.
Instead of indiscriminately using imb, take advantage of the fact
that icache entries are tagged with the ASN and load a new mm context. */
/* ??? Ought to use this in arch/alpha/kernel/signal.c too. */
#ifndef CONFIG_SMP
static inline void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len)
{
if (vma->vm_flags & VM_EXEC) {
struct mm_struct *mm = vma->vm_mm;
if (current->active_mm == mm)
__load_new_mm_context(mm);
else
mm->context[smp_processor_id()] = 0;
}
}
#else
extern void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page, unsigned long addr, int len);
#endif
/* this is used only in do_no_page and do_swap_page */
#define flush_icache_page(vma, page) flush_icache_user_range((vma), (page), 0, 0)
/*
* Flush just one page in the current TLB set.
* We need to be very careful about the icache here, there
* is no way to invalidate a specific icache page..
*/
__EXTERN_INLINE void
ev4_flush_tlb_current_page(struct mm_struct * mm,
struct vm_area_struct *vma,
unsigned long addr)
{
int tbi_flag = 2;
if (vma->vm_flags & VM_EXEC) {
__load_new_mm_context(mm);
tbi_flag = 3;
}
tbi(tbi_flag, addr);
}
__EXTERN_INLINE void
ev5_flush_tlb_current_page(struct mm_struct * mm,
struct vm_area_struct *vma,
unsigned long addr)
{
if (vma->vm_flags & VM_EXEC)
__load_new_mm_context(mm);
else
tbi(2, addr);
}
#ifdef CONFIG_ALPHA_GENERIC
# define flush_tlb_current alpha_mv.mv_flush_tlb_current
# define flush_tlb_current_page alpha_mv.mv_flush_tlb_current_page
#else
# ifdef CONFIG_ALPHA_EV4
# define flush_tlb_current ev4_flush_tlb_current
# define flush_tlb_current_page ev4_flush_tlb_current_page
# else
# define flush_tlb_current ev5_flush_tlb_current
# define flush_tlb_current_page ev5_flush_tlb_current_page
# endif
#endif
#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif
/*
* Flush current user mapping.
*/
static inline void flush_tlb(void)
{
flush_tlb_current(current->active_mm);
}
/*
* Flush a specified range of user mapping page tables
* from TLB.
* Although Alpha uses VPTE caches, this can be a nop, as Alpha does
* not have finegrained tlb flushing, so it will flush VPTE stuff
* during next flush_tlb_range.
*/
static inline void flush_tlb_pgtables(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
}
#ifndef CONFIG_SMP
/*
* Flush everything (kernel mapping may also have
* changed due to vmalloc/vfree)
*/
static inline void flush_tlb_all(void)
{
tbia();
}
/*
* Flush a specified user mapping
*/
static inline void flush_tlb_mm(struct mm_struct *mm)
{
if (mm == current->active_mm)
flush_tlb_current(mm);
else
flush_tlb_other(mm);
}
/*
* Page-granular tlb flush.
*
* do a tbisd (type = 2) normally, and a tbis (type = 3)
* if it is an executable mapping. We want to avoid the
* itlb flush, because that potentially also does a
* icache flush.
*/
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
struct mm_struct * mm = vma->vm_mm;
if (mm == current->active_mm)
flush_tlb_current_page(mm, vma, addr);
else
flush_tlb_other(mm);
}
/*
* Flush a specified range of user mapping: on the
* Alpha we flush the whole user tlb.
*/
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
flush_tlb_mm(vma->vm_mm);
}
#else /* CONFIG_SMP */
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_range(struct vm_area_struct *, unsigned long, unsigned long);
#endif /* CONFIG_SMP */
/*
 * Allocate and free page tables.  The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
@@ -292,4 +70,6 @@ pte_free(struct page *page)
        __free_page(page);
}

+#define check_pgt_cache()	do { } while (0)
+
#endif /* _ALPHA_PGALLOC_H */
#ifndef _ALPHA_TLBFLUSH_H
#define _ALPHA_TLBFLUSH_H
#include <linux/config.h>
#include <linux/mm.h>
#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#define __MMU_EXTERN_INLINE
#endif
extern void __load_new_mm_context(struct mm_struct *);
/* Use a few helper functions to hide the ugly broken ASN
numbers on early Alphas (ev4 and ev45). */
__EXTERN_INLINE void
ev4_flush_tlb_current(struct mm_struct *mm)
{
__load_new_mm_context(mm);
tbiap();
}
__EXTERN_INLINE void
ev5_flush_tlb_current(struct mm_struct *mm)
{
__load_new_mm_context(mm);
}
/* Flush just one page in the current TLB set. We need to be very
careful about the icache here, there is no way to invalidate a
specific icache page. */
__EXTERN_INLINE void
ev4_flush_tlb_current_page(struct mm_struct * mm,
struct vm_area_struct *vma,
unsigned long addr)
{
int tbi_flag = 2;
if (vma->vm_flags & VM_EXEC) {
__load_new_mm_context(mm);
tbi_flag = 3;
}
tbi(tbi_flag, addr);
}
__EXTERN_INLINE void
ev5_flush_tlb_current_page(struct mm_struct * mm,
struct vm_area_struct *vma,
unsigned long addr)
{
if (vma->vm_flags & VM_EXEC)
__load_new_mm_context(mm);
else
tbi(2, addr);
}
#ifdef CONFIG_ALPHA_GENERIC
# define flush_tlb_current alpha_mv.mv_flush_tlb_current
# define flush_tlb_current_page alpha_mv.mv_flush_tlb_current_page
#else
# ifdef CONFIG_ALPHA_EV4
# define flush_tlb_current ev4_flush_tlb_current
# define flush_tlb_current_page ev4_flush_tlb_current_page
# else
# define flush_tlb_current ev5_flush_tlb_current
# define flush_tlb_current_page ev5_flush_tlb_current_page
# endif
#endif
#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif
/* Flush current user mapping. */
static inline void
flush_tlb(void)
{
flush_tlb_current(current->active_mm);
}
/* Flush someone else's user mapping. */
static inline void
flush_tlb_other(struct mm_struct *mm)
{
long *mmc = &mm->context[smp_processor_id()];
/* Check it's not zero first to avoid cacheline ping pong
when possible. */
if (*mmc) *mmc = 0;
}
/* Flush a specified range of user mapping page tables from TLB.
Although Alpha uses VPTE caches, this can be a nop, as Alpha does
not have finegrained tlb flushing, so it will flush VPTE stuff
during next flush_tlb_range. */
static inline void
flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
unsigned long end)
{
}
#ifndef CONFIG_SMP
/* Flush everything (kernel mapping may also have changed
due to vmalloc/vfree). */
static inline void flush_tlb_all(void)
{
tbia();
}
/* Flush a specified user mapping. */
static inline void
flush_tlb_mm(struct mm_struct *mm)
{
if (mm == current->active_mm)
flush_tlb_current(mm);
else
flush_tlb_other(mm);
}
/* Page-granular tlb flush. */
static inline void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
struct mm_struct *mm = vma->vm_mm;
if (mm == current->active_mm)
flush_tlb_current_page(mm, vma, addr);
else
flush_tlb_other(mm);
}
/* Flush a specified range of user mapping. On the Alpha we flush
the whole user tlb. */
static inline void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
flush_tlb_mm(vma->vm_mm);
}
#else /* CONFIG_SMP */
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
unsigned long);
#endif /* CONFIG_SMP */
#endif /* _ALPHA_TLBFLUSH_H */
#ifndef _I386_CACHEFLUSH_H
#define _I386_CACHEFLUSH_H
/* Keep includes the same across arches. */
#include <linux/mm.h>
/* Caches aren't brain-dead on the intel. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#endif /* _I386_CACHEFLUSH_H */
@@ -5,7 +5,6 @@
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>
-#include <linux/highmem.h>

#define pmd_populate_kernel(mm, pmd, pte) \
        set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
@@ -20,109 +19,11 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *p
 * Allocate and free page tables.
 */
+extern pgd_t *pgd_alloc(struct mm_struct *);
+extern void pgd_free(pgd_t *pgd);
+
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
+extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
+
-#if defined (CONFIG_X86_PAE)
-/*
* We can't include <linux/slab.h> here, thus these uglinesses.
*/
struct kmem_cache_s;
extern struct kmem_cache_s *pae_pgd_cachep;
extern void *kmem_cache_alloc(struct kmem_cache_s *, int);
extern void kmem_cache_free(struct kmem_cache_s *, void *);
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
int i;
pgd_t *pgd = kmem_cache_alloc(pae_pgd_cachep, GFP_KERNEL);
if (pgd) {
for (i = 0; i < USER_PTRS_PER_PGD; i++) {
unsigned long pmd = __get_free_page(GFP_KERNEL);
if (!pmd)
goto out_oom;
clear_page(pmd);
set_pgd(pgd + i, __pgd(1 + __pa(pmd)));
}
memcpy(pgd + USER_PTRS_PER_PGD,
swapper_pg_dir + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}
return pgd;
out_oom:
for (i--; i >= 0; i--)
free_page((unsigned long)__va(pgd_val(pgd[i])-1));
kmem_cache_free(pae_pgd_cachep, pgd);
return NULL;
}
#else
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
if (pgd) {
memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
memcpy(pgd + USER_PTRS_PER_PGD,
swapper_pg_dir + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}
return pgd;
}
#endif /* CONFIG_X86_PAE */
static inline void pgd_free(pgd_t *pgd)
{
#if defined(CONFIG_X86_PAE)
int i;
for (i = 0; i < USER_PTRS_PER_PGD; i++)
free_page((unsigned long)__va(pgd_val(pgd[i])-1));
kmem_cache_free(pae_pgd_cachep, pgd);
#else
free_page((unsigned long)pgd);
#endif
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
int count = 0;
pte_t *pte;
do {
pte = (pte_t *) __get_free_page(GFP_KERNEL);
if (pte)
clear_page(pte);
else {
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ);
}
} while (!pte && (count++ < 10));
return pte;
}
static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
int count = 0;
struct page *pte;
do {
#if CONFIG_HIGHPTE
pte = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, 0);
#else
pte = alloc_pages(GFP_KERNEL, 0);
#endif
if (pte)
clear_highpage(pte);
else {
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ);
}
} while (!pte && (count++ < 10));
return pte;
}
static inline void pte_free_kernel(pte_t *pte)
{
@@ -144,85 +45,6 @@ static inline void pte_free(struct page *pte)
#define pmd_free(x)			do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()
/*
* TLB flushing:
*
* - flush_tlb() flushes the current mm struct TLBs
* - flush_tlb_all() flushes all processes TLBs
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(vma, start, end) flushes a range of pages
* - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
*
* ..but the i386 has somewhat limited tlb flushing capabilities,
* and page-granular flushes are available only on i486 and up.
*/
#ifndef CONFIG_SMP
#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()
static inline void flush_tlb_mm(struct mm_struct *mm)
{
if (mm == current->active_mm)
__flush_tlb();
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
if (vma->vm_mm == current->active_mm)
__flush_tlb_one(addr);
}
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
if (vma->vm_mm == current->active_mm)
__flush_tlb();
}
#else
#include <asm/smp.h>
#define local_flush_tlb() \
__flush_tlb()
extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
#define flush_tlb() flush_tlb_current_task()
static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
{
flush_tlb_mm(vma->vm_mm);
}
#define TLBSTATE_OK 1
#define TLBSTATE_LAZY 2
struct tlb_state
{
struct mm_struct *active_mm;
int state;
char __cacheline_padding[24];
};
extern struct tlb_state cpu_tlbstate[NR_CPUS];
#endif
static inline void flush_tlb_pgtables(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
/* i386 does not keep any page table caches in TLB */
}
#define check_pgt_cache()		do { } while (0)

#endif /* _I386_PGALLOC_H */
@@ -24,71 +24,6 @@
extern pgd_t swapper_pg_dir[1024];
extern void paging_init(void);
/* Caches aren't brain-dead on the intel. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#define __flush_tlb() \
do { \
unsigned int tmpreg; \
\
__asm__ __volatile__( \
"movl %%cr3, %0; # flush TLB \n" \
"movl %0, %%cr3; \n" \
: "=r" (tmpreg) \
:: "memory"); \
} while (0)
/*
* Global pages have to be flushed a bit differently. Not a real
* performance problem because this does not happen often.
*/
#define __flush_tlb_global() \
do { \
unsigned int tmpreg; \
\
__asm__ __volatile__( \
"movl %1, %%cr4; # turn off PGE \n" \
"movl %%cr3, %0; # flush TLB \n" \
"movl %0, %%cr3; \n" \
"movl %2, %%cr4; # turn PGE back on \n" \
: "=&r" (tmpreg) \
: "r" (mmu_cr4_features & ~X86_CR4_PGE), \
"r" (mmu_cr4_features) \
: "memory"); \
} while (0)
extern unsigned long pgkern_mask;
/*
* Do not check the PGE bit unnecesserily if this is a PPro+ kernel.
*/
#ifdef CONFIG_X86_PGE
# define __flush_tlb_all() __flush_tlb_global()
#else
# define __flush_tlb_all() \
do { \
if (cpu_has_pge) \
__flush_tlb_global(); \
else \
__flush_tlb(); \
} while (0)
#endif
#ifndef CONFIG_X86_INVLPG
#define __flush_tlb_one(addr) __flush_tlb()
#else
#define __flush_tlb_one(addr) \
__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
#endif
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
...
#ifndef _I386_TLBFLUSH_H
#define _I386_TLBFLUSH_H
#include <linux/config.h>
#include <linux/mm.h>
#include <asm/processor.h>
#define __flush_tlb() \
do { \
unsigned int tmpreg; \
\
__asm__ __volatile__( \
"movl %%cr3, %0; # flush TLB \n" \
"movl %0, %%cr3; \n" \
: "=r" (tmpreg) \
:: "memory"); \
} while (0)
/*
* Global pages have to be flushed a bit differently. Not a real
* performance problem because this does not happen often.
*/
#define __flush_tlb_global() \
do { \
unsigned int tmpreg; \
\
__asm__ __volatile__( \
"movl %1, %%cr4; # turn off PGE \n" \
"movl %%cr3, %0; # flush TLB \n" \
"movl %0, %%cr3; \n" \
"movl %2, %%cr4; # turn PGE back on \n" \
: "=&r" (tmpreg) \
: "r" (mmu_cr4_features & ~X86_CR4_PGE), \
"r" (mmu_cr4_features) \
: "memory"); \
} while (0)
extern unsigned long pgkern_mask;
/*
* Do not check the PGE bit unnecesserily if this is a PPro+ kernel.
*/
#ifdef CONFIG_X86_PGE
# define __flush_tlb_all() __flush_tlb_global()
#else
# define __flush_tlb_all() \
do { \
if (cpu_has_pge) \
__flush_tlb_global(); \
else \
__flush_tlb(); \
} while (0)
#endif
#ifndef CONFIG_X86_INVLPG
#define __flush_tlb_one(addr) __flush_tlb()
#else
#define __flush_tlb_one(addr) \
__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
#endif
/*
* TLB flushing:
*
* - flush_tlb() flushes the current mm struct TLBs
* - flush_tlb_all() flushes all processes TLBs
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(vma, start, end) flushes a range of pages
* - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
*
* ..but the i386 has somewhat limited tlb flushing capabilities,
* and page-granular flushes are available only on i486 and up.
*/
#ifndef CONFIG_SMP
#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()
static inline void flush_tlb_mm(struct mm_struct *mm)
{
if (mm == current->active_mm)
__flush_tlb();
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
if (vma->vm_mm == current->active_mm)
__flush_tlb_one(addr);
}
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
if (vma->vm_mm == current->active_mm)
__flush_tlb();
}
#else
#include <asm/smp.h>
#define local_flush_tlb() \
__flush_tlb()
extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
#define flush_tlb() flush_tlb_current_task()
static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
{
flush_tlb_mm(vma->vm_mm);
}
#define TLBSTATE_OK 1
#define TLBSTATE_LAZY 2
struct tlb_state
{
struct mm_struct *active_mm;
int state;
char __cacheline_padding[24];
};
extern struct tlb_state cpu_tlbstate[NR_CPUS];
#endif
static inline void flush_tlb_pgtables(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
/* i386 does not keep any page table caches in TLB */
}
#endif /* _I386_TLBFLUSH_H */
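Usage note for the new header: the macros above only define the mechanics (a CR3 reload, or invlpg where available); callers pair them with page-table updates. A minimal sketch of the intended ordering when the kernel rewrites a user PTE — set_pte() and flush_tlb_page() are the real interfaces of this era, while the wrapper around them is hypothetical:

static void update_user_mapping(struct vm_area_struct *vma,
                                unsigned long address,
                                pte_t *ptep, pte_t entry)
{
        set_pte(ptep, entry);           /* publish the new translation */
        flush_tlb_page(vma, address);   /* then drop the stale TLB entry */
}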
@@ -4,6 +4,7 @@
#include <linux/config.h>
#include <linux/bio.h>
#include <linux/fs.h>
+#include <asm/cacheflush.h>

#ifdef CONFIG_HIGHMEM
...
@@ -39,6 +39,14 @@
#define DISCOVERY_EXPIRE_TIMEOUT 6*HZ
#define DISCOVERY_DEFAULT_SLOTS  0

+/* Types of discovery */
+typedef enum {
+        DISCOVERY_LOG,		/* What's in our discovery log */
+        DISCOVERY_ACTIVE,	/* Doing our own discovery on the medium */
+        DISCOVERY_PASSIVE,	/* Peer doing discovery on the medium */
+        EXPIRY_TIMEOUT,		/* Entry expired due to timeout */
+} DISCOVERY_MODE;
+
#define NICKNAME_MAX_LEN 21

/*
...
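Every discovery callback now receives the mode, so a protocol can tell its own active discoveries apart from a peer probing the medium. A hypothetical client callback sketching the intended use (the IrComm and IrLAN hunks later in this diff do exactly this):

static void example_discovery_indication(discovery_t *discovery,
                                         DISCOVERY_MODE mode,
                                         void *priv)
{
        /* Protocols whose connect path cannot cope with two peers
         * discovering each other at once simply drop passive events. */
        if (mode == DISCOVERY_PASSIVE)
                return;

        /* ... act on an actively discovered peer ... */
}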
@@ -44,6 +44,11 @@
#define IRCOMM_TTY_MAJOR 161
#define IRCOMM_TTY_MINOR 0

+/* This is used as an initial value to max_header_size before the proper
+ * value is filled in (5 for ttp, 4 for lmp). This allow us to detect
+ * the state of the underlying connection. - Jean II */
+#define IRCOMM_TTY_HDR_UNITIALISED	32
+
/*
 * IrCOMM TTY driver state
 */
...
@@ -34,7 +34,7 @@
#include <net/irda/irlan_event.h>

void irlan_client_start_kick_timer(struct irlan_cb *self, int timeout);
-void irlan_client_discovery_indication(discovery_t *, void *);
+void irlan_client_discovery_indication(discovery_t *, DISCOVERY_MODE, void *);
void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr);
void irlan_client_open_ctrl_tsap( struct irlan_cb *self);
...
@@ -72,7 +72,7 @@ typedef enum {
        S_END,
} SERVICE;

-typedef void (*DISCOVERY_CALLBACK1) (discovery_t *, void *);
+typedef void (*DISCOVERY_CALLBACK1) (discovery_t *, DISCOVERY_MODE, void *);
typedef void (*DISCOVERY_CALLBACK2) (hashbin_t *, void *);

typedef struct {
@@ -214,7 +214,7 @@ void irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason,
                                 struct sk_buff *userdata);
int irlmp_disconnect_request(struct lsap_cb *, struct sk_buff *userdata);

-void irlmp_discovery_confirm(hashbin_t *discovery_log);
+void irlmp_discovery_confirm(hashbin_t *discovery_log, DISCOVERY_MODE);
void irlmp_discovery_request(int nslots);
struct irda_device_info *irlmp_get_discoveries(int *pn, __u16 mask, int nslots);
void irlmp_do_expiry(void);
...
@@ -29,6 +29,8 @@
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>

static kmem_cache_t *task_struct_cachep;
...
@@ -11,6 +11,7 @@
#include <linux/kmod.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
+#include <asm/cacheflush.h>

/*
 * Originally by Anonymous (as far as I know...)
...
@@ -843,7 +843,8 @@ asmlinkage void preempt_schedule(void)
{
        if (unlikely(preempt_get_count()))
                return;
-        current->state = TASK_RUNNING;
+        if (current->state != TASK_RUNNING)
+                return;
        schedule();
}

#endif /* CONFIG_PREEMPT */
...
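Why the preempt_schedule() change matters: the old code forced TASK_RUNNING, which could silently cancel a sleep that was being set up when the preemption point hit. An illustrative timeline of the race the fix closes:

/*
 * Task code:                        Kernel preemption (interrupt return):
 *
 *   current->state = TASK_INTERRUPTIBLE;
 *                                     old: current->state = TASK_RUNNING;
 *                                          schedule();   <- sleep cancelled
 *                                     new: state != TASK_RUNNING -> return;
 *   schedule();   <- the intended sleep proceeds normally
 */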
@@ -48,6 +48,7 @@
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
+#include <asm/tlbflush.h>

unsigned long max_mapnr;
unsigned long num_physpages;
...
@@ -17,6 +17,7 @@
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>

/*
 * WARNING: the debugging will use recursive algorithms so never enable this
...
@@ -9,11 +9,13 @@
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
+#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
-#include <linux/highmem.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>

static inline void change_pte_range(pmd_t * pmd, unsigned long address,
        unsigned long size, pgprot_t newprot)
...
@@ -11,9 +11,12 @@
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/fs.h>
+#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>

extern int vm_enough_memory(long pages);
...
@@ -14,6 +14,7 @@
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>

/*
 * Called with mm->page_table_lock held to protect against other
...
@@ -15,6 +15,7 @@
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>

rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct * vmlist;
...
@@ -24,6 +24,7 @@
#include <linux/compiler.h>

#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>

/*
 * The "priority" of VM scanning is how much of the queues we
...
@@ -415,6 +415,7 @@ static void irda_getvalue_confirm(int result, __u16 obj_id,
 * hint bits), and then wake up any process waiting for answer...
 */
static void irda_selective_discovery_indication(discovery_t *discovery,
+                                                DISCOVERY_MODE mode,
                                                void *priv)
{
        struct irda_sock *self;
@@ -1286,6 +1287,9 @@ static int irda_sendmsg(struct socket *sock, struct msghdr *msg, int len,
                /* Check if we are still connected */
                if (sk->state != TCP_ESTABLISHED)
                        return -ENOTCONN;
+                /* Handle signals */
+                if (signal_pending(current))
+                        return -ERESTARTSYS;
        }

        /* Check that we don't send out to big frames */
...
@@ -416,7 +416,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
        self->line = line;
        self->tqueue.routine = ircomm_tty_do_softint;
        self->tqueue.data = self;
-        self->max_header_size = 5;
+        self->max_header_size = IRCOMM_TTY_HDR_UNITIALISED;
        self->max_data_size = 64-self->max_header_size;
        self->close_delay = 5*HZ/10;
        self->closing_wait = 30*HZ;
@@ -695,6 +695,20 @@ static int ircomm_tty_write(struct tty_struct *tty, int from_user,
        ASSERT(self != NULL, return -1;);
        ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);

+        /* We may receive packets from the TTY even before we have finished
+         * our setup. Not cool.
+         * The problem is that we would allocate a skb with bogus header and
+         * data size, and when adding data to it later we would get
+         * confused.
+         * Better to not accept data until we are properly setup. Use bogus
+         * header size to check that (safest way to detect it).
+         * Jean II */
+        if (self->max_header_size == IRCOMM_TTY_HDR_UNITIALISED) {
+                /* TTY will retry */
+                IRDA_DEBUG(2, __FUNCTION__ "() : not initialised\n");
+                return len;
+        }
+
        save_flags(flags);
        cli();
@@ -791,8 +805,12 @@ static int ircomm_tty_write_room(struct tty_struct *tty)
        ASSERT(self != NULL, return -1;);
        ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);

-        /* Check if we are allowed to transmit any data */
-        if (tty->hw_stopped)
+        /* Check if we are allowed to transmit any data.
+         * hw_stopped is the regular flow control.
+         * max_header_size tells us if the channel is initialised or not.
+         * Jean II */
+        if ((tty->hw_stopped) ||
+            (self->max_header_size == IRCOMM_TTY_HDR_UNITIALISED))
                ret = 0;
        else {
                save_flags(flags);
...
@@ -47,6 +47,7 @@
static void ircomm_tty_ias_register(struct ircomm_tty_cb *self);
static void ircomm_tty_discovery_indication(discovery_t *discovery,
+                                            DISCOVERY_MODE mode,
                                            void *priv);
static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id,
                                        struct ias_value *value, void *priv);
@@ -305,6 +306,7 @@ int ircomm_tty_send_initial_parameters(struct ircomm_tty_cb *self)
 *
 */
static void ircomm_tty_discovery_indication(discovery_t *discovery,
+                                            DISCOVERY_MODE mode,
                                            void *priv)
{
        struct ircomm_tty_cb *self;
@@ -312,6 +314,20 @@ static void ircomm_tty_discovery_indication(discovery_t *discovery,
        IRDA_DEBUG(2, __FUNCTION__"()\n");

+        /* Important note :
+         * We need to drop all passive discoveries.
+         * The LSAP management of IrComm is deficient and doesn't deal
+         * with the case of two instance connecting to each other
+         * simultaneously (it will deadlock in LMP).
+         * The proper fix would be to use the same technique as in IrNET,
+         * to have one server socket and separate instances for the
+         * connecting/connected socket.
+         * The workaround is to drop passive discovery, which drastically
+         * reduce the probability of this happening.
+         * Jean II */
+        if(mode == DISCOVERY_PASSIVE)
+                return;
+
        info.daddr = discovery->daddr;
        info.saddr = discovery->saddr;
...
...@@ -93,6 +93,9 @@ void ircomm_tty_change_speed(struct ircomm_tty_cb *self)
if (cflag & CRTSCTS) {
self->flags |= ASYNC_CTS_FLOW;
self->settings.flow_control |= IRCOMM_RTS_CTS_IN;
/* This got me. Bummer. Jean II */
if (self->service_type == IRCOMM_3_WIRE_RAW)
WARNING(__FUNCTION__ "(), enabling RTS/CTS on link that doesn't support it (3-wire-raw)\n");
} else {
self->flags &= ~ASYNC_CTS_FLOW;
self->settings.flow_control &= ~IRCOMM_RTS_CTS_IN;
...
...@@ -145,7 +145,9 @@ void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr)
* Remote device with IrLAN server support discovered
*
*/
void irlan_client_discovery_indication(discovery_t *discovery,
DISCOVERY_MODE mode,
void *priv)
{
struct irlan_cb *self;
__u32 saddr, daddr;
...@@ -155,6 +157,15 @@ void irlan_client_discovery_indication(discovery_t *discovery, void *priv)
ASSERT(irlan != NULL, return;);
ASSERT(discovery != NULL, return;);
/*
* I didn't check it, but I bet that IrLAN suffers from the same
* deficiency as IrComm and doesn't handle two instances
* simultaneously connecting to each other.
* Same workaround, drop passive discoveries.
* Jean II */
if(mode == DISCOVERY_PASSIVE)
return;
saddr = discovery->saddr;
daddr = discovery->daddr;
...
...@@ -174,6 +174,12 @@ static void irlap_poll_timer_expired(void *data)
irlap_do_event(self, POLL_TIMER_EXPIRED, NULL, NULL);
}
/*
* Calculate and set time before we will have to send back the pf bit
* to the peer. Used in primary.
* Make sure that state is XMIT_P/XMIT_S when calling this function
* (and that nobody has messed with the state). - Jean II
*/
void irlap_start_poll_timer(struct irlap_cb *self, int timeout)
{
ASSERT(self != NULL, return;);
...@@ -1163,15 +1169,26 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
self->ack_required = TRUE;
irlap_wait_min_turn_around(self, &self->qos_tx);
/*
* Important to switch state before calling
* upper layers
*/
irlap_next_state(self, LAP_XMIT_P);
/* Call higher layer *before* changing state
* to give them a chance to send data in the
* next LAP frame.
* Jean II */
irlap_data_indication(self, skb, FALSE);
/* XMIT states are the most dangerous state
* to be in, because user requests are
* processed directly and may change state.
* On the other hand, in NDM_P, those
* requests are queued and we will process
* them when we return to irlap_do_event().
* Jean II
*/
irlap_next_state(self, LAP_XMIT_P);
/* This is the last frame.
* Make sure it's always called in XMIT state.
* - Jean II */
irlap_start_poll_timer(self, self->poll_timeout);
}
break;
...@@ -1309,6 +1326,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
} else {
del_timer(&self->final_timer);
irlap_data_indication(self, skb, TRUE);
irlap_next_state(self, LAP_XMIT_P);
printk(__FUNCTION__ "(): RECV_UI_FRAME: next state %s\n", irlap_state[self->state]);
irlap_start_poll_timer(self, self->poll_timeout);
}
...
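The nrm_p hunks above replace the old switch-state-first order with: data indication first, then state switch, poll timer last. That ordering can be modelled in a few lines; the states are real irlap names but the struct and functions here are simplified stand-ins for the kernel's irlap_cb machinery:

#include <assert.h>
#include <stdio.h>

/* Simplified states; irlap.h enumerates many more. */
enum lap_state { LAP_NDM, LAP_XMIT_P, LAP_XMIT_S, LAP_NRM_P };

struct lap { enum lap_state state; int poll_timeout; };

/* The invariant the comment asks for: only arm the poll timer once
 * the machine is back in an XMIT state. */
static void start_poll_timer(struct lap *self, int timeout)
{
	assert(self->state == LAP_XMIT_P || self->state == LAP_XMIT_S);
	printf("poll timer armed: %d ms (state %d)\n",
	       timeout, self->state);
}

/* Higher layer gets its chance to queue data for the next LAP frame. */
static void data_indication(struct lap *self) { (void)self; }

int main(void)
{
	struct lap self = { LAP_NRM_P, 500 };

	data_indication(&self);			/* 1: upper layer first */
	self.state = LAP_XMIT_P;		/* 2: then switch state */
	start_poll_timer(&self, self.poll_timeout); /* 3: timer last */
	return 0;
}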
...@@ -732,7 +732,9 @@ void irlmp_do_expiry()
* On links which are connected, we can't do discovery
* anymore and can't refresh the log, so we freeze the
* discovery log to keep info about the device we are
* connected to.
* This info is mandatory if we want irlmp_connect_request()
* to work properly. - Jean II
*/
lap = (struct lap_cb *) hashbin_get_first(irlmp->links);
while (lap != NULL) {
...@@ -804,7 +806,7 @@ void irlmp_do_discovery(int nslots)
void irlmp_discovery_request(int nslots)
{
/* Return current cached discovery log */
irlmp_discovery_confirm(irlmp->cachelog, DISCOVERY_LOG);
/*
* Start a single discovery operation if discovery is not already
...@@ -907,7 +909,8 @@ void irlmp_check_services(discovery_t *discovery)
* partial/selective discovery based on the hints that it passed to IrLMP.
*/
static inline void
irlmp_notify_client(irlmp_client_t *client,
hashbin_t *log, DISCOVERY_MODE mode)
{
discovery_t *discovery;
...@@ -930,7 +933,7 @@ irlmp_notify_client(irlmp_client_t *client, hashbin_t *log)
* bits ;-)
*/
if (client->hint_mask & discovery->hints.word & 0x7f7f)
client->disco_callback(discovery, mode, client->priv);
discovery = (discovery_t *) hashbin_get_next(log);
}
...@@ -943,7 +946,7 @@ irlmp_notify_client(irlmp_client_t *client, hashbin_t *log)
* device it is, and give indication to the client(s)
*
*/
void irlmp_discovery_confirm(hashbin_t *log, DISCOVERY_MODE mode)
{
irlmp_client_t *client;
...@@ -957,7 +960,7 @@ void irlmp_discovery_confirm(hashbin_t *log)
client = (irlmp_client_t *) hashbin_get_first(irlmp->clients);
while (client != NULL) {
/* Check if we should notify client */
irlmp_notify_client(client, log, mode);
client = (irlmp_client_t *) hashbin_get_next(irlmp->clients);
}
...@@ -987,7 +990,8 @@ void irlmp_discovery_expiry(discovery_t *expiry)
/* Check if we should notify client */
if ((client->expir_callback) &&
(client->hint_mask & expiry->hints.word & 0x7f7f))
client->expir_callback(expiry, EXPIRY_TIMEOUT,
client->priv);
/* Next client */
client = (irlmp_client_t *) hashbin_get_next(irlmp->clients);
...
...@@ -459,6 +459,15 @@ static void irlmp_state_active(struct lap_cb *self, IRLMP_EVENT event,
LM_LAP_DISCONNECT_INDICATION,
NULL);
}
/* Force an expiry of the discovery log.
* Now that the LAP is free, the system may attempt to
* connect to another device. Unfortunately, our entries
* are stale. There is a small window (<3s) before the
* normal discovery runs, during which irlmp_connect_request()
* can get the wrong info, so make sure things get
* cleaned *NOW* ;-) - Jean II */
irlmp_do_expiry();
break;
default:
IRDA_DEBUG(0, __FUNCTION__ "(), Unknown event %s\n",
...
...@@ -378,7 +378,7 @@ void irlmp_link_discovery_indication(struct lap_cb *self,
/* Just handle it the same way as a discovery confirm,
* bypass the LM_LAP state machine (see below) */
irlmp_discovery_confirm(irlmp->cachelog, DISCOVERY_PASSIVE);
}
/*
...@@ -404,7 +404,7 @@ void irlmp_link_discovery_confirm(struct lap_cb *self, hashbin_t *log)
* 2) It doesn't affect the LM_LAP state
* 3) Faster, slimmer, simpler, ...
* Jean II */
irlmp_discovery_confirm(irlmp->cachelog, DISCOVERY_ACTIVE);
}
#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
...
...@@ -211,6 +211,12 @@
* o When receiving a disconnect indication, don't reenable the
* PPP Tx queue, this will trigger a reconnect. Instead, close
* the channel, which will kill pppd...
*
* v11 - 20.3.02 - Jean II
* o Oops! The v10 fix disabled IrNET retries and passive behaviour.
* Better fix in irnet_disconnect_indication():
* - if connected, kill pppd via hangup.
* - if not connected, reenable ppp Tx, which triggers IrNET retry.
*/
/***************************** INCLUDES *****************************/
...
...@@ -1070,7 +1070,7 @@ irnet_data_indication(void * instance,
* o attempted to connect, timeout
* o connected, link is broken, LAP has timeout
* o connected, other side close the link
* o connection request on the server not handled
*/
static void
irnet_disconnect_indication(void * instance,
...@@ -1121,20 +1121,31 @@ irnet_disconnect_indication(void * instance,
DEBUG(IRDA_CB_INFO, "Closing our TTP connection.\n");
irttp_close_tsap(self->tsap);
self->tsap = NULL;
/* Cleanup & close the PPP channel, which will kill pppd and the rest */
if(self->ppp_open)
ppp_unregister_channel(&self->chan);
self->ppp_open = 0;
}
/* Cleanup the socket in case we want to reconnect in ppp_output_wakeup() */
self->stsap_sel = 0;
self->daddr = DEV_ADDR_ANY;
self->tx_flow = FLOW_START;
/* Deal with the ppp instance if it's still alive */
if(self->ppp_open)
{
if(test_open)
{
/* If we were connected, cleanup & close the PPP channel,
* which will kill pppd (hangup) and the rest */
ppp_unregister_channel(&self->chan);
self->ppp_open = 0;
}
else
{
/* If we were trying to connect, flush (drain) ppp_generic
* Tx queue (most often we have blocked it), which will
* trigger another attempt to connect. If we are passive,
* this will empty the Tx queue after last try. */
ppp_output_wakeup(&self->chan);
}
}
DEXIT(IRDA_TCB_TRACE, "\n");
}
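The two branches above are the whole v11 fix. A toy user-space model of the decision follows; the struct and names are made up, standing in for the real ppp_unregister_channel()/ppp_output_wakeup() calls:

#include <stdio.h>

/* Toy model of irnet_disconnect_indication()'s v11 logic. */
struct toy_socket { int ppp_open; int was_connected; };

static void toy_disconnect_indication(struct toy_socket *self)
{
	if (!self->ppp_open)
		return;			/* ppp instance already gone */

	if (self->was_connected) {
		/* connected: unregister the channel, pppd gets hung up */
		printf("hangup: kill pppd\n");
		self->ppp_open = 0;
	} else {
		/* still connecting: wake the blocked ppp_generic Tx
		 * queue, which triggers another IrNET connect attempt
		 * (or just drains the queue if we are passive) */
		printf("wakeup: retry or drain\n");
	}
}

int main(void)
{
	struct toy_socket connecting = { 1, 0 }, connected = { 1, 1 };

	toy_disconnect_indication(&connecting);	/* retry path */
	toy_disconnect_indication(&connected);	/* hangup path */
	return 0;
}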
...@@ -1588,8 +1599,9 @@ irnet_discovervalue_confirm(int result,
* is too messy, so we leave that to user space...
*/
static void
irnet_discovery_indication(discovery_t * discovery,
DISCOVERY_MODE mode,
void * priv)
{
irnet_socket * self = &irnet_server.s;
...@@ -1627,6 +1639,7 @@ irnet_discovery_indication(discovery_t *discovery,
*/
static void
irnet_expiry_indication(discovery_t * expiry,
DISCOVERY_MODE mode,
void * priv)
{
irnet_socket * self = &irnet_server.s;
...
...@@ -151,9 +151,11 @@ static void
#ifdef DISCOVERY_EVENTS
static void
irnet_discovery_indication(discovery_t *,
DISCOVERY_MODE,
void *);
static void
irnet_expiry_indication(discovery_t *,
DISCOVERY_MODE,
void *);
#endif
/* -------------------------- PROC ENTRY -------------------------- */
...
...@@ -34,17 +34,19 @@
#define NET_IRDA 412 /* Random number */
enum { DISCOVERY=1, DEVNAME, DEBUG, FAST_POLL, DISCOVERY_SLOTS,
DISCOVERY_TIMEOUT, SLOT_TIMEOUT, MAX_BAUD_RATE, MIN_TX_TURN_TIME,
MAX_TX_DATA_SIZE, MAX_NOREPLY_TIME, WARN_NOREPLY_TIME,
LAP_KEEPALIVE_TIME };
extern int sysctl_discovery;
extern int sysctl_discovery_slots;
extern int sysctl_discovery_timeout;
extern int sysctl_slot_timeout;
extern int sysctl_fast_poll_increase;
int sysctl_compression = 0;
extern char sysctl_devname[];
extern int sysctl_max_baud_rate;
extern int sysctl_min_tx_turn_time;
extern int sysctl_max_tx_data_size;
extern int sysctl_max_noreply_time;
extern int sysctl_warn_noreply_time;
extern int sysctl_lap_keepalive_time;
...@@ -64,6 +66,8 @@ static int max_max_baud_rate = 16000000; /* See qos.c - IrLAP spec */
static int min_max_baud_rate = 2400;
static int max_min_tx_turn_time = 10000; /* See qos.c - IrLAP spec */
static int min_min_tx_turn_time = 0;
static int max_max_tx_data_size = 2048; /* See qos.c - IrLAP spec */
static int min_max_tx_data_size = 64;
static int max_max_noreply_time = 40; /* See qos.c - IrLAP spec */
static int min_max_noreply_time = 3;
static int max_warn_noreply_time = 3; /* 3s == standard */
...@@ -117,6 +121,9 @@ static ctl_table irda_table[] = {
{ MIN_TX_TURN_TIME, "min_tx_turn_time", &sysctl_min_tx_turn_time,
sizeof(int), 0644, NULL, &proc_dointvec_minmax, &sysctl_intvec,
NULL, &min_min_tx_turn_time, &max_min_tx_turn_time },
{ MAX_TX_DATA_SIZE, "max_tx_data_size", &sysctl_max_tx_data_size,
sizeof(int), 0644, NULL, &proc_dointvec_minmax, &sysctl_intvec,
NULL, &min_max_tx_data_size, &max_max_tx_data_size },
{ MAX_NOREPLY_TIME, "max_noreply_time", &sysctl_max_noreply_time,
sizeof(int), 0644, NULL, &proc_dointvec_minmax, &sysctl_intvec,
NULL, &min_max_noreply_time, &max_max_noreply_time },
...
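With the table entry above, the tunable should surface as /proc/sys/net/irda/max_tx_data_size (an assumption: that the irda table is registered under the net sysctl root, as the NET_IRDA id suggests). A small sketch of driving it from user space:

#include <stdio.h>

int main(void)
{
	const char *path = "/proc/sys/net/irda/max_tx_data_size";
	FILE *f;
	int val;

	/* Read the current limit. */
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%d", &val) == 1)
		printf("max_tx_data_size = %d\n", val);
	fclose(f);

	/* Lower it; proc_dointvec_minmax rejects values outside
	 * [64, 2048] per the min/max bounds registered above. */
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "%d\n", 2042);
	fclose(f);
	return 0;
}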
...@@ -57,10 +57,26 @@ int sysctl_max_noreply_time = 12;
* Nonzero values (usec) are used as lower limit to the per-connection
* mtt value which was announced by the other end during negotiation.
* Might be helpful if the peer device provides too short mtt.
* Default is 10us which means using the unmodified value given by the
* peer except if it's 0 (0 is likely a bug in the other stack).
*/
unsigned sysctl_min_tx_turn_time = 10;
/*
* Maximum data size to be used in transmission in payload of LAP frame.
* There is a bit of confusion in the IrDA spec:
* The LAP spec defines the payload of a LAP frame (I field) to be
* 2048 bytes max (IrLAP 1.1, chapt 6.6.5, p40).
* On the other hand, the PHY mentions frames of 2048 bytes max (IrPHY
* 1.2, chapt 5.3.2.1, p41). But this number includes the LAP header
* (2 bytes) and CRC (32 bits at 4 Mb/s). So, for the I field (LAP
* payload), that's only 2042 bytes. Oops!
* I've had trouble transmitting 2048 byte frames with USB
* dongles and nsc-ircc at 4 Mb/s, so adjust to 2042... I don't know
* if this bug applies only to 2048 byte frames or to all negotiated
* frame sizes, but all hardware seems to support "2048 byte" frames.
* You can use the sysctl to play with this value anyway.
* Jean II */
unsigned sysctl_max_tx_data_size = 2042;
static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get);
static int irlap_param_link_disconnect(void *instance, irda_param_t *parm,
...@@ -355,10 +371,10 @@ void irlap_adjust_qos_settings(struct qos_info *qos)
while ((qos->data_size.value > line_capacity) && (index > 0)) {
qos->data_size.value = data_sizes[index--];
IRDA_DEBUG(2, __FUNCTION__
"(), redusing data size to %d\n", "(), reducing data size to %d\n",
qos->data_size.value);
}
#else /* Use method described in section 6.6.11 of IrLAP */
while (irlap_requested_line_capacity(qos) > line_capacity) {
ASSERT(index != 0, return;);
...@@ -366,18 +382,24 @@ void irlap_adjust_qos_settings(struct qos_info *qos)
if (qos->window_size.value > 1) {
qos->window_size.value--;
IRDA_DEBUG(2, __FUNCTION__
"(), redusing window size to %d\n", "(), reducing window size to %d\n",
qos->window_size.value);
} else if (index > 1) {
qos->data_size.value = data_sizes[index--];
IRDA_DEBUG(2, __FUNCTION__
"(), redusing data size to %d\n", "(), reducing data size to %d\n",
qos->data_size.value);
} else {
WARNING(__FUNCTION__ "(), nothing more we can do!\n");
}
}
#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
/*
* Fix tx data size according to user limits - Jean II
*/
if (qos->data_size.value > sysctl_max_tx_data_size)
/* Allow non-discrete adjustment to avoid losing capacity */
qos->data_size.value = sysctl_max_tx_data_size;
}
/*
...
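The arithmetic behind the 2042 default and the clamp above, spelled out as a trivial sketch (the constants come straight from the qos.c comment):

#include <stdio.h>

int main(void)
{
	int phy_frame_max = 2048;	/* IrPHY 1.2, chapt 5.3.2.1 */
	int lap_header = 2;		/* address + control bytes */
	int crc_4mbps = 4;		/* 32-bit CRC at 4 Mb/s */
	int payload = phy_frame_max - lap_header - crc_4mbps;
	int negotiated = 2048;		/* what the peer agreed to */

	/* Same clamp as irlap_adjust_qos_settings(): non-discrete,
	 * so capacity above the limit is trimmed rather than rounded
	 * down to the next discrete IrLAP size. */
	if (negotiated > payload)
		negotiated = payload;
	printf("usable I-field payload: %d bytes\n", negotiated); /* 2042 */
	return 0;
}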