Commit 32d62496 authored by Anton Blanchard

Merge bk://ppc.bkbits.net/for-linus-ppc64

into samba.org:/home/anton/ppc64/for-linus-ppc64
parents f7cee096 348ed50d
 VERSION = 2
 PATCHLEVEL = 5
-SUBLEVEL = 45
+SUBLEVEL = 46
 EXTRAVERSION =
 # *DOCUMENTATION*
......
@@ -117,6 +117,18 @@ quirk_cypress(struct pci_dev *dev)
 	}
 }
 
+/* Called for each device after PCI setup is done. */
+static void __init
+pcibios_fixup_final(struct pci_dev *dev)
+{
+	unsigned int class = dev->class >> 8;
+
+	if (class == PCI_CLASS_BRIDGE_ISA || class == PCI_CLASS_BRIDGE_EISA) {
+		dev->dma_mask = MAX_ISA_DMA_ADDRESS - 1;
+		isa_bridge = dev;
+	}
+}
+
 struct pci_fixup pcibios_fixups[] __initdata = {
 	{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375,
 	  quirk_eisa_bridge },
@@ -126,6 +138,8 @@ struct pci_fixup pcibios_fixups[] __initdata = {
 	  quirk_ali_ide_ports },
 	{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693,
 	  quirk_cypress },
+	{ PCI_FIXUP_FINAL, PCI_ANY_ID, PCI_ANY_ID,
+	  pcibios_fixup_final },
 	{ 0 }
 };
......
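A note on the hunk above: the PCI_FIXUP_FINAL entry makes pcibios_fixup_final run once per device after bus setup, pinning the ISA/EISA bridge in isa_bridge and shrinking its dma_mask to the ISA limit. A minimal sketch of the resulting mask arithmetic, assuming the classic 16MB (24-bit) ISA DMA limit for MAX_ISA_DMA_ADDRESS — the per-platform headers define the real value:

	/* Sketch only: the mask the quirk installs, under the
	 * assumption MAX_ISA_DMA_ADDRESS == 16MB (24-bit ISA). */
	#include <stdio.h>

	int main(void)
	{
		unsigned long max_isa_dma_address = 0x01000000UL; /* assumed 16MB */
		unsigned long dma_mask = max_isa_dma_address - 1; /* 0x00ffffff */

		/* Matches the new fixed ISA_DMA_MASK in pci_iommu.c below. */
		printf("isa bridge dma_mask = %#lx\n", dma_mask);
		return 0;
	}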
@@ -30,9 +30,7 @@
 #define DEBUG_NODIRECT 0
 #define DEBUG_FORCEDAC 0
 
-/* Most Alphas support 32-bit ISA DMA. Exceptions are XL, Ruffian,
-   Nautilus, Sable, and Alcor (see asm-alpha/dma.h for details). */
-#define ISA_DMA_MASK (MAX_DMA_ADDRESS - IDENT_ADDR - 1)
+#define ISA_DMA_MASK 0x00ffffff
 
 static inline unsigned long
 mk_iommu_pte(unsigned long paddr)
@@ -189,6 +187,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
 	long npages, dma_ofs, i;
 	unsigned long paddr;
 	dma_addr_t ret;
+	unsigned int align = 0;
 
 	paddr = __pa(cpu_addr);
@@ -216,27 +215,27 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
 	}
 
 	/* If the machine doesn't define a pci_tbi routine, we have to
-	   assume it doesn't support sg mapping. */
+	   assume it doesn't support sg mapping, and, since we tried to
+	   use direct_map above, it now must be considered an error. */
 	if (! alpha_mv.mv_pci_tbi) {
-		static int been_here = 0;
+		static int been_here = 0; /* Only print the message once. */
 		if (!been_here) {
-			printk(KERN_WARNING "pci_map_single: no hw sg, using "
-			       "direct map when possible\n");
+			printk(KERN_WARNING "pci_map_single: no HW sg\n");
 			been_here = 1;
 		}
-		if (paddr + size <= __direct_map_size)
-			return (paddr + __direct_map_base);
-		else
-			return 0;
+		return 0;
 	}
 
 	arena = hose->sg_pci;
 	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
 		arena = hose->sg_isa;
 
 	npages = calc_npages((paddr & ~PAGE_MASK) + size);
-	/* Force allocation to 64KB boundary for all ISA devices. */
-	dma_ofs = iommu_arena_alloc(arena, npages, pdev ? 8 : 0);
+
+	/* Force allocation to 64KB boundary for ISA bridges. */
+	if (pdev && pdev == isa_bridge)
+		align = 8;
+	dma_ofs = iommu_arena_alloc(arena, npages, align);
 	if (dma_ofs < 0) {
 		printk(KERN_WARNING "pci_map_single failed: "
 		       "could not allocate dma page tables\n");
@@ -364,8 +363,10 @@ pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
 {
 	void *cpu_addr;
 	long order = get_order(size);
+	int gfp = GFP_ATOMIC;
 
-	cpu_addr = (void *)__get_free_pages(GFP_ATOMIC, order);
+try_again:
+	cpu_addr = (void *)__get_free_pages(gfp, order);
 	if (! cpu_addr) {
 		printk(KERN_INFO "pci_alloc_consistent: "
 		       "get_free_pages failed from %p\n",
@@ -379,7 +380,12 @@ pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
 	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
 	if (*dma_addrp == 0) {
 		free_pages((unsigned long)cpu_addr, order);
-		return NULL;
+		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
+			return NULL;
+		/* The address doesn't fit required mask and we
+		   do not have iommu. Try again with GFP_DMA. */
+		gfp |= GFP_DMA;
+		goto try_again;
 	}
 
 	DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n",
@@ -727,8 +733,8 @@ pci_dma_supported(struct pci_dev *pdev, u64 mask)
 	   the entire direct mapped space or the total system memory as
 	   shifted by the map base */
 	if (__direct_map_size != 0
-	    && (__direct_map_base + __direct_map_size - 1 <= mask
-		|| __direct_map_base + (max_low_pfn<<PAGE_SHIFT)-1 <= mask))
+	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
+		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
 		return 1;
 
 	/* Check that we have a scatter-gather arena that fits. */
@@ -740,6 +746,10 @@ pci_dma_supported(struct pci_dev *pdev, u64 mask)
 	if (arena && arena->dma_base + arena->size - 1 <= mask)
 		return 1;
 
+	/* As last resort try ZONE_DMA. */
+	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
+		return 1;
+
 	return 0;
 }
......
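The `align = 8` in pci_map_single_1 above encodes a 64KB boundary: each IOMMU arena entry maps one page, so with Alpha's 8KB pages an alignment of 8 entries is 8 * 8KB = 64KB, which keeps a mapping from straddling the 64KB segment that legacy ISA DMA controllers cannot cross. A small sketch of that arithmetic — the names here are illustrative, not the kernel's:

	/* Illustrative only: why align == 8 arena entries gives a
	 * 64KB boundary when PAGE_SHIFT is 13 (8KB pages on Alpha). */
	#define SKETCH_PAGE_SHIFT	13	/* assumed: Alpha 8KB pages */
	#define SKETCH_ALIGN_ENTRIES	8

	static unsigned long sketch_alignment_bytes(void)
	{
		/* 8 entries * 8KB per entry = 64KB: one ISA DMA segment. */
		return (unsigned long)SKETCH_ALIGN_ENTRIES << SKETCH_PAGE_SHIFT;
	}

The pci_alloc_consistent change in the same file complements this: on machines with no IOMMU, a buffer that misses the device's mask is freed and reallocated with GFP_DMA so the pages land in ZONE_DMA, below the ISA limit.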
@@ -253,7 +253,7 @@ struct alpha_machine_vector alcor_mv __initmv = {
 	DO_CIA_IO,
 	DO_CIA_BUS,
 	.machine_check = cia_machine_check,
-	.max_dma_address = ALPHA_ALCOR_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_ALCOR_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = EISA_DEFAULT_IO_BASE,
 	.min_mem_address = CIA_DEFAULT_MEM_BASE,
@@ -283,7 +283,7 @@ struct alpha_machine_vector xlt_mv __initmv = {
 	DO_CIA_IO,
 	DO_CIA_BUS,
 	.machine_check = cia_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = EISA_DEFAULT_IO_BASE,
 	.min_mem_address = CIA_DEFAULT_MEM_BASE,
......
@@ -327,7 +327,7 @@ struct alpha_machine_vector cabriolet_mv __initmv = {
 	DO_APECS_IO,
 	DO_APECS_BUS,
 	.machine_check = apecs_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
@@ -354,7 +354,7 @@ struct alpha_machine_vector eb164_mv __initmv = {
 	DO_CIA_IO,
 	DO_CIA_BUS,
 	.machine_check = cia_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = CIA_DEFAULT_MEM_BASE,
@@ -380,7 +380,7 @@ struct alpha_machine_vector eb66p_mv __initmv = {
 	DO_LCA_IO,
 	DO_LCA_BUS,
 	.machine_check = lca_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
@@ -405,7 +405,7 @@ struct alpha_machine_vector lx164_mv __initmv = {
 	DO_PYXIS_IO,
 	DO_CIA_BUS,
 	.machine_check = cia_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = DEFAULT_MEM_BASE,
 	.pci_dac_offset = PYXIS_DAC_OFFSET,
@@ -432,7 +432,7 @@ struct alpha_machine_vector pc164_mv __initmv = {
 	DO_CIA_IO,
 	DO_CIA_BUS,
 	.machine_check = cia_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = CIA_DEFAULT_MEM_BASE,
......
@@ -572,7 +572,7 @@ struct alpha_machine_vector dp264_mv __initmv = {
 	DO_TSUNAMI_IO,
 	DO_TSUNAMI_BUS,
 	.machine_check = tsunami_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = DEFAULT_MEM_BASE,
 	.pci_dac_offset = TSUNAMI_DAC_OFFSET,
@@ -597,7 +597,7 @@ struct alpha_machine_vector monet_mv __initmv = {
 	DO_TSUNAMI_IO,
 	DO_TSUNAMI_BUS,
 	.machine_check = tsunami_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = DEFAULT_MEM_BASE,
 	.pci_dac_offset = TSUNAMI_DAC_OFFSET,
@@ -621,7 +621,7 @@ struct alpha_machine_vector webbrick_mv __initmv = {
 	DO_TSUNAMI_IO,
 	DO_TSUNAMI_BUS,
 	.machine_check = tsunami_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = DEFAULT_MEM_BASE,
 	.pci_dac_offset = TSUNAMI_DAC_OFFSET,
@@ -645,7 +645,7 @@ struct alpha_machine_vector clipper_mv __initmv = {
 	DO_TSUNAMI_IO,
 	DO_TSUNAMI_BUS,
 	.machine_check = tsunami_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = DEFAULT_MEM_BASE,
 	.pci_dac_offset = TSUNAMI_DAC_OFFSET,
@@ -674,7 +674,7 @@ struct alpha_machine_vector shark_mv __initmv = {
 	DO_TSUNAMI_IO,
 	DO_TSUNAMI_BUS,
 	.machine_check = tsunami_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = DEFAULT_MEM_BASE,
 	.pci_dac_offset = TSUNAMI_DAC_OFFSET,
......
@@ -214,7 +214,7 @@ struct alpha_machine_vector eb64p_mv __initmv = {
 	DO_APECS_IO,
 	DO_APECS_BUS,
 	.machine_check = apecs_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
@@ -240,7 +240,7 @@ struct alpha_machine_vector eb66_mv __initmv = {
 	DO_LCA_IO,
 	DO_LCA_BUS,
 	.machine_check = lca_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
......
@@ -231,7 +231,7 @@ struct alpha_machine_vector eiger_mv __initmv = {
 	DO_TSUNAMI_IO,
 	DO_TSUNAMI_BUS,
 	.machine_check = tsunami_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = DEFAULT_MEM_BASE,
 	.pci_dac_offset = TSUNAMI_DAC_OFFSET,
......
@@ -219,6 +219,11 @@ static void __init
 jensen_init_arch(void)
 {
 	struct pci_controller *hose;
+#ifdef CONFIG_PCI
+	static struct pci_dev fake_isa_bridge = { dma_mask: 0xffffffffUL, };
+
+	isa_bridge = &fake_isa_bridge;
+#endif
 
 	/* Create a hose so that we can report i/o base addresses to
 	   userland. */
@@ -257,7 +262,7 @@ struct alpha_machine_vector jensen_mv __initmv = {
 	IO_LITE(JENSEN,jensen),
 	BUS(jensen),
 	.machine_check = jensen_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.rtc_port = 0x170,
 	.nr_irqs = 16,
......
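The fake_isa_bridge initializer in the Jensen hunk above uses GCC's legacy `field:` labeled-initializer syntax, common in kernels of this era; the C99 designated-initializer spelling is equivalent. A stand-in sketch (the struct here is illustrative, not the real struct pci_dev):

	struct sketch_dev {
		unsigned long dma_mask;
	};

	/* GNU legacy form:  { dma_mask: 0xffffffffUL, }
	 * C99 form, same effect: */
	static struct sketch_dev fake_isa_bridge = {
		.dma_mask = 0xffffffffUL,	/* full 32-bit mask: Jensen has no
						   real ISA bridge to constrain */
	};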
@@ -269,7 +269,7 @@ struct alpha_machine_vector miata_mv __initmv = {
 	DO_PYXIS_IO,
 	DO_CIA_BUS,
 	.machine_check = cia_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = DEFAULT_MEM_BASE,
 	.pci_dac_offset = PYXIS_DAC_OFFSET,
......
@@ -223,7 +223,7 @@ struct alpha_machine_vector mikasa_mv __initmv = {
 	DO_APECS_IO,
 	DO_APECS_BUS,
 	.machine_check = mikasa_apecs_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
@@ -248,7 +248,7 @@ struct alpha_machine_vector mikasa_primo_mv __initmv = {
 	DO_CIA_IO,
 	DO_CIA_BUS,
 	.machine_check = cia_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = CIA_DEFAULT_MEM_BASE,
......
@@ -185,7 +185,7 @@ struct alpha_machine_vector nautilus_mv __initmv = {
 	DO_IRONGATE_IO,
 	DO_IRONGATE_BUS,
 	.machine_check = nautilus_machine_check,
-	.max_dma_address = ALPHA_NAUTILUS_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = IRONGATE_DEFAULT_MEM_BASE,
......
@@ -305,7 +305,7 @@ struct alpha_machine_vector noritake_mv __initmv = {
 	DO_APECS_IO,
 	DO_APECS_BUS,
 	.machine_check = noritake_apecs_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = EISA_DEFAULT_IO_BASE,
 	.min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
@@ -330,7 +330,7 @@ struct alpha_machine_vector noritake_primo_mv __initmv = {
 	DO_CIA_IO,
 	DO_CIA_BUS,
 	.machine_check = cia_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = EISA_DEFAULT_IO_BASE,
 	.min_mem_address = CIA_DEFAULT_MEM_BASE,
......
@@ -252,7 +252,7 @@ struct alpha_machine_vector rawhide_mv __initmv = {
 	DO_MCPCIA_IO,
 	DO_MCPCIA_BUS,
 	.machine_check = mcpcia_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = MCPCIA_DEFAULT_MEM_BASE,
 	.pci_dac_offset = MCPCIA_DAC_OFFSET,
......
@@ -219,7 +219,7 @@ struct alpha_machine_vector ruffian_mv __initmv = {
 	DO_PYXIS_IO,
 	DO_CIA_BUS,
 	.machine_check = cia_machine_check,
-	.max_dma_address = ALPHA_RUFFIAN_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_RUFFIAN_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = DEFAULT_MEM_BASE,
 	.pci_dac_offset = PYXIS_DAC_OFFSET,
......
@@ -203,7 +203,7 @@ struct alpha_machine_vector rx164_mv __initmv = {
 	DO_POLARIS_IO,
 	DO_POLARIS_BUS,
 	.machine_check = polaris_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = DEFAULT_MEM_BASE,
......
@@ -290,7 +290,7 @@ struct alpha_machine_vector sable_mv __initmv = {
 	DO_T2_IO,
 	DO_T2_BUS,
 	.machine_check = t2_machine_check,
-	.max_dma_address = ALPHA_SABLE_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_SABLE_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = EISA_DEFAULT_IO_BASE,
 	.min_mem_address = T2_DEFAULT_MEM_BASE,
@@ -322,7 +322,7 @@ struct alpha_machine_vector sable_gamma_mv __initmv = {
 	DO_T2_IO,
 	DO_T2_BUS,
 	.machine_check = t2_machine_check,
-	.max_dma_address = ALPHA_SABLE_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_SABLE_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = EISA_DEFAULT_IO_BASE,
 	.min_mem_address = T2_DEFAULT_MEM_BASE,
......
@@ -258,7 +258,7 @@ struct alpha_machine_vector alphabook1_mv __initmv = {
 	DO_LCA_IO,
 	DO_LCA_BUS,
 	.machine_check = lca_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
@@ -289,7 +289,7 @@ struct alpha_machine_vector avanti_mv __initmv = {
 	DO_APECS_IO,
 	DO_APECS_BUS,
 	.machine_check = apecs_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
@@ -318,7 +318,7 @@ struct alpha_machine_vector noname_mv __initmv = {
 	DO_LCA_IO,
 	DO_LCA_BUS,
 	.machine_check = lca_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
@@ -356,7 +356,7 @@ struct alpha_machine_vector p2k_mv __initmv = {
 	DO_LCA_IO,
 	DO_LCA_BUS,
 	.machine_check = lca_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
@@ -385,7 +385,7 @@ struct alpha_machine_vector xl_mv __initmv = {
 	DO_APECS_IO,
 	BUS(apecs),
 	.machine_check = apecs_machine_check,
-	.max_dma_address = ALPHA_XL_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_XL_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = XL_DEFAULT_MEM_BASE,
......
@@ -160,7 +160,7 @@ struct alpha_machine_vector sx164_mv __initmv = {
 	DO_PYXIS_IO,
 	DO_CIA_BUS,
 	.machine_check = cia_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = DEFAULT_MEM_BASE,
 	.pci_dac_offset = PYXIS_DAC_OFFSET,
......
@@ -275,7 +275,7 @@ struct alpha_machine_vector takara_mv __initmv = {
 	DO_CIA_IO,
 	DO_CIA_BUS,
 	.machine_check = cia_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = CIA_DEFAULT_MEM_BASE,
......
@@ -370,7 +370,7 @@ struct alpha_machine_vector privateer_mv __initmv = {
 	DO_TITAN_IO,
 	DO_TITAN_BUS,
 	.machine_check = privateer_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = DEFAULT_MEM_BASE,
 	.pci_dac_offset = TITAN_DAC_OFFSET,
......
@@ -339,7 +339,7 @@ struct alpha_machine_vector wildfire_mv __initmv = {
 	DO_WILDFIRE_IO,
 	DO_WILDFIRE_BUS,
 	.machine_check = wildfire_machine_check,
-	.max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
 	.min_mem_address = DEFAULT_MEM_BASE,
......
@@ -479,26 +479,19 @@ config CPU_FREQ
 	  If in doubt, say N.
 
 config CPU_FREQ_24_API
-	bool "/proc/sys/cpu/ interface (2.4.)"
+	bool "/proc/sys/cpu/ interface (2.4. / OLD)"
 	depends on CPU_FREQ
-	---help---
+	help
 	  This enables the /proc/sys/cpu/ sysctl interface for controlling
-	  CPUFreq, as known from the 2.4.-kernel patches for CPUFreq. Note
-	  that some drivers do not support this interface or offer less
-	  functionality.
-
-	  If you say N here, you'll be able to control CPUFreq using the
-	  new /proc/cpufreq interface.
+	  CPUFreq, as known from the 2.4.-kernel patches for CPUFreq. 2.5
+	  uses /proc/cpufreq instead. Please note that some drivers do not
+	  work well with the 2.4. /proc/sys/cpu sysctl interface, so if in
+	  doubt, say N here.
 
 	  For details, take a look at linux/Documentation/cpufreq.
 
 	  If in doubt, say N.
 
-config CPU_FREQ_26_API
-	bool
-	depends on CPU_FREQ && !CPU_FREQ_24_API
-	default y
-
 config X86_POWERNOW_K6
 	tristate "AMD Mobile K6-2/K6-3 PowerNow!"
 	depends on CPU_FREQ
@@ -562,7 +555,7 @@ config X86_P4_CLOCKMOD
 
 config X86_LONGRUN
 	tristate "Transmeta LongRun"
-	depends on CPU_FREQ && !CPU_FREQ_24_API
+	depends on CPU_FREQ
 	help
 	  This adds the CPUFreq driver for Transmeta Crusoe processors which
 	  support LongRun.
......
@@ -251,6 +251,11 @@ static int __init longrun_init(void)
 
 	longrun_get_policy(&driver->policy[0]);
 
+#ifdef CONFIG_CPU_FREQ_24_API
+	driver->cpu_min_freq = longrun_low_freq;
+	driver->cpu_cur_freq[0] = longrun_high_freq; /* dummy value */
+#endif
+
 	driver->verify = &longrun_verify_policy;
 	driver->setpolicy = &longrun_set_policy;
 
 	result = cpufreq_register(driver);
......
@@ -50,27 +50,35 @@
 /*
  * Note: pte   --> Linux PTE
  *       HPTE  --> PowerPC Hashed Page Table Entry
+ *
+ * Execution context:
+ *   htab_initialize is called with the MMU off (of course), but
+ *   the kernel has been copied down to zero so it can directly
+ *   reference global data. At this point it is very difficult
+ *   to print debug info.
+ *
  */
 
 HTAB htab_data = {NULL, 0, 0, 0, 0};
 
 extern unsigned long _SDR1;
-extern unsigned long klimit;
-
-extern unsigned long reloc_offset(void);
-#define PTRRELOC(x)	((typeof(x))((unsigned long)(x) - offset))
-#define PTRUNRELOC(x)	((typeof(x))((unsigned long)(x) + offset))
-#define RELOC(x)	(*PTRRELOC(&(x)))
 
 #define KB (1024)
 #define MB (1024*KB)
 
+static inline void
+loop_forever(void)
+{
+	volatile unsigned long x = 1;
+	for(;x;x|=1)
+		;
+}
+
 static inline void
 create_pte_mapping(unsigned long start, unsigned long end,
-		   unsigned long mode, unsigned long mask, int large)
+		   unsigned long mode, int large)
 {
-	unsigned long addr, offset = reloc_offset();
-	HTAB *_htab_data = PTRRELOC(&htab_data);
-	HPTE *htab = (HPTE *)__v2a(_htab_data->htab);
+	unsigned long addr;
 	unsigned int step;
 
 	if (large)
@@ -79,14 +87,33 @@ create_pte_mapping(unsigned long start, unsigned long end,
 		step = 4*KB;
 
 	for (addr = start; addr < end; addr += step) {
+		unsigned long vpn, hash, hpteg;
 		unsigned long vsid = get_kernel_vsid(addr);
 		unsigned long va = (vsid << 28) | (addr & 0xfffffff);
+		int ret;
+
+		if (large)
+			vpn = va >> LARGE_PAGE_SHIFT;
+		else
+			vpn = va >> PAGE_SHIFT;
+
+		hash = hpt_hash(vpn, large);
+
+		hpteg = ((hash & htab_data.htab_hash_mask)*HPTES_PER_GROUP);
 
 		if (naca->platform == PLATFORM_PSERIES_LPAR)
-			pSeries_lpar_make_pte(htab, va,
-				(unsigned long)__v2a(addr), mode, mask, large);
+			ret = pSeries_lpar_hpte_insert(hpteg, va,
+				(unsigned long)__v2a(addr) >> PAGE_SHIFT,
+				0, mode, 1, large);
 		else
-			pSeries_make_pte(htab, va,
-				(unsigned long)__v2a(addr), mode, mask, large);
+			ret = pSeries_hpte_insert(hpteg, va,
+				(unsigned long)__v2a(addr) >> PAGE_SHIFT,
+				0, mode, 1, large);
+
+		if (ret == -1) {
+			ppc64_terminate_msg(0x20, "create_pte_mapping");
+			loop_forever();
+		}
 	}
 }
@@ -95,16 +122,13 @@ htab_initialize(void)
 {
 	unsigned long table, htab_size_bytes;
 	unsigned long pteg_count;
-	unsigned long mode_rw, mask;
-	unsigned long offset = reloc_offset();
-	struct naca_struct *_naca = RELOC(naca);
-	HTAB *_htab_data = PTRRELOC(&htab_data);
+	unsigned long mode_rw;
 
 	/*
 	 * Calculate the required size of the htab. We want the number of
 	 * PTEGs to equal one half the number of real pages.
 	 */
-	htab_size_bytes = 1UL << _naca->pftSize;
+	htab_size_bytes = 1UL << naca->pftSize;
 	pteg_count = htab_size_bytes >> 7;
 
 	/* For debug, make the HTAB 1/8 as big as it normally would be. */
@@ -113,42 +137,44 @@ htab_initialize(void)
 		htab_size_bytes = pteg_count << 7;
 	}
 
-	_htab_data->htab_num_ptegs = pteg_count;
-	_htab_data->htab_hash_mask = pteg_count - 1;
+	htab_data.htab_num_ptegs = pteg_count;
+	htab_data.htab_hash_mask = pteg_count - 1;
 
 	if (naca->platform == PLATFORM_PSERIES) {
 		/* Find storage for the HPT. Must be contiguous in
 		 * the absolute address space.
 		 */
 		table = lmb_alloc(htab_size_bytes, htab_size_bytes);
-		if ( !table )
-			panic("ERROR, cannot find space for HPTE\n");
-		_htab_data->htab = (HPTE *)__a2v(table);
+		if ( !table ) {
+			ppc64_terminate_msg(0x20, "hpt space");
+			loop_forever();
+		}
+		htab_data.htab = (HPTE *)__a2v(table);
 
 		/* htab absolute addr + encoded htabsize */
-		RELOC(_SDR1) = table + __ilog2(pteg_count) - 11;
+		_SDR1 = table + __ilog2(pteg_count) - 11;
 
 		/* Initialize the HPT with no entries */
 		memset((void *)table, 0, htab_size_bytes);
 	} else {
-		_htab_data->htab = NULL;
-		RELOC(_SDR1) = 0;
+		/* Using a hypervisor which owns the htab */
+		htab_data.htab = NULL;
+		_SDR1 = 0;
 	}
 
 	mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX;
-	mask = pteg_count-1;
 
 	/* XXX we currently map kernel text rw, should fix this */
-	if (cpu_has_largepage() && _naca->physicalMemorySize > 256*MB) {
+	if (cpu_has_largepage() && naca->physicalMemorySize > 256*MB) {
 		create_pte_mapping((unsigned long)KERNELBASE,
-				   KERNELBASE + 256*MB, mode_rw, mask, 0);
+				   KERNELBASE + 256*MB, mode_rw, 0);
 		create_pte_mapping((unsigned long)KERNELBASE + 256*MB,
-				   KERNELBASE + (_naca->physicalMemorySize),
-				   mode_rw, mask, 1);
+				   KERNELBASE + (naca->physicalMemorySize),
+				   mode_rw, 1);
 	} else {
 		create_pte_mapping((unsigned long)KERNELBASE,
-				   KERNELBASE+(_naca->physicalMemorySize),
-				   mode_rw, mask, 0);
+				   KERNELBASE+(naca->physicalMemorySize),
+				   mode_rw, 0);
 	}
 }
 
 #undef KB
@@ -194,7 +220,7 @@ static inline unsigned long computeHptePP(unsigned long pte)
  * to be valid via Linux page tables, return 1. If handled return 0
  */
 int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
-		pte_t *ptep, unsigned long trap)
+		pte_t *ptep, unsigned long trap, int local)
 {
 	unsigned long va, vpn;
 	unsigned long newpp, prpn;
@@ -202,9 +228,16 @@ int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
 	long slot;
 	pte_t old_pte, new_pte;
 
+	/* XXX fix for large ptes */
+	int large = 0;
+
 	/* Search the Linux page table for a match with va */
 	va = (vsid << 28) | (ea & 0x0fffffff);
-	vpn = va >> PAGE_SHIFT;
+
+	if (large)
+		vpn = va >> LARGE_PAGE_SHIFT;
+	else
+		vpn = va >> PAGE_SHIFT;
 
 	/*
 	 * If no pte found or not present, send the problem up to
@@ -266,16 +299,14 @@ int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
 		/* There MIGHT be an HPTE for this pte */
 		unsigned long hash, slot, secondary;
 
-		/* XXX fix large pte flag */
-		hash = hpt_hash(vpn, 0);
+		hash = hpt_hash(vpn, large);
 
 		secondary = (pte_val(old_pte) & _PAGE_SECONDARY) >> 15;
 		if (secondary)
 			hash = ~hash;
 		slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
 		slot += (pte_val(old_pte) & _PAGE_GROUP_IX) >> 12;
 
-		/* XXX fix large pte flag */
-		if (ppc_md.hpte_updatepp(slot, newpp, va, 0) == -1)
+		if (ppc_md.hpte_updatepp(slot, newpp, va, large, local) == -1)
 			pte_val(old_pte) &= ~_PAGE_HPTEFLAGS;
 		else
 			if (!pte_same(old_pte, new_pte))
@@ -283,8 +314,7 @@ int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
 	}
 
 	if (likely(!(pte_val(old_pte) & _PAGE_HASHPTE))) {
-		/* XXX fix large pte flag */
-		unsigned long hash = hpt_hash(vpn, 0);
+		unsigned long hash = hpt_hash(vpn, large);
 		unsigned long hpte_group;
 
 		prpn = pte_val(old_pte) >> PTE_SHIFT;
@@ -299,23 +329,21 @@ int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
 		/* copy appropriate flags from linux pte */
 		hpteflags = (pte_val(new_pte) & 0x1f8) | newpp;
 
-		/* XXX fix large pte flag */
-		slot = ppc_md.insert_hpte(hpte_group, vpn, prpn, 0,
-					  hpteflags, 0, 0);
+		slot = ppc_md.hpte_insert(hpte_group, va, prpn, 0,
+					  hpteflags, 0, large);
 
 		/* Primary is full, try the secondary */
 		if (slot == -1) {
 			pte_val(new_pte) |= 1 << 15;
 			hpte_group = ((~hash & htab_data.htab_hash_mask) *
 				      HPTES_PER_GROUP) & ~0x7UL;
-			/* XXX fix large pte flag */
-			slot = ppc_md.insert_hpte(hpte_group, vpn, prpn,
-						  1, hpteflags, 0, 0);
+			slot = ppc_md.hpte_insert(hpte_group, va, prpn,
						  1, hpteflags, 0, large);
 			if (slot == -1) {
 				if (mftb() & 0x1)
 					hpte_group = ((hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
 
-				ppc_md.remove_hpte(hpte_group);
+				ppc_md.hpte_remove(hpte_group);
 				goto repeat;
 			}
 		}
@@ -341,6 +369,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	struct mm_struct *mm;
 	pte_t *ptep;
 	int ret;
+	int user_region = 0;
+	int local = 0;
 
 	/* Check for invalid addresses. */
 	if (!IS_VALID_EA(ea))
@@ -348,6 +378,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 
 	switch (REGION_ID(ea)) {
 	case USER_REGION_ID:
+		user_region = 1;
 		mm = current->mm;
 		if (mm == NULL)
 			return 1;
@@ -363,21 +394,20 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 		vsid = get_kernel_vsid(ea);
 		break;
 	case IO_UNMAPPED_REGION_ID:
-		udbg_printf("EEH Error ea = 0x%lx\n", ea);
-		PPCDBG_ENTER_DEBUGGER();
-		panic("EEH Error ea = 0x%lx\n", ea);
-		break;
+		/*
+		 * Should only be hit if there is an access to MMIO space
+		 * which is protected by EEH.
+		 * Send the problem up to do_page_fault
+		 */
 	case KERNEL_REGION_ID:
 		/*
-		 * As htab_initialize is now, we shouldn't ever get here since
-		 * we're bolting the entire 0xC0... region.
+		 * Should never get here - entire 0xC0... region is bolted.
+		 * Send the problem up to do_page_fault
 		 */
-		udbg_printf("Little faulted on kernel address 0x%lx\n", ea);
-		PPCDBG_ENTER_DEBUGGER();
-		panic("Little faulted on kernel address 0x%lx\n", ea);
-		break;
 	default:
-		/* Not a valid range, send the problem up to do_page_fault */
+		/* Not a valid range
+		 * Send the problem up to do_page_fault
		 */
 		return 1;
 		break;
 	}
@@ -392,8 +422,12 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	 * from modifying entries while we search and update
 	 */
 	spin_lock(&mm->page_table_lock);
+
+	if (user_region && (mm->cpu_vm_mask == (1 << smp_processor_id())))
+		local = 1;
+
 	ptep = find_linux_pte(pgdir, ea);
-	ret = __hash_page(ea, access, vsid, ptep, trap);
+	ret = __hash_page(ea, access, vsid, ptep, trap, local);
 	spin_unlock(&mm->page_table_lock);
 
 	return ret;
......
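For reference, the group computation that now repeats in create_pte_mapping, __hash_page, and map_io_page follows a single pattern: hash the virtual page number, mask the hash into the table, and scale by the group size. A condensed sketch of that pattern — hpt_hash, htab_data, and the constants are the kernel's, but sketch_primary_group is an invented helper name, not a kernel symbol:

	/* Condensed from the hunks above; sketch_primary_group is
	 * an illustrative name only. */
	static unsigned long
	sketch_primary_group(unsigned long va, int large)
	{
		unsigned long vpn, hash;

		/* Large pages shift out more of the page-offset bits. */
		vpn = large ? (va >> LARGE_PAGE_SHIFT) : (va >> PAGE_SHIFT);
		hash = hpt_hash(vpn, large);

		/* Index of the first slot in the primary PTE group. */
		return (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
	}

The secondary group, used when the primary is full, is derived the same way from the complemented hash (~hash), which is why __hash_page flips the hash before recomputing the slot.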
@@ -22,46 +22,6 @@
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
 
-/*
- * Create a pte. Used during initialization only.
- * We assume the PTE will fit in the primary PTEG.
- */
-void pSeries_make_pte(HPTE *htab, unsigned long va, unsigned long pa,
-		      int mode, unsigned long hash_mask, int large)
-{
-	HPTE *hptep;
-	unsigned long hash, i;
-	unsigned long vpn;
-
-	if (large)
-		vpn = va >> LARGE_PAGE_SHIFT;
-	else
-		vpn = va >> PAGE_SHIFT;
-
-	hash = hpt_hash(vpn, large);
-
-	hptep = htab + ((hash & hash_mask)*HPTES_PER_GROUP);
-
-	for (i = 0; i < 8; ++i, ++hptep) {
-		if (hptep->dw0.dw0.v == 0) {		/* !valid */
-			hptep->dw1.dword1 = pa | mode;
-			hptep->dw0.dword0 = 0;
-			hptep->dw0.dw0.avpn = va >> 23;
-			hptep->dw0.dw0.bolted = 1;	/* bolted */
-			if (large) {
-				hptep->dw0.dw0.l = 1;
-				hptep->dw0.dw0.avpn &= ~0x1UL;
-			}
-			hptep->dw0.dw0.v = 1;		/* make valid */
-			return;
-		}
-	}
-
-	/* We should _never_ get here and too early to call xmon. */
-	while(1)
-		;
-}
-
 #define HPTE_LOCK_BIT 3
 
 static inline void pSeries_lock_hpte(HPTE *hptep)
@@ -72,7 +32,7 @@ static inline void pSeries_lock_hpte(HPTE *hptep)
 		if (!test_and_set_bit(HPTE_LOCK_BIT, word))
 			break;
 		while(test_bit(HPTE_LOCK_BIT, word))
-			barrier();
+			cpu_relax();
 	}
 }
@@ -86,11 +46,10 @@ static inline void pSeries_unlock_hpte(HPTE *hptep)
 
 static spinlock_t pSeries_tlbie_lock = SPIN_LOCK_UNLOCKED;
 
-static long pSeries_insert_hpte(unsigned long hpte_group, unsigned long vpn,
-				unsigned long prpn, int secondary,
-				unsigned long hpteflags, int bolted, int large)
+long pSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
+			 unsigned long prpn, int secondary,
+			 unsigned long hpteflags, int bolted, int large)
 {
-	unsigned long avpn = vpn >> 11;
 	unsigned long arpn = physRpn_to_absRpn(prpn);
 	HPTE *hptep = htab_data.htab + hpte_group;
 	Hpte_dword0 dw0;
@@ -120,13 +79,15 @@ long pSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
 
 	lhpte.dw1.flags.flags = hpteflags;
 
 	lhpte.dw0.dword0 = 0;
-	lhpte.dw0.dw0.avpn = avpn;
+	lhpte.dw0.dw0.avpn = va >> 23;
 	lhpte.dw0.dw0.h = secondary;
 	lhpte.dw0.dw0.bolted = bolted;
 	lhpte.dw0.dw0.v = 1;
 
-	if (large)
+	if (large) {
 		lhpte.dw0.dw0.l = 1;
+		lhpte.dw0.dw0.avpn &= ~0x1UL;
+	}
 
 	hptep->dw1.dword1 = lhpte.dw1.dword1;
@@ -144,23 +105,16 @@ long pSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
 	return i;
 }
 
-static long pSeries_remove_hpte(unsigned long hpte_group)
+static long pSeries_hpte_remove(unsigned long hpte_group)
 {
 	HPTE *hptep;
 	Hpte_dword0 dw0;
 	int i;
 	int slot_offset;
-	unsigned long vsid, group, pi, pi_high;
-	unsigned long slot;
-	unsigned long flags;
-	int large;
-	unsigned long va;
 
-	/* pick a random slot to start at */
+	/* pick a random entry to start at */
 	slot_offset = mftb() & 0x7;
-	udbg_printf("remove_hpte in %d\n", slot_offset);
 
 	for (i = 0; i < HPTES_PER_GROUP; i++) {
 		hptep = htab_data.htab + hpte_group + slot_offset;
 		dw0 = hptep->dw0.dw0;
@@ -181,30 +135,9 @@ static long pSeries_hpte_remove(unsigned long hpte_group)
 	if (i == HPTES_PER_GROUP)
 		return -1;
 
-	large = dw0.l;
-
 	/* Invalidate the hpte. NOTE: this also unlocks it */
 	hptep->dw0.dword0 = 0;
 
-	/* Invalidate the tlb */
-	vsid = dw0.avpn >> 5;
-	slot = hptep - htab_data.htab;
-	group = slot >> 3;
-	if (dw0.h)
-		group = ~group;
-	pi = (vsid ^ group) & 0x7ff;
-	pi_high = (dw0.avpn & 0x1f) << 11;
-	pi |= pi_high;
-
-	if (large)
-		va = pi << LARGE_PAGE_SHIFT;
-	else
-		va = pi << PAGE_SHIFT;
-
-	spin_lock_irqsave(&pSeries_tlbie_lock, flags);
-	_tlbie(va, large);
-	spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
-
 	return i;
 }
@@ -259,41 +192,40 @@ static long pSeries_hpte_find(unsigned long vpn)
 }
 
 static long pSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
-				  unsigned long va, int large)
+				  unsigned long va, int large, int local)
 {
 	HPTE *hptep = htab_data.htab + slot;
 	Hpte_dword0 dw0;
-	unsigned long vpn, avpn;
+	unsigned long avpn = va >> 23;
 	unsigned long flags;
+	int ret = 0;
 
 	if (large)
-		vpn = va >> LARGE_PAGE_SHIFT;
-	else
-		vpn = va >> PAGE_SHIFT;
-
-	avpn = vpn >> 11;
+		avpn &= ~0x1UL;
 
 	pSeries_lock_hpte(hptep);
 
 	dw0 = hptep->dw0.dw0;
 
+	/* Even if we miss, we need to invalidate the TLB */
 	if ((dw0.avpn != avpn) || !dw0.v) {
 		pSeries_unlock_hpte(hptep);
-		udbg_printf("updatepp missed\n");
-		return -1;
+		ret = -1;
+	} else {
+		set_pp_bit(newpp, hptep);
+		pSeries_unlock_hpte(hptep);
 	}
 
-	set_pp_bit(newpp, hptep);
-	pSeries_unlock_hpte(hptep);
-
 	/* Ensure it is out of the tlb too */
-	/* XXX use tlbiel where possible */
-	spin_lock_irqsave(&pSeries_tlbie_lock, flags);
-	_tlbie(va, large);
-	spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
+	if (cpu_has_tlbiel() && !large && local) {
+		_tlbiel(va);
+	} else {
+		spin_lock_irqsave(&pSeries_tlbie_lock, flags);
+		_tlbie(va, large);
+		spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
+	}
 
-	return 0;
+	return ret;
 }
 
 /*
@@ -322,7 +254,6 @@ static void pSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
 	set_pp_bit(newpp, hptep);
 
 	/* Ensure it is out of the tlb too */
-	/* XXX use tlbiel where possible */
 	spin_lock_irqsave(&pSeries_tlbie_lock, flags);
 	_tlbie(va, 0);
 	spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
@@ -333,29 +264,24 @@ static void pSeries_hpte_invalidate(unsigned long slot, unsigned long va,
 {
 	HPTE *hptep = htab_data.htab + slot;
 	Hpte_dword0 dw0;
-	unsigned long vpn, avpn;
+	unsigned long avpn = va >> 23;
 	unsigned long flags;
 
 	if (large)
-		vpn = va >> LARGE_PAGE_SHIFT;
-	else
-		vpn = va >> PAGE_SHIFT;
-
-	avpn = vpn >> 11;
+		avpn &= ~0x1UL;
 
 	pSeries_lock_hpte(hptep);
 
 	dw0 = hptep->dw0.dw0;
 
+	/* Even if we miss, we need to invalidate the TLB */
 	if ((dw0.avpn != avpn) || !dw0.v) {
 		pSeries_unlock_hpte(hptep);
-		udbg_printf("invalidate missed\n");
-		return;
+	} else {
+		/* Invalidate the hpte. NOTE: this also unlocks it */
+		hptep->dw0.dword0 = 0;
 	}
 
-	/* Invalidate the hpte. NOTE: this also unlocks it */
-	hptep->dw0.dword0 = 0;
-
 	/* Invalidate the tlb */
 	if (cpu_has_tlbiel() && !large && local) {
 		_tlbiel(va);
@@ -374,6 +300,7 @@ static void pSeries_flush_hash_range(unsigned long context,
 	HPTE *hptep;
 	Hpte_dword0 dw0;
 	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[smp_processor_id()];
+
 	/* XXX fix for large ptes */
 	unsigned long large = 0;
 
@@ -399,22 +326,24 @@ static void pSeries_flush_hash_range(unsigned long context,
 		slot += (pte_val(batch->pte[i]) & _PAGE_GROUP_IX) >> 12;
 
 		hptep = htab_data.htab + slot;
-		avpn = vpn >> 11;
+
+		avpn = va >> 23;
+		if (large)
+			avpn &= ~0x1UL;
 
 		pSeries_lock_hpte(hptep);
 
 		dw0 = hptep->dw0.dw0;
 
+		/* Even if we miss, we need to invalidate the TLB */
 		if ((dw0.avpn != avpn) || !dw0.v) {
 			pSeries_unlock_hpte(hptep);
-			udbg_printf("invalidate missed\n");
-			continue;
+		} else {
+			/* Invalidate the hpte. NOTE: this also unlocks it */
+			hptep->dw0.dword0 = 0;
 		}
 
 		j++;
-
-		/* Invalidate the hpte. NOTE: this also unlocks it */
-		hptep->dw0.dword0 = 0;
 	}
 
 	if (cpu_has_tlbiel() && !large && local) {
@@ -455,9 +384,8 @@ void hpte_init_pSeries(void)
 	ppc_md.hpte_invalidate = pSeries_hpte_invalidate;
 	ppc_md.hpte_updatepp = pSeries_hpte_updatepp;
 	ppc_md.hpte_updateboltedpp = pSeries_hpte_updateboltedpp;
-	ppc_md.insert_hpte = pSeries_insert_hpte;
-	ppc_md.remove_hpte = pSeries_remove_hpte;
-	ppc_md.make_pte = pSeries_make_pte;
+	ppc_md.hpte_insert = pSeries_hpte_insert;
+	ppc_md.hpte_remove = pSeries_hpte_remove;
 
 	/* Disable TLB batching on nighthawk */
 	root = find_path_device("/");
......
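The pSeries_lock_hpte change above swaps barrier() for cpu_relax() in the spin loop; the surrounding structure is the classic test-and-test-and-set lock on a bit within the HPTE's first doubleword. A minimal sketch of the pattern, generic rather than the kernel's exact code:

	/* Test-and-test-and-set spin pattern, as in pSeries_lock_hpte:
	 * attempt the atomic only when the bit looks clear, and yield
	 * the pipeline (cpu_relax) while spinning read-only, which
	 * avoids hammering the cache line with atomic operations. */
	static inline void sketch_lock(unsigned long *word, int bit)
	{
		for (;;) {
			if (!test_and_set_bit(bit, word))
				break;			/* acquired */
			while (test_bit(bit, word))
				cpu_relax();		/* spin without the atomic */
		}
	}

Storing zero to the HPTE's first doubleword clears the valid bit and the lock bit together, which is why several hunks note that invalidating the entry "also unlocks it".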
...@@ -403,67 +403,11 @@ int hvc_count(int *start_termno) ...@@ -403,67 +403,11 @@ int hvc_count(int *start_termno)
long pSeries_lpar_hpte_insert(unsigned long hpte_group,
/* unsigned long va, unsigned long prpn,
* Create a pte - LPAR . Used during initialization only. int secondary, unsigned long hpteflags,
* We assume the PTE will fit in the primary PTEG. int bolted, int large)
*/
void pSeries_lpar_make_pte(HPTE *htab, unsigned long va, unsigned long pa,
int mode, unsigned long hash_mask, int large)
{
HPTE local_hpte;
unsigned long hash, slot, flags, lpar_rc, vpn;
unsigned long dummy1, dummy2;
if (large)
vpn = va >> LARGE_PAGE_SHIFT;
else
vpn = va >> PAGE_SHIFT;
hash = hpt_hash(vpn, large);
slot = ((hash & hash_mask)*HPTES_PER_GROUP);
local_hpte.dw1.dword1 = pa | mode;
local_hpte.dw0.dword0 = 0;
local_hpte.dw0.dw0.avpn = va >> 23;
local_hpte.dw0.dw0.bolted = 1; /* bolted */
if (large) {
local_hpte.dw0.dw0.l = 1; /* large page */
local_hpte.dw0.dw0.avpn &= ~0x1UL;
}
local_hpte.dw0.dw0.v = 1;
/* Set CEC cookie to 0 */
/* Zero page = 0 */
/* I-cache Invalidate = 0 */
/* I-cache synchronize = 0 */
/* Exact = 0 - modify any entry in group */
flags = 0;
lpar_rc = plpar_pte_enter(flags, slot, local_hpte.dw0.dword0,
local_hpte.dw1.dword1, &dummy1, &dummy2);
if (lpar_rc == H_PTEG_Full) {
while(1)
;
}
/*
* NOTE: we explicitly do not check return status here because it is
* "normal" for early boot code to map io regions for which a partition
* has no access. However, we will die if we actually fault on these
* "permission denied" pages.
*/
}
-static long pSeries_lpar_insert_hpte(unsigned long hpte_group,
-				     unsigned long vpn, unsigned long prpn,
-				     int secondary, unsigned long hpteflags,
-				     int bolted, int large)
+static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
+				     unsigned long va, unsigned long prpn,
+				     int secondary, unsigned long hpteflags,
+				     int bolted, int large)
 {
-	/* XXX fix for large page */
-	unsigned long avpn = vpn >> 11;
 	unsigned long arpn = physRpn_to_absRpn(prpn);
 	unsigned long lpar_rc;
 	unsigned long flags;
@@ -476,13 +420,15 @@ static long pSeries_lpar_insert_hpte(unsigned long hpte_group,
 	lhpte.dw1.flags.flags = hpteflags;

 	lhpte.dw0.dword0 = 0;
-	lhpte.dw0.dw0.avpn = avpn;
+	lhpte.dw0.dw0.avpn = va >> 23;
 	lhpte.dw0.dw0.h = secondary;
 	lhpte.dw0.dw0.bolted = bolted;
 	lhpte.dw0.dw0.v = 1;

-	if (large)
+	if (large) {
 		lhpte.dw0.dw0.l = 1;
+		lhpte.dw0.dw0.avpn &= ~0x1UL;
+	}

 	/* Now fill in the actual HPTE */
 	/* Set CEC cookie to 0 */
@@ -522,7 +468,7 @@ static long pSeries_lpar_insert_hpte(unsigned long hpte_group,
 static spinlock_t pSeries_lpar_tlbie_lock = SPIN_LOCK_UNLOCKED;

-static long pSeries_lpar_remove_hpte(unsigned long hpte_group)
+static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
 {
 	unsigned long slot_offset;
 	unsigned long lpar_rc;
@@ -559,12 +505,15 @@ static long pSeries_lpar_remove_hpte(unsigned long hpte_group)
 	 * already zero.  For now I am paranoid.
 	 */
 static long pSeries_lpar_hpte_updatepp(unsigned long slot, unsigned long newpp,
-				       unsigned long va, int large)
+				       unsigned long va, int large, int local)
 {
 	unsigned long lpar_rc;
 	unsigned long flags = (newpp & 7) | H_AVPN;
 	unsigned long avpn = va >> 23;

+	if (large)
+		avpn &= ~0x1UL;
+
 	lpar_rc = plpar_pte_protect(flags, slot, (avpn << 7));

 	if (lpar_rc == H_Not_Found) {
@@ -662,6 +611,9 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
 	unsigned long lpar_rc;
 	unsigned long dummy1, dummy2;

+	if (large)
+		avpn &= ~0x1UL;
+
 	lpar_rc = plpar_pte_remove(H_AVPN, slot, (avpn << 7), &dummy1,
 				   &dummy2);
@@ -695,11 +647,10 @@ void pSeries_lpar_flush_hash_range(unsigned long context, unsigned long number,
 void pSeries_lpar_mm_init(void)
 {
 	ppc_md.hpte_invalidate	= pSeries_lpar_hpte_invalidate;
 	ppc_md.hpte_updatepp	= pSeries_lpar_hpte_updatepp;
 	ppc_md.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
-	ppc_md.insert_hpte	= pSeries_lpar_insert_hpte;
-	ppc_md.remove_hpte	= pSeries_lpar_remove_hpte;
-	ppc_md.make_pte		= pSeries_lpar_make_pte;
+	ppc_md.hpte_insert	= pSeries_lpar_hpte_insert;
+	ppc_md.hpte_remove	= pSeries_lpar_hpte_remove;
 	ppc_md.flush_hash_range	= pSeries_lpar_flush_hash_range;
 }
@@ -116,8 +116,6 @@ udbg_puts(const char *s)
 		if (s && *s != '\0') {
 			while ((c = *s++) != '\0')
 				ppc_md.udbg_putc(c);
-		} else {
-			udbg_puts("NULL");
 		}
 	} else {
 		printk("%s", s);
@@ -135,8 +133,7 @@ udbg_write(const char *s, int n)
 		while ( (( c = *s++ ) != '\0') && (remain-- > 0)) {
 			ppc_md.udbg_putc(c);
 		}
-	} else
-		udbg_puts("NULL");
+	}

 	return n - remain;
 }
@@ -220,16 +220,26 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags)
 		set_pte(ptep, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
 		spin_unlock(&ioremap_mm.page_table_lock);
 	} else {
-		/* If the mm subsystem is not fully up, we cannot create a
+		unsigned long va, vpn, hash, hpteg;
+
+		/*
+		 * If the mm subsystem is not fully up, we cannot create a
 		 * linux page table entry for this mapping.  Simply bolt an
 		 * entry in the hardware page table.
 		 */
 		vsid = get_kernel_vsid(ea);
-		ppc_md.make_pte(htab_data.htab,
-			(vsid << 28) | (ea & 0xFFFFFFF), // va (NOT the ea)
-			pa,
-			_PAGE_NO_CACHE | _PAGE_GUARDED | PP_RWXX,
-			htab_data.htab_hash_mask, 0);
+		va = (vsid << 28) | (ea & 0xFFFFFFF);
+		vpn = va >> PAGE_SHIFT;
+
+		hash = hpt_hash(vpn, 0);
+
+		hpteg = ((hash & htab_data.htab_hash_mask)*HPTES_PER_GROUP);
+
+		if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT, 0,
+				       _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX,
+				       1, 0) == -1) {
+			panic("map_io_page: could not insert mapping");
+		}
 	}
 }
@@ -649,7 +659,7 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 extern pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea);
 int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
-		pte_t *ptep, unsigned long trap);
+		pte_t *ptep, unsigned long trap, int local);

 /*
  * This is called at the end of handling a user page fault, when the
@@ -665,6 +675,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
 	unsigned long vsid;
 	void *pgdir;
 	pte_t *ptep;
+	int local = 0;

 	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
 	if (!pte_young(pte))
@@ -677,6 +688,9 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
 	ptep = find_linux_pte(pgdir, ea);
 	vsid = get_vsid(vma->vm_mm->context, ea);

+	if (vma->vm_mm->cpu_vm_mask == (1 << smp_processor_id()))
+		local = 1;
+
 	__hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
-		    0x300);
+		    0x300, local);
 }
@@ -642,6 +642,11 @@ device_initcall(pci_init);
 __setup("pci=", pci_setup);

+#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
+struct pci_dev *isa_bridge;
+EXPORT_SYMBOL(isa_bridge);
+#endif
+
 EXPORT_SYMBOL(pci_enable_device);
 EXPORT_SYMBOL(pci_disable_device);
 EXPORT_SYMBOL(pci_find_capability);
@@ -1473,8 +1473,8 @@ config ZISOFS_FS
 config FS_MBCACHE
 	tristate
 	depends on EXT2_FS_XATTR || EXT3_FS_XATTR
-	default m if EXT2_FS=m || EXT3_FS=m
 	default y if EXT2_FS=y || EXT3_FS=y
+	default m if EXT2_FS=m || EXT3_FS=m

 # Posix ACL utility routines (for now, only ext2/ext3/jfs)
 config FS_POSIX_ACL
@@ -23,14 +23,6 @@ kafs-objs := \
 	vnode.o \
 	volume.o

-# cache.o
-
-obj-m := kafs.o
-
-# superfluous for 2.5, but needed for 2.4..
-ifeq "$(VERSION).$(PATCHLEVEL)" "2.4"
-kafs.o: $(kafs-objs)
-	$(LD) -r -o kafs.o $(kafs-objs)
-endif
+obj-$(CONFIG_AFS_FS) := kafs.o

 include $(TOPDIR)/Rules.make
@@ -38,19 +38,17 @@ struct file_operations afs_dir_file_operations = {
 struct inode_operations afs_dir_inode_operations = {
 	.lookup		= afs_dir_lookup,
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
 	.getattr	= afs_inode_getattr,
-#else
-	.revalidate	= afs_inode_revalidate,
+#if 0 /* TODO */
+	.create		= afs_dir_create,
+	.link		= afs_dir_link,
+	.unlink		= afs_dir_unlink,
+	.symlink	= afs_dir_symlink,
+	.mkdir		= afs_dir_mkdir,
+	.rmdir		= afs_dir_rmdir,
+	.mknod		= afs_dir_mknod,
+	.rename		= afs_dir_rename,
 #endif
-//	.create		= afs_dir_create,
-//	.link		= afs_dir_link,
-//	.unlink		= afs_dir_unlink,
-//	.symlink	= afs_dir_symlink,
-//	.mkdir		= afs_dir_mkdir,
-//	.rmdir		= afs_dir_rmdir,
-//	.mknod		= afs_dir_mknod,
-//	.rename		= afs_dir_rename,
 };

 static struct dentry_operations afs_fs_dentry_operations = {
@@ -250,7 +248,7 @@ static int afs_dir_iterate_block(unsigned *fpos,
 	/* skip entries marked unused in the bitmap */
 	if (!(block->pagehdr.bitmap[offset/8] & (1 << (offset % 8)))) {
-		_debug("ENT[%u.%u]: unused\n",blkoff/sizeof(afs_dir_block_t),offset);
+		_debug("ENT[%Zu.%u]: unused\n",blkoff/sizeof(afs_dir_block_t),offset);
 		if (offset>=curr)
 			*fpos = blkoff + next * sizeof(afs_dirent_t);
 		continue;
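The recurring %u → %Zu conversions in these kAFS debug calls are 64-bit fixes: blkoff/sizeof(...) and strnlen() yield size_t, which is 64 bits wide on ppc64 and other LP64 targets, so the varargs no longer match a plain %u. A small userspace illustration (C99 spells the modifier %zu; the kernel's vsprintf of this era also accepted the older %Zu spelling used here):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		size_t nlen = strnlen("example", 32);

		printf("nlen=%zu\n", nlen);		/* size_t-sized, always safe */
		printf("nlen=%u\n", (unsigned)nlen);	/* only with an explicit cast */
		return 0;
	}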
@@ -260,26 +258,26 @@ static int afs_dir_iterate_block(unsigned *fpos,
 	dire = &block->dirents[offset];
 	nlen = strnlen(dire->parts.name,sizeof(*block) - offset*sizeof(afs_dirent_t));

-	_debug("ENT[%u.%u]: %s %u \"%.*s\"\n",
+	_debug("ENT[%Zu.%u]: %s %Zu \"%s\"\n",
 	       blkoff/sizeof(afs_dir_block_t),offset,
-	       offset<curr ? "skip" : "fill",
-	       nlen,nlen,dire->name);
+	       (offset<curr ? "skip" : "fill"),
+	       nlen,dire->u.name);

 	/* work out where the next possible entry is */
 	for (tmp=nlen; tmp>15; tmp-=sizeof(afs_dirent_t)) {
 		if (next>=AFS_DIRENT_PER_BLOCK) {
-			_debug("ENT[%u.%u]:"
-			       " %u travelled beyond end dir block (len %u/%u)\n",
+			_debug("ENT[%Zu.%u]:"
+			       " %u travelled beyond end dir block (len %u/%Zu)\n",
 			       blkoff/sizeof(afs_dir_block_t),offset,next,tmp,nlen);
 			return -EIO;
 		}
 		if (!(block->pagehdr.bitmap[next/8] & (1 << (next % 8)))) {
-			_debug("ENT[%u.%u]: %u unmarked extension (len %u/%u)\n",
+			_debug("ENT[%Zu.%u]: %u unmarked extension (len %u/%Zu)\n",
 			       blkoff/sizeof(afs_dir_block_t),offset,next,tmp,nlen);
 			return -EIO;
 		}

-		_debug("ENT[%u.%u]: ext %u/%u\n",
+		_debug("ENT[%Zu.%u]: ext %u/%Zu\n",
 		       blkoff/sizeof(afs_dir_block_t),next,tmp,nlen);
 		next++;
 	}
@@ -397,7 +395,7 @@ static int afs_dir_lookup_filldir(void *_cookie, const char *name, int nlen, lof
 {
 	struct afs_dir_lookup_cookie *cookie = _cookie;

-	_enter("{%s,%u},%s,%u,,%lu,%u",cookie->name,cookie->nlen,name,nlen,ino,ntohl(dtype));
+	_enter("{%s,%Zu},%s,%u,,%lu,%u",cookie->name,cookie->nlen,name,nlen,ino,ntohl(dtype));

 	if (cookie->nlen != nlen || memcmp(cookie->name,name,nlen)!=0) {
 		_leave(" = 0 [no]");
@@ -471,7 +469,7 @@ static struct dentry *afs_dir_lookup(struct inode *dir, struct dentry *dentry)
 	}

 	dentry->d_op = &afs_fs_dentry_operations;
-	dentry->d_fsdata = (void*) (unsigned) vnode->status.version;
+	dentry->d_fsdata = (void*) (unsigned long) vnode->status.version;
 	d_add(dentry,inode);

 	_leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%lu }",
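Stashing a version number in d_fsdata only round-trips if the intermediate integer type is pointer-sized; casting through plain unsigned silently drops the top 32 bits on LP64 targets, which is what this hunk and the afs_d_revalidate() one below correct. A userspace sketch of the failure mode (the value is hypothetical):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		unsigned long version = 0x100000002UL;	/* needs >32 bits */
		void *fsdata;

		/* old code: the intermediate "unsigned" truncates first */
		fsdata = (void *)(uintptr_t)(unsigned)version;
		printf("via unsigned:      %#lx\n", (unsigned long)(uintptr_t)fsdata);

		/* fixed code: "unsigned long" is pointer-sized on LP64 */
		fsdata = (void *)(unsigned long)version;
		printf("via unsigned long: %#lx\n", (unsigned long)(uintptr_t)fsdata);
		return 0;
	}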
@@ -500,15 +498,9 @@ static int afs_d_revalidate(struct dentry *dentry, int flags)
 	_enter("%s,%x",dentry->d_name.name,flags);

 	/* lock down the parent dentry so we can peer at it */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
 	read_lock(&dparent_lock);
 	parent = dget(dentry->d_parent);
 	read_unlock(&dparent_lock);
-#else
-	lock_kernel();
-	parent = dget(dentry->d_parent);
-	unlock_kernel();
-#endif

 	dir = parent->d_inode;
 	inode = dentry->d_inode;
@@ -541,10 +533,10 @@ static int afs_d_revalidate(struct dentry *dentry, int flags)
 		goto out_bad;
 	}

-	if ((unsigned)dentry->d_fsdata != (unsigned)AFS_FS_I(dir)->status.version) {
-		_debug("%s: parent changed %u -> %u",
+	if ((unsigned long)dentry->d_fsdata != (unsigned long)AFS_FS_I(dir)->status.version) {
+		_debug("%s: parent changed %lu -> %u",
 		       dentry->d_name.name,
-		       (unsigned)dentry->d_fsdata,
+		       (unsigned long)dentry->d_fsdata,
 		       (unsigned)AFS_FS_I(dir)->status.version);

 		/* search the directory for this vnode */
@@ -585,7 +577,7 @@ static int afs_d_revalidate(struct dentry *dentry, int flags)
 		goto out_bad;
 	}

-	dentry->d_fsdata = (void*) (unsigned) AFS_FS_I(dir)->status.version;
+	dentry->d_fsdata = (void*) (unsigned long) AFS_FS_I(dir)->status.version;
 }

 out_valid:
@@ -9,8 +9,8 @@
  * 2 of the License, or (at your option) any later version.
  */

-#ifndef _H_DB712916_5113_11D6_9A6D_0002B3163499
-#define _H_DB712916_5113_11D6_9A6D_0002B3163499
+#ifndef _LINUX_AFS_ERRORS_H
+#define _LINUX_AFS_ERRORS_H

 #include "types.h"
@@ -31,4 +31,4 @@ typedef enum {
 extern int afs_abort_to_error(int abortcode);

-#endif /* _H_DB712916_5113_11D6_9A6D_0002B3163499 */
+#endif /* _LINUX_AFS_ERRORS_H */
@@ -21,53 +21,34 @@
 #include <rxrpc/call.h>
 #include "internal.h"

-//static int afs_file_open(struct inode *inode, struct file *file);
-//static int afs_file_release(struct inode *inode, struct file *file);
+#if 0
+static int afs_file_open(struct inode *inode, struct file *file);
+static int afs_file_release(struct inode *inode, struct file *file);
+#endif

 static int afs_file_readpage(struct file *file, struct page *page);
-//static ssize_t afs_file_read(struct file *file, char *buf, size_t size, loff_t *off);
 static ssize_t afs_file_write(struct file *file, const char *buf, size_t size, loff_t *off);

 struct inode_operations afs_file_inode_operations = {
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
 	.getattr	= afs_inode_getattr,
-#else
-	.revalidate	= afs_inode_revalidate,
-#endif
 };

 struct file_operations afs_file_file_operations = {
-//	.open		= afs_file_open,
-//	.release	= afs_file_release,
-	.read		= generic_file_read, //afs_file_read,
+	.read		= generic_file_read,
 	.write		= afs_file_write,
 	.mmap		= generic_file_mmap,
-//	.fsync		= afs_file_fsync,
+#if 0
+	.open		= afs_file_open,
+	.release	= afs_file_release,
+	.fsync		= afs_file_fsync,
+#endif
 };

 struct address_space_operations afs_fs_aops = {
 	.readpage	= afs_file_readpage,
 };

-/*****************************************************************************/
-/*
- * AFS file read
- */
-#if 0
-static ssize_t afs_file_read(struct file *file, char *buf, size_t size, loff_t *off)
-{
-	struct afs_inode_info *ai;
-
-	ai = AFS_FS_I(file->f_dentry->d_inode);
-	if (ai->flags & AFS_INODE_DELETED)
-		return -ESTALE;
-
-	return -EIO;
-} /* end afs_file_read() */
-#endif
-
 /*****************************************************************************/
 /*
  * AFS file write
@@ -426,7 +426,7 @@ int afs_rxfs_fetch_file_data(afs_server_t *server,
 	int ret;
 	u32 *bp;

-	_enter("%p,{fid={%u,%u,%u},sz=%u,of=%lu}",
+	_enter("%p,{fid={%u,%u,%u},sz=%Zu,of=%lu}",
 	       server,
 	       desc->fid.vid,
 	       desc->fid.vnode,
@@ -28,9 +28,6 @@
 struct afs_iget_data {
 	afs_fid_t		fid;
 	afs_volume_t		*volume;	/* volume on which resides */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-	afs_vnode_t		*new_vnode;	/* new vnode record */
-#endif
 };

 /*****************************************************************************/
@@ -41,7 +38,7 @@ static int afs_inode_map_status(afs_vnode_t *vnode)
 {
 	struct inode *inode = AFS_VNODE_TO_I(vnode);

-	_debug("FS: ft=%d lk=%d sz=%u ver=%Lu mod=%hu",
+	_debug("FS: ft=%d lk=%d sz=%Zu ver=%Lu mod=%hu",
 	       vnode->status.type,
 	       vnode->status.nlink,
 	       vnode->status.size,
@@ -117,7 +114,6 @@ int afs_inode_fetch_status(struct inode *inode)
 /*
  * iget5() comparator
  */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
 static int afs_iget5_test(struct inode *inode, void *opaque)
 {
 	struct afs_iget_data *data = opaque;
@@ -125,13 +121,11 @@ static int afs_iget5_test(struct inode *inode, void *opaque)
 	/* only match inodes with the same version number */
 	return inode->i_ino==data->fid.vnode && inode->i_version==data->fid.unique;
 } /* end afs_iget5_test() */
-#endif

 /*****************************************************************************/
 /*
  * iget5() inode initialiser
  */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
 static int afs_iget5_set(struct inode *inode, void *opaque)
 {
 	struct afs_iget_data *data = opaque;
@@ -144,71 +138,6 @@ static int afs_iget5_set(struct inode *inode, void *opaque)
 	return 0;
 } /* end afs_iget5_set() */
-#endif
-
-/*****************************************************************************/
-/*
- * iget4() comparator
- */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-static int afs_iget4_test(struct inode *inode, ino_t ino, void *opaque)
-{
-	struct afs_iget_data *data = opaque;
-
-	/* only match inodes with the same version number */
-	return inode->i_ino==data->fid.vnode && inode->i_version==data->fid.unique;
-} /* end afs_iget4_test() */
-#endif
-
-/*****************************************************************************/
-/*
- * read an inode (2.4 only)
- */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-void afs_read_inode2(struct inode *inode, void *opaque)
-{
-	struct afs_iget_data *data = opaque;
-	afs_vnode_t *vnode;
-	int ret;
-
-	_enter(",{{%u,%u,%u},%p}",data->fid.vid,data->fid.vnode,data->fid.unique,data->volume);
-
-	if (inode->u.generic_ip) BUG();
-
-	/* attach a pre-allocated vnode record */
-	inode->u.generic_ip = vnode = data->new_vnode;
-	data->new_vnode = NULL;
-
-	memset(vnode,0,sizeof(*vnode));
-	vnode->inode = inode;
-	init_waitqueue_head(&vnode->update_waitq);
-	spin_lock_init(&vnode->lock);
-	INIT_LIST_HEAD(&vnode->cb_link);
-	INIT_LIST_HEAD(&vnode->cb_hash_link);
-	afs_timer_init(&vnode->cb_timeout,&afs_vnode_cb_timed_out_ops);
-	vnode->flags |= AFS_VNODE_CHANGED;
-	vnode->volume = data->volume;
-	vnode->fid = data->fid;
-
-	/* ask the server for a status check */
-	ret = afs_vnode_fetch_status(vnode);
-	if (ret<0) {
-		make_bad_inode(inode);
-		_leave(" [bad inode]");
-		return;
-	}
-
-	ret = afs_inode_map_status(vnode);
-	if (ret<0) {
-		make_bad_inode(inode);
-		_leave(" [bad inode]");
-		return;
-	}
-
-	_leave("");
-	return;
-} /* end afs_read_inode2() */
-#endif
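With the iget4()/read_inode2() path gone, lookup goes exclusively through iget5_locked(), which takes exactly the pair of callbacks kept above: test() disambiguates entries that share a hash value, set() initialises a freshly allocated inode before it goes live. A self-contained userspace model of that idiom (table and structures invented for illustration; the real API also locks the new inode and uses the global inode hash):

	#include <stdio.h>

	struct inode { unsigned long i_ino; unsigned long i_version; int used; };
	struct key   { unsigned long vnode; unsigned long unique; };

	static int test(struct inode *inode, void *opaque)
	{
		struct key *k = opaque;
		return inode->i_ino == k->vnode && inode->i_version == k->unique;
	}

	static int set(struct inode *inode, void *opaque)
	{
		struct key *k = opaque;
		inode->i_ino = k->vnode;
		inode->i_version = k->unique;
		return 0;
	}

	static struct inode table[8];

	static struct inode *iget5_model(unsigned long hashval, void *data)
	{
		struct inode *slot = &table[hashval % 8];

		if (slot->used && test(slot, data))
			return slot;		/* existing inode matched */
		set(slot, data);		/* initialise a "new" inode */
		slot->used = 1;
		return slot;
	}

	int main(void)
	{
		struct key k = { 42, 7 };
		struct inode *i = iget5_model(k.vnode, &k);

		printf("ino=%lu version=%lu\n", i->i_ino, i->i_version);
		return 0;
	}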
 /*****************************************************************************/
 /*
@@ -227,7 +156,6 @@ inline int afs_iget(struct super_block *sb, afs_fid_t *fid, struct inode **_inod
 	as = sb->s_fs_info;
 	data.volume = as->volume;

-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
 	inode = iget5_locked(sb,fid->vnode,afs_iget5_test,afs_iget5_set,&data);
 	if (!inode) {
 		_leave(" = -ENOMEM");
@@ -253,13 +181,6 @@ inline int afs_iget(struct super_block *sb, afs_fid_t *fid, struct inode **_inod
 	if (ret<0)
 		goto bad_inode;

-#if 0
-	/* find a cache entry for it */
-	ret = afs_cache_lookup_vnode(as->volume,vnode);
-	if (ret<0)
-		goto bad_inode;
-#endif
-
 	/* success */
 	unlock_new_inode(inode);
@@ -280,42 +201,12 @@ inline int afs_iget(struct super_block *sb, afs_fid_t *fid, struct inode **_inod
 	_leave(" = %d [bad]",ret);
 	return ret;

-#else
-
-	/* pre-allocate a vnode record so that afs_read_inode2() doesn't have to return an inode
-	 * without one attached
-	 */
-	data.new_vnode = kmalloc(sizeof(afs_vnode_t),GFP_KERNEL);
-	if (!data.new_vnode) {
-		_leave(" = -ENOMEM");
-		return -ENOMEM;
-	}
-
-	inode = iget4(sb,fid->vnode,afs_iget4_test,&data);
-	if (data.new_vnode) kfree(data.new_vnode);
-	if (!inode) {
-		_leave(" = -ENOMEM");
-		return -ENOMEM;
-	}
-
-	vnode = AFS_FS_I(inode);
-	*_inode = inode;
-	_leave(" = 0 [CB { v=%u x=%lu t=%u nix=%u }]",
-	       vnode->cb_version,
-	       vnode->cb_timeout.timo_jif,
-	       vnode->cb_type,
-	       vnode->nix
-	       );
-	return 0;
-#endif
 } /* end afs_iget() */

 /*****************************************************************************/
 /*
  * read the attributes of an inode
  */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
 int afs_inode_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
 {
 	struct inode *inode;
@@ -349,44 +240,6 @@ int afs_inode_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
 	return 0;
 } /* end afs_inode_getattr() */
-#endif
-
-/*****************************************************************************/
-/*
- * revalidate the inode
- */
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0)
-int afs_inode_revalidate(struct dentry *dentry)
-{
-	struct inode *inode;
-	afs_vnode_t *vnode;
-	int ret;
-
-	inode = dentry->d_inode;
-
-	_enter("{ ino=%lu v=%lu }",inode->i_ino,inode->i_version);
-
-	vnode = AFS_FS_I(inode);
-
-	ret = afs_inode_fetch_status(inode);
-	if (ret==-ENOENT) {
-		_leave(" = %d [%d %p]",ret,atomic_read(&dentry->d_count),dentry->d_inode);
-		return ret;
-	}
-	else if (ret<0) {
-		make_bad_inode(inode);
-		_leave(" = %d",ret);
-		return ret;
-	}
-
-	_leave(" = 0 CB { v=%u x=%u t=%u }",
-	       vnode->cb_version,
-	       vnode->cb_expiry,
-	       vnode->cb_type);
-
-	return 0;
-} /* end afs_inode_revalidate() */
-#endif

 /*****************************************************************************/
 /*
@@ -410,9 +263,5 @@ void afs_clear_inode(struct inode *inode)
 	afs_vnode_give_up_callback(vnode);

-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0)
-	if (inode->u.generic_ip) kfree(inode->u.generic_ip);
-#endif
-
 	_leave("");
 } /* end afs_clear_inode() */
@@ -21,34 +21,24 @@
 /*
  * debug tracing
  */
-#define kenter(FMT,...)	printk("==> %s("FMT")\n",__FUNCTION__,##__VA_ARGS__)
-#define kleave(FMT,...)	printk("<== %s()"FMT"\n",__FUNCTION__,##__VA_ARGS__)
-#define kdebug(FMT,...)	printk(FMT"\n",##__VA_ARGS__)
-#define kproto(FMT,...)	printk("### "FMT"\n",##__VA_ARGS__)
-#define knet(FMT,...)	printk(FMT"\n",##__VA_ARGS__)
+#define kenter(FMT, a...)	printk("==> %s("FMT")\n",__FUNCTION__ , ## a)
+#define kleave(FMT, a...)	printk("<== %s()"FMT"\n",__FUNCTION__ , ## a)
+#define kdebug(FMT, a...)	printk(FMT"\n" , ## a)
+#define kproto(FMT, a...)	printk("### "FMT"\n" , ## a)
+#define knet(FMT, a...)	printk(FMT"\n" , ## a)

 #if 0
-#define _enter(FMT,...)	kenter(FMT,##__VA_ARGS__)
-#define _leave(FMT,...)	kleave(FMT,##__VA_ARGS__)
-#define _debug(FMT,...)	kdebug(FMT,##__VA_ARGS__)
-#define _proto(FMT,...)	kproto(FMT,##__VA_ARGS__)
-#define _net(FMT,...)	knet(FMT,##__VA_ARGS__)
+#define _enter(FMT, a...)	kenter(FMT , ## a)
+#define _leave(FMT, a...)	kleave(FMT , ## a)
+#define _debug(FMT, a...)	kdebug(FMT , ## a)
+#define _proto(FMT, a...)	kproto(FMT , ## a)
+#define _net(FMT, a...)	knet(FMT , ## a)
 #else
-#define _enter(FMT,...)	do { } while(0)
-#define _leave(FMT,...)	do { } while(0)
-#define _debug(FMT,...)	do { } while(0)
-#define _proto(FMT,...)	do { } while(0)
-#define _net(FMT,...)	do { } while(0)
+#define _enter(FMT, a...)	do { } while(0)
+#define _leave(FMT, a...)	do { } while(0)
+#define _debug(FMT, a...)	do { } while(0)
+#define _proto(FMT, a...)	do { } while(0)
+#define _net(FMT, a...)	do { } while(0)
-#endif
-
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0)
-#define wait_on_page_locked wait_on_page
-#define PageUptodate Page_Uptodate
-static inline struct proc_dir_entry *PDE(const struct inode *inode)
-{
-	return (struct proc_dir_entry *)inode->u.generic_ip;
-}
 #endif
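These macros trade C99's `...`/`__VA_ARGS__` form for GCC's older named-argument form `a...` with `, ## a`, the style the rest of the 2.5 tree used; in both cases the `##` pasting drops the trailing comma when no arguments are supplied. Both forms side by side in a userspace sketch:

	#include <stdio.h>

	/* C99 form (the ## pasting with __VA_ARGS__ is a GNU extension) */
	#define debug_c99(FMT, ...)	printf(FMT "\n", ##__VA_ARGS__)

	/* GCC named-argument form, as adopted above */
	#define debug_gcc(FMT, a...)	printf(FMT "\n" , ## a)

	int main(void)
	{
		debug_c99("plain");
		debug_c99("value=%d", 42);
		debug_gcc("plain");
		debug_gcc("value=%d", 42);
		return 0;
	}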
 static inline void afs_discard_my_signals(void)
@@ -85,12 +75,7 @@ extern struct file_operations afs_file_file_operations;
  * inode.c
  */
 extern int afs_iget(struct super_block *sb, afs_fid_t *fid, struct inode **_inode);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
 extern int afs_inode_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
-#else
-extern void afs_read_inode2(struct inode *inode, void *opaque);
-extern int afs_inode_revalidate(struct dentry *dentry);
-#endif
 extern void afs_clear_inode(struct inode *inode);

 /*
@@ -113,7 +98,7 @@ extern struct list_head afs_cb_hash_tbl[];
 extern spinlock_t afs_cb_hash_lock;

 #define afs_cb_hash(SRV,FID) \
-	afs_cb_hash_tbl[((unsigned)(SRV) + (FID)->vid + (FID)->vnode + (FID)->unique) % \
+	afs_cb_hash_tbl[((unsigned long)(SRV) + (FID)->vid + (FID)->vnode + (FID)->unique) % \
 			AFS_CB_HASH_COUNT]

 /*
@@ -103,11 +103,7 @@ static int kafsasyncd(void *arg)
 	/* only certain signals are of interest */
 	spin_lock_irq(&current->sig->siglock);
 	siginitsetinv(&current->blocked,0);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,3)
 	recalc_sigpending();
-#else
-	recalc_sigpending(current);
-#endif
 	spin_unlock_irq(&current->sig->siglock);

 	/* loop around looking for things to attend to */
@@ -80,11 +80,7 @@ static int kafstimod(void *arg)
 	/* only certain signals are of interest */
 	spin_lock_irq(&current->sig->siglock);
 	siginitsetinv(&current->blocked,0);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,3)
 	recalc_sigpending();
-#else
-	recalc_sigpending(current);
-#endif
 	spin_unlock_irq(&current->sig->siglock);

 	/* loop around looking for things to attend to */
@@ -173,8 +173,8 @@ static void afs_discarding_peer(struct rxrpc_peer *peer)
 	_debug("Discarding peer %08x (rtt=%lu.%lumS)\n",
 	       ntohl(peer->addr.s_addr),
-	       peer->rtt/1000,
-	       peer->rtt%1000);
+	       (long)(peer->rtt/1000),
+	       (long)(peer->rtt%1000));

 	/* uncross-point the structs under a global lock */
 	spin_lock(&afs_server_peer_lock);
@@ -31,11 +31,7 @@ struct file_operations afs_mntpt_file_operations = {
 struct inode_operations afs_mntpt_inode_operations = {
 	.lookup		= afs_mntpt_lookup,
 	.readlink	= page_readlink,
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
 	.getattr	= afs_inode_getattr,
-#else
-	.revalidate	= afs_inode_revalidate,
-#endif
 };

 /*****************************************************************************/
@@ -70,7 +66,7 @@ int afs_mntpt_check_symlink(afs_vnode_t *vnode)
 	/* examine the symlink's contents */
 	size = vnode->status.size;
-	_debug("symlink to %*.*s",size,size,buf);
+	_debug("symlink to %*.*s",size,(int)size,buf);

 	if (size>2 &&
 	    (buf[0]=='%' || buf[0]=='#') &&
@@ -412,6 +412,7 @@ static int afs_proc_cell_volumes_release(struct inode *inode, struct file *file)
 	afs_put_cell(cell);

+	return ret;
 } /* end afs_proc_cell_volumes_release() */

 /*****************************************************************************/
@@ -536,6 +537,7 @@ static int afs_proc_cell_vlservers_release(struct inode *inode, struct file *fil
 	afs_put_cell(cell);

+	return ret;
 } /* end afs_proc_cell_vlservers_release() */

 /*****************************************************************************/
@@ -651,6 +653,7 @@ static int afs_proc_cell_servers_release(struct inode *inode, struct file *file)
 	afs_put_cell(cell);

+	return ret;
 } /* end afs_proc_cell_servers_release() */

 /*****************************************************************************/
@@ -39,12 +39,8 @@ static inline char *strdup(const char *s)
 static void afs_i_init_once(void *foo, kmem_cache_t *cachep, unsigned long flags);

-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
 static struct super_block *afs_get_sb(struct file_system_type *fs_type,
 				      int flags, char *dev_name, void *data);
-#else
-static struct super_block *afs_read_super(struct super_block *sb, void *data, int);
-#endif

 static struct inode *afs_alloc_inode(struct super_block *sb);
@@ -55,30 +51,20 @@ static void afs_destroy_inode(struct inode *inode);
 static struct file_system_type afs_fs_type = {
 	.owner		= THIS_MODULE,
 	.name		= "afs",
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
 	.get_sb		= afs_get_sb,
 	.kill_sb	= kill_anon_super,
-#else
-	.read_super	= afs_read_super,
-#endif
 };

 static struct super_operations afs_super_ops = {
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
 	.statfs		= simple_statfs,
 	.alloc_inode	= afs_alloc_inode,
 	.drop_inode	= generic_delete_inode,
 	.destroy_inode	= afs_destroy_inode,
-#else
-	.read_inode2	= afs_read_inode2,
-#endif
 	.clear_inode	= afs_clear_inode,
 	.put_super	= afs_put_super,
 };

-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
 static kmem_cache_t *afs_inode_cachep;
-#endif

 /*****************************************************************************/
 /*
@@ -90,23 +76,6 @@ int __init afs_fs_init(void)
 	kenter("");

-	/* open the cache */
-#if 0
-	ret = -EINVAL;
-	if (!cachedev) {
-		printk(KERN_NOTICE "kAFS: No cache device specified as module parm\n");
-		printk(KERN_NOTICE "kAFS: Set with \"cachedev=<devname>\" on insmod's cmdline\n");
-		return ret;
-	}
-
-	ret = afs_cache_open(cachedev,&afs_cache);
-	if (ret<0) {
-		printk(KERN_NOTICE "kAFS: Failed to open cache device\n");
-		return ret;
-	}
-#endif
-
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
 	/* create ourselves an inode cache */
 	ret = -ENOMEM;
 	afs_inode_cachep = kmem_cache_create("afs_inode_cache",
@@ -117,22 +86,13 @@ int __init afs_fs_init(void)
 					     NULL);
 	if (!afs_inode_cachep) {
 		printk(KERN_NOTICE "kAFS: Failed to allocate inode cache\n");
-#if 0
-		afs_put_cache(afs_cache);
-#endif
 		return ret;
 	}
-#endif

 	/* now export our filesystem to lesser mortals */
 	ret = register_filesystem(&afs_fs_type);
 	if (ret<0) {
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
 		kmem_cache_destroy(afs_inode_cachep);
-#endif
-#if 0
-		afs_put_cache(afs_cache);
-#endif
 		kleave(" = %d",ret);
 		return ret;
 	}
@@ -148,16 +108,10 @@ void __exit afs_fs_exit(void)
 void __exit afs_fs_exit(void)
 {
 	/* destroy our private inode cache */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
 	kmem_cache_destroy(afs_inode_cachep);
-#endif

 	unregister_filesystem(&afs_fs_type);

-#if 0
-	if (afs_cache)
-		afs_put_cache(afs_cache);
-#endif
 } /* end afs_fs_exit() */

 /*****************************************************************************/
@@ -453,7 +407,6 @@ static int afs_fill_super(struct super_block *sb, void *_data, int silent)
  * get an AFS superblock
  * - TODO: don't use get_sb_nodev(), but rather call sget() directly
  */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
 static struct super_block *afs_get_sb(struct file_system_type *fs_type,
 				      int flags,
 				      char *dev_name,
@@ -482,39 +435,6 @@ static struct super_block *afs_get_sb(struct file_system_type *fs_type,
 	_leave("");
 	return sb;
 } /* end afs_get_sb() */
-#endif
-
-/*****************************************************************************/
-/*
- * read an AFS superblock
- */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-static struct super_block *afs_read_super(struct super_block *sb, void *options, int silent)
-{
-	void *data[2] = { NULL, options };
-	int ret;
-
-	_enter(",,%s",(char*)options);
-
-	/* start the cache manager */
-	ret = afscm_start();
-	if (ret<0) {
-		_leave(" = NULL (%d)",ret);
-		return NULL;
-	}
-
-	/* allocate a deviceless superblock */
-	ret = afs_fill_super(sb,data,silent);
-	if (ret<0) {
-		afscm_stop();
-		_leave(" = NULL (%d)",ret);
-		return NULL;
-	}
-
-	_leave(" = %p",sb);
-	return sb;
-} /* end afs_read_super() */
-#endif
 /*****************************************************************************/
 /*
@@ -540,7 +460,6 @@ static void afs_put_super(struct super_block *sb)
 /*
  * initialise an inode cache slab element prior to any use
  */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
 static void afs_i_init_once(void *_vnode, kmem_cache_t *cachep, unsigned long flags)
 {
 	afs_vnode_t *vnode = (afs_vnode_t *) _vnode;
@@ -556,13 +475,11 @@ static void afs_i_init_once(void *_vnode, kmem_cache_t *cachep, unsigned long fl
 	}
 } /* end afs_i_init_once() */
-#endif

 /*****************************************************************************/
 /*
  * allocate an AFS inode struct from our slab cache
  */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
 static struct inode *afs_alloc_inode(struct super_block *sb)
 {
 	afs_vnode_t *vnode;
@@ -580,16 +497,13 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
 	return &vnode->vfs_inode;
 } /* end afs_alloc_inode() */
-#endif

 /*****************************************************************************/
 /*
  * destroy an AFS inode struct
  */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
 static void afs_destroy_inode(struct inode *inode)
 {
 	_enter("{%lu}",inode->i_ino);

 	kmem_cache_free(afs_inode_cachep, AFS_FS_I(inode));
 } /* end afs_destroy_inode() */
-#endif
@@ -626,7 +626,7 @@ static void afs_rxvl_get_entry_by_id_attn(struct rxrpc_call *call)
 	case RXRPC_CSTATE_CLNT_GOT_REPLY:
 		if (call->app_read_count==0)
 			break;
 		printk("kAFS: Reply bigger than expected {cst=%u asyn=%d mark=%Zu rdy=%Zu pr=%u%s}",
 		       call->app_call_state,
 		       call->app_async_read,
 		       call->app_mark,
@@ -27,11 +27,7 @@ struct afs_rxfs_fetch_descriptor;
  */
 struct afs_vnode
 {
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
 	struct inode		vfs_inode;	/* the VFS's inode record */
-#else
-	struct inode		*inode;		/* the VFS's inode */
-#endif

 	afs_volume_t		*volume;	/* volume on which vnode resides */
 	afs_fid_t		fid;		/* the file identifier for this inode */
@@ -59,20 +55,12 @@ struct afs_vnode
 static inline afs_vnode_t *AFS_FS_I(struct inode *inode)
 {
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
-	return list_entry(inode,afs_vnode_t,vfs_inode);
-#else
-	return inode->u.generic_ip;
-#endif
+	return container_of(inode,afs_vnode_t,vfs_inode);
 }

 static inline struct inode *AFS_VNODE_TO_I(afs_vnode_t *vnode)
 {
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
 	return &vnode->vfs_inode;
-#else
-	return vnode->inode;
-#endif
 }

 extern int afs_vnode_fetch_status(afs_vnode_t *vnode);
@@ -299,7 +299,7 @@ static ssize_t part_attr_show(struct kobject * kobj, struct attribute * attr,
 }

 static struct sysfs_ops part_sysfs_ops = {
-	.show	part_attr_show,
+	.show	= part_attr_show,
 };

 static ssize_t part_dev_read(struct hd_struct * p,
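The genhd fix above is pure syntax: a C99 designated initializer requires the `=`, so `.show part_attr_show` never compiled. In miniature (names invented):

	#include <stdio.h>

	struct ops { int (*show)(void); };

	static int demo_show(void) { return 7; }

	static struct ops part_ops = {
		.show = demo_show,	/* ".show demo_show" would be an error */
	};

	int main(void)
	{
		printf("%d\n", part_ops.show());
		return 0;
	}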
@@ -85,44 +85,47 @@
 /* The maximum address for ISA DMA transfer on Alpha XL, due to an
    hardware SIO limitation, is 64MB.
 */
-#define ALPHA_XL_MAX_DMA_ADDRESS	(IDENT_ADDR+0x04000000UL)
+#define ALPHA_XL_MAX_ISA_DMA_ADDRESS	0x04000000UL

-/* The maximum address for ISA DMA transfer on RUFFIAN and NAUTILUS,
+/* The maximum address for ISA DMA transfer on RUFFIAN,
    due to an hardware SIO limitation, is 16MB.
 */
-#define ALPHA_RUFFIAN_MAX_DMA_ADDRESS	(IDENT_ADDR+0x01000000UL)
-#define ALPHA_NAUTILUS_MAX_DMA_ADDRESS	(IDENT_ADDR+0x01000000UL)
+#define ALPHA_RUFFIAN_MAX_ISA_DMA_ADDRESS	0x01000000UL

 /* The maximum address for ISA DMA transfer on SABLE, and some ALCORs,
    due to an hardware SIO chip limitation, is 2GB.
 */
-#define ALPHA_SABLE_MAX_DMA_ADDRESS	(IDENT_ADDR+0x80000000UL)
-#define ALPHA_ALCOR_MAX_DMA_ADDRESS	(IDENT_ADDR+0x80000000UL)
+#define ALPHA_SABLE_MAX_ISA_DMA_ADDRESS	0x80000000UL
+#define ALPHA_ALCOR_MAX_ISA_DMA_ADDRESS	0x80000000UL

 /*
   Maximum address for all the others is the complete 32-bit bus
   address space.
 */
-#define ALPHA_MAX_DMA_ADDRESS		(IDENT_ADDR+0x100000000UL)
+#define ALPHA_MAX_ISA_DMA_ADDRESS	0x100000000UL

 #ifdef CONFIG_ALPHA_GENERIC
-# define MAX_DMA_ADDRESS		(alpha_mv.max_dma_address)
+# define MAX_ISA_DMA_ADDRESS		(alpha_mv.max_isa_dma_address)
 #else
 # if defined(CONFIG_ALPHA_XL)
-#  define MAX_DMA_ADDRESS		ALPHA_XL_MAX_DMA_ADDRESS
+#  define MAX_ISA_DMA_ADDRESS		ALPHA_XL_MAX_ISA_DMA_ADDRESS
 # elif defined(CONFIG_ALPHA_RUFFIAN)
-#  define MAX_DMA_ADDRESS		ALPHA_RUFFIAN_MAX_DMA_ADDRESS
-# elif defined(CONFIG_ALPHA_NAUTILUS)
-#  define MAX_DMA_ADDRESS		ALPHA_NAUTILUS_MAX_DMA_ADDRESS
+#  define MAX_ISA_DMA_ADDRESS		ALPHA_RUFFIAN_MAX_ISA_DMA_ADDRESS
 # elif defined(CONFIG_ALPHA_SABLE)
-#  define MAX_DMA_ADDRESS		ALPHA_SABLE_MAX_DMA_ADDRESS
+#  define MAX_ISA_DMA_ADDRESS		ALPHA_SABLE_MAX_ISA_DMA_ADDRESS
 # elif defined(CONFIG_ALPHA_ALCOR)
-#  define MAX_DMA_ADDRESS		ALPHA_ALCOR_MAX_DMA_ADDRESS
+#  define MAX_ISA_DMA_ADDRESS		ALPHA_ALCOR_MAX_ISA_DMA_ADDRESS
 # else
-#  define MAX_DMA_ADDRESS		ALPHA_MAX_DMA_ADDRESS
+#  define MAX_ISA_DMA_ADDRESS		ALPHA_MAX_ISA_DMA_ADDRESS
 # endif
 #endif

+/* If we have the iommu, we don't have any address limitations on DMA.
+   Otherwise (Nautilus, RX164), we have to have 0-16 Mb DMA zone
+   like i386. */
+#define MAX_DMA_ADDRESS		(alpha_mv.mv_pci_tbi ? \
+				 ~0UL : IDENT_ADDR + 0x01000000)
+
 /* 8237 DMA controllers */
 #define IO_DMA1_BASE	0x00	/* 8 bit slave DMA, channels 0..3 */
 #define IO_DMA2_BASE	0xC0	/* 16 bit master DMA, ch 4(=slave input)..7 */
@@ -51,12 +51,12 @@ alpha_fd_dma_setup(char *addr, unsigned long size, int mode, int io)
 	if (bus_addr
 	    && (addr != prev_addr || size != prev_size || dir != prev_dir)) {
 		/* different from last time -- unmap prev */
-		pci_unmap_single(NULL, bus_addr, prev_size, prev_dir);
+		pci_unmap_single(isa_bridge, bus_addr, prev_size, prev_dir);
 		bus_addr = 0;
 	}

 	if (!bus_addr)	/* need to map it */
-		bus_addr = pci_map_single(NULL, addr, size, dir);
+		bus_addr = pci_map_single(isa_bridge, addr, size, dir);

 	/* remember this one as prev */
 	prev_addr = addr;
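Mapping the floppy buffer against the real isa_bridge device rather than NULL lets the DMA mapping layer honour that device's addressing limit (ISA DMA tops out far below the 32-bit bus) instead of assuming an unconstrained master. The gist, as a toy range check (the 16MB mask is an assumption for a classically limited ISA bridge):

	#include <stdio.h>

	#define ISA_BRIDGE_DMA_MASK	0x00ffffffUL	/* assumed: 16MB - 1 */

	/* Would this bus address be reachable by the device?  If not, the
	 * mapping layer must bounce the buffer or route it through an
	 * IOMMU/scatter-gather arena. */
	static int dma_addr_ok(unsigned long bus_addr, unsigned long mask)
	{
		return bus_addr <= mask;
	}

	int main(void)
	{
		printf("%d\n", dma_addr_ok(0x00800000UL, ISA_BRIDGE_DMA_MASK)); /* 1 */
		printf("%d\n", dma_addr_ok(0x40000000UL, ISA_BRIDGE_DMA_MASK)); /* 0 */
		return 0;
	}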
@@ -34,7 +34,7 @@ struct alpha_machine_vector
 	int nr_irqs;
 	int rtc_port;
 	int max_asn;
-	unsigned long max_dma_address;
+	unsigned long max_isa_dma_address;
 	unsigned long irq_probe_mask;
 	unsigned long iack_sc;
 	unsigned long min_io_address;
@@ -55,9 +55,6 @@ extern void release_thread(struct task_struct *);
 /* Create a kernel thread without removing it from tasklists. */
 extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

-#define copy_segments(tsk, mm)		do { } while (0)
-#define release_segments(mm)		do { } while (0)
-
 unsigned long get_wchan(struct task_struct *p);

 /* See arch/alpha/kernel/ptrace.c for details. */
@@ -62,10 +62,6 @@ struct task_struct;
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);

-/* Copy and release all segment info associated with a VM */
-#define copy_segments(tsk, mm)		do { } while (0)
-#define release_segments(mm)		do { } while (0)
-
 unsigned long get_wchan(struct task_struct *p);

 #define cpu_relax()	barrier()
@@ -108,10 +108,6 @@ unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)

-#define copy_segments(tsk, mm)		do { } while (0)
-#define release_segments(mm)		do { } while (0)
-#define forget_segments()		do { } while (0)
-
 /*
  * Free current thread data structures etc..
  */
@@ -362,10 +362,6 @@ struct task_struct;
  */
 extern int kernel_thread (int (*fn)(void *), void *arg, unsigned long flags);

-/* Copy and release all segment info associated with a VM */
-#define copy_segments(tsk, mm)		do { } while (0)
-#define release_segments(mm)		do { } while (0)
-
 /* Get wait channel for task P.  */
 extern unsigned long get_wchan (struct task_struct *p);
@@ -114,9 +114,6 @@ static inline void release_thread(struct task_struct *dead_task)
 extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);

-#define copy_segments(tsk, mm)		do { } while (0)
-#define release_segments(mm)		do { } while (0)
-
 /*
  * Free current thread data structures etc..
  */
@@ -104,10 +104,6 @@ static inline void release_thread(struct task_struct *dead_task)
 extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);

-#define copy_segments(tsk, mm)		do { } while (0)
-#define release_segments(mm)		do { } while (0)
-#define forget_segments()		do { } while (0)
-
 /*
  * Free current thread data structures etc..
  */
@@ -210,10 +210,6 @@ struct thread_struct {
 extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);

-/* Copy and release all segment info associated with a VM */
-#define copy_segments(p, mm)		do { } while(0)
-#define release_segments(mm)		do { } while(0)
-
 /*
  * Return saved PC of a blocked thread.
  */
@@ -233,10 +233,6 @@ struct thread_struct {
 extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);

-/* Copy and release all segment info associated with a VM */
-#define copy_segments(p, mm)		do { } while(0)
-#define release_segments(mm)		do { } while(0)
-
 /*
  * Return saved PC of a blocked thread.
  */
@@ -295,12 +295,6 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 extern void map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm);

-#define copy_segments(tsk, mm)	do { \
-		if (tsk->personality == PER_HPUX) \
-			map_hpux_gateway_page(tsk,mm); \
-	} while (0)
-#define release_segments(mm)	do { } while (0)
-
 static inline unsigned long get_wchan(struct task_struct *p)
 {
 	return 0xdeadbeef; /* XXX */
@@ -736,9 +736,6 @@ struct thread_struct {
 #define thread_saved_pc(tsk)	\
 	((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)

-#define copy_segments(tsk, mm)		do { } while (0)
-#define release_segments(mm)		do { } while (0)
-
 unsigned long get_wchan(struct task_struct *p);

 #define KSTK_EIP(tsk)  ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
@@ -37,25 +37,21 @@ struct machdep_calls {
 	long		(*hpte_updatepp)(unsigned long slot,
 					 unsigned long newpp,
 					 unsigned long va,
-					 int large);
+					 int large,
+					 int local);
 	void		(*hpte_updateboltedpp)(unsigned long newpp,
 					       unsigned long ea);
-	long		(*insert_hpte)(unsigned long hpte_group,
-				       unsigned long vpn,
+	long		(*hpte_insert)(unsigned long hpte_group,
+				       unsigned long va,
 				       unsigned long prpn,
 				       int secondary,
 				       unsigned long hpteflags,
 				       int bolted,
 				       int large);
-	long		(*remove_hpte)(unsigned long hpte_group);
+	long		(*hpte_remove)(unsigned long hpte_group);
 	void		(*flush_hash_range)(unsigned long context,
 					    unsigned long number,
 					    int local);
-	void		(*make_pte)(void *htab, unsigned long va,
-				    unsigned long pa,
-				    int mode,
-				    unsigned long hash_mask,
-				    int large);

 	void		(*tce_build)(struct TceTable * tbl,
 				     long tcenum,
@@ -682,10 +682,6 @@ extern struct mm_struct ioremap_mm;
 #define thread_saved_pc(tsk)	\
 	((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)

-#define copy_segments(tsk, mm)		do { } while (0)
-#define release_segments(mm)		do { } while (0)
-#define forget_segments()		do { } while (0)
-
 unsigned long get_wchan(struct task_struct *p);

 #define KSTK_EIP(tsk)  ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
@@ -114,10 +114,6 @@ struct mm_struct;
 extern void release_thread(struct task_struct *);
 extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);

-/* Copy and release all segment info associated with a VM */
-#define copy_segments(nr, mm)		do { } while (0)
-#define release_segments(mm)		do { } while (0)
-
 /*
  * Return saved PC of a blocked thread.
  */
@@ -131,10 +131,6 @@ struct mm_struct;
 extern void release_thread(struct task_struct *);
 extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);

-/* Copy and release all segment info associated with a VM */
-#define copy_segments(nr, mm)		do { } while (0)
-#define release_segments(mm)		do { } while (0)
-
 /*
  * Return saved PC of a blocked thread.
  */
......
...@@ -147,11 +147,6 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); ...@@ -147,11 +147,6 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
#define MCA_bus 0 #define MCA_bus 0
#define MCA_bus__is_a_macro /* for versions in ksyms.c */ #define MCA_bus__is_a_macro /* for versions in ksyms.c */
/* Copy and release all segment info associated with a VM */
#define copy_segments(p, mm) do { } while(0)
#define release_segments(mm) do { } while(0)
/* /*
* FPU lazy state save handling. * FPU lazy state save handling.
*/ */
......
...@@ -140,10 +140,6 @@ extern __inline__ void start_thread(struct pt_regs * regs, unsigned long pc, ...@@ -140,10 +140,6 @@ extern __inline__ void start_thread(struct pt_regs * regs, unsigned long pc,
#define release_thread(tsk) do { } while(0) #define release_thread(tsk) do { } while(0)
extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
#define copy_segments(tsk, mm) do { } while (0)
#define release_segments(mm) do { } while (0)
#define get_wchan(__TSK) \ #define get_wchan(__TSK) \
({ extern void scheduling_functions_start_here(void); \ ({ extern void scheduling_functions_start_here(void); \
extern void scheduling_functions_end_here(void); \ extern void scheduling_functions_end_here(void); \
......
...@@ -188,9 +188,6 @@ do { \ ...@@ -188,9 +188,6 @@ do { \
extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
#define copy_segments(tsk, mm) do { } while (0)
#define release_segments(mm) do { } while (0)
#define get_wchan(__TSK) \ #define get_wchan(__TSK) \
({ extern void scheduling_functions_start_here(void); \ ({ extern void scheduling_functions_start_here(void); \
extern void scheduling_functions_end_here(void); \ extern void scheduling_functions_end_here(void); \
......
...@@ -92,17 +92,6 @@ extern void release_thread(struct task_struct *); ...@@ -92,17 +92,6 @@ extern void release_thread(struct task_struct *);
extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
extern void dump_thread(struct pt_regs *regs, struct user *u); extern void dump_thread(struct pt_regs *regs, struct user *u);
static inline void release_segments(struct mm_struct *mm)
{
}
static inline void copy_segments(struct task_struct *p,
struct mm_struct *new_mm)
{
}
#define forget_segments() do ; while(0)
extern unsigned long thread_saved_pc(struct task_struct *t); extern unsigned long thread_saved_pc(struct task_struct *t);
#define init_stack (init_thread_union.stack) #define init_stack (init_thread_union.stack)
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
* (C) 2002 Dominik Brodowski <linux@brodo.de> * (C) 2002 Dominik Brodowski <linux@brodo.de>
* *
* *
* $Id: cpufreq.h,v 1.26 2002/09/21 09:05:29 db Exp $ * $Id: cpufreq.h,v 1.27 2002/10/08 14:54:23 db Exp $
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as * it under the terms of the GNU General Public License version 2 as
...@@ -148,11 +148,9 @@ static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, u ...@@ -148,11 +148,9 @@ static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, u
int cpufreq_set_policy(struct cpufreq_policy *policy); int cpufreq_set_policy(struct cpufreq_policy *policy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
#ifdef CONFIG_CPU_FREQ_26_API
#ifdef CONFIG_PM #ifdef CONFIG_PM
int cpufreq_restore(void); int cpufreq_restore(void);
#endif #endif
#endif
#ifdef CONFIG_CPU_FREQ_24_API #ifdef CONFIG_CPU_FREQ_24_API
...@@ -160,9 +158,6 @@ int cpufreq_restore(void); ...@@ -160,9 +158,6 @@ int cpufreq_restore(void);
* CPUFREQ 2.4. INTERFACE * * CPUFREQ 2.4. INTERFACE *
*********************************************************************/ *********************************************************************/
int cpufreq_setmax(unsigned int cpu); int cpufreq_setmax(unsigned int cpu);
#ifdef CONFIG_PM
int cpufreq_restore(void);
#endif
int cpufreq_set(unsigned int kHz, unsigned int cpu); int cpufreq_set(unsigned int kHz, unsigned int cpu);
unsigned int cpufreq_get(unsigned int cpu); unsigned int cpufreq_get(unsigned int cpu);
......
...@@ -643,6 +643,10 @@ void pci_pool_destroy (struct pci_pool *pool); ...@@ -643,6 +643,10 @@ void pci_pool_destroy (struct pci_pool *pool);
void *pci_pool_alloc (struct pci_pool *pool, int flags, dma_addr_t *handle); void *pci_pool_alloc (struct pci_pool *pool, int flags, dma_addr_t *handle);
void pci_pool_free (struct pci_pool *pool, void *vaddr, dma_addr_t addr); void pci_pool_free (struct pci_pool *pool, void *vaddr, dma_addr_t addr);
#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
extern struct pci_dev *isa_bridge;
#endif
#endif /* CONFIG_PCI */ #endif /* CONFIG_PCI */
/* Include architecture-dependent settings and functions */ /* Include architecture-dependent settings and functions */
...@@ -703,6 +707,8 @@ static inline int pci_enable_wake(struct pci_dev *dev, u32 state, int enable) { ...@@ -703,6 +707,8 @@ static inline int pci_enable_wake(struct pci_dev *dev, u32 state, int enable) {
#define pci_for_each_dev(dev) \ #define pci_for_each_dev(dev) \
for(dev = NULL; 0; ) for(dev = NULL; 0; )
#define isa_bridge ((struct pci_dev *)NULL)
#else #else
/* /*
......
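A hedged usage sketch (driver fragment, not from this commit): with the extern above, ISA-aware code can probe for the bridge uniformly, and because the !CONFIG_PCI stub defines isa_bridge as a NULL pci_dev pointer, the same test compiles away cleanly on PCI-less configs. slot_name is the 2.5-era pci_dev name field; the cast is only for printing.

/* Fragment only; assumes <linux/pci.h> context. */
if (isa_bridge)
	printk(KERN_INFO "ISA bridge %s, dma_mask=%#lx\n",
	       isa_bridge->slot_name,
	       (unsigned long)isa_bridge->dma_mask);
else
	printk(KERN_INFO "no ISA bridge found\n");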
...@@ -153,7 +153,7 @@ do { (CALL)->app_scr_alloc = (CALL)->app_scratch; } while(0) ...@@ -153,7 +153,7 @@ do { (CALL)->app_scr_alloc = (CALL)->app_scratch; } while(0)
(CALL)->app_scr_alloc += (SIZE); \ (CALL)->app_scr_alloc += (SIZE); \
if ((SIZE)>RXRPC_CALL_SCRATCH_SIZE || \ if ((SIZE)>RXRPC_CALL_SCRATCH_SIZE || \
(size_t)((CALL)->app_scr_alloc - (u8*)(CALL)) > RXRPC_CALL_SCRATCH_SIZE) { \ (size_t)((CALL)->app_scr_alloc - (u8*)(CALL)) > RXRPC_CALL_SCRATCH_SIZE) { \
printk("rxrpc_call_alloc_scratch(%p,%u)\n",(CALL),(SIZE)); \ printk("rxrpc_call_alloc_scratch(%p,%Zu)\n",(CALL),(size_t)(SIZE)); \
BUG(); \ BUG(); \
} \ } \
ptr; \ ptr; \
...@@ -167,7 +167,7 @@ do { (CALL)->app_scr_alloc = (CALL)->app_scratch; } while(0) ...@@ -167,7 +167,7 @@ do { (CALL)->app_scr_alloc = (CALL)->app_scratch; } while(0)
(CALL)->app_scr_alloc += size; \ (CALL)->app_scr_alloc += size; \
if (size>RXRPC_CALL_SCRATCH_SIZE || \ if (size>RXRPC_CALL_SCRATCH_SIZE || \
(size_t)((CALL)->app_scr_alloc - (u8*)(CALL)) > RXRPC_CALL_SCRATCH_SIZE) { \ (size_t)((CALL)->app_scr_alloc - (u8*)(CALL)) > RXRPC_CALL_SCRATCH_SIZE) { \
printk("rxrpc_call_alloc_scratch(%p,%u)\n",(CALL),size); \ printk("rxrpc_call_alloc_scratch(%p,%Zu)\n",(CALL),size); \
BUG(); \ BUG(); \
} \ } \
ptr; \ ptr; \
......
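The format changes here, and the many %Zu conversions further down in this commit, are about size_t: on 64-bit targets size_t is unsigned long, so printing it with %u trips gcc's printf-format checking. %Z was the kernel's pre-C99 length modifier for size_t. A minimal userspace analogue, using the C99 spelling %zu:

/* Userspace demo of the underlying type issue (C99 %zu; the kernel of
 * this era spelled it %Zu). */
#include <stdio.h>

int main(void)
{
	size_t n = sizeof(long);
	printf("sizeof(long) = %zu bytes\n", n);	/* portable */
	/* printf("%u", n) would warn, and misprint, on LP64 targets */
	return 0;
}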
...@@ -57,8 +57,8 @@ struct rxrpc_peer ...@@ -57,8 +57,8 @@ struct rxrpc_peer
/* calculated RTT cache */ /* calculated RTT cache */
#define RXRPC_RTT_CACHE_SIZE 32 #define RXRPC_RTT_CACHE_SIZE 32
suseconds_t rtt; /* current RTT estimate (in uS) */ suseconds_t rtt; /* current RTT estimate (in uS) */
unsigned short rtt_point; /* next entry at which to insert */ unsigned rtt_point; /* next entry at which to insert */
unsigned short rtt_usage; /* amount of cache actually used */ unsigned rtt_usage; /* amount of cache actually used */
suseconds_t rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */ suseconds_t rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */
}; };
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
* Copyright (C) 2001 Russell King * Copyright (C) 2001 Russell King
* (C) 2002 Dominik Brodowski <linux@brodo.de> * (C) 2002 Dominik Brodowski <linux@brodo.de>
* *
* $Id: cpufreq.c,v 1.43 2002/09/21 09:05:29 db Exp $ * $Id: cpufreq.c,v 1.45 2002/10/08 14:54:23 db Exp $
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as * it under the terms of the GNU General Public License version 2 as
...@@ -21,13 +21,10 @@ ...@@ -21,13 +21,10 @@
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/ctype.h> #include <linux/ctype.h>
#include <linux/proc_fs.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#ifdef CONFIG_CPU_FREQ_26_API
#include <linux/proc_fs.h>
#endif
#ifdef CONFIG_CPU_FREQ_24_API #ifdef CONFIG_CPU_FREQ_24_API
#include <linux/sysctl.h> #include <linux/sysctl.h>
#endif #endif
...@@ -200,7 +197,6 @@ static int __init cpufreq_setup(char *str) ...@@ -200,7 +197,6 @@ static int __init cpufreq_setup(char *str)
__setup("cpufreq=", cpufreq_setup); __setup("cpufreq=", cpufreq_setup);
#ifdef CONFIG_CPU_FREQ_26_API
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
/** /**
...@@ -335,7 +331,6 @@ static void cpufreq_proc_exit (void) ...@@ -335,7 +331,6 @@ static void cpufreq_proc_exit (void)
return; return;
} }
#endif /* CONFIG_PROC_FS */ #endif /* CONFIG_PROC_FS */
#endif /* CONFIG_CPU_FREQ_26_API */
...@@ -344,10 +339,6 @@ static void cpufreq_proc_exit (void) ...@@ -344,10 +339,6 @@ static void cpufreq_proc_exit (void)
*********************************************************************/ *********************************************************************/
#ifdef CONFIG_CPU_FREQ_24_API #ifdef CONFIG_CPU_FREQ_24_API
/* NOTE #1: when you use this API, you may not use any other calls,
* except cpufreq_[un]register_notifier, of course.
*/
/** /**
* cpufreq_set - set the CPU frequency * cpufreq_set - set the CPU frequency
* @freq: target frequency in kHz * @freq: target frequency in kHz
...@@ -879,7 +870,7 @@ int cpufreq_set_policy(struct cpufreq_policy *policy) ...@@ -879,7 +870,7 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
cpufreq_driver->policy[policy->cpu].max = policy->max; cpufreq_driver->policy[policy->cpu].max = policy->max;
cpufreq_driver->policy[policy->cpu].policy = policy->policy; cpufreq_driver->policy[policy->cpu].policy = policy->policy;
} }
#ifdef CONFIG_CPU_FREQ_24_API #ifdef CONFIG_CPU_FREQ_24_API
if (policy->cpu == CPUFREQ_ALL_CPUS) { if (policy->cpu == CPUFREQ_ALL_CPUS) {
for (i=0;i<NR_CPUS;i++) for (i=0;i<NR_CPUS;i++)
...@@ -945,6 +936,14 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) ...@@ -945,6 +936,14 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
case CPUFREQ_POSTCHANGE: case CPUFREQ_POSTCHANGE:
adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_POSTCHANGE, freqs); notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_POSTCHANGE, freqs);
#ifdef CONFIG_CPU_FREQ_24_API
if (freqs->cpu == CPUFREQ_ALL_CPUS) {
int i;
for (i=0;i<NR_CPUS;i++)
cpu_cur_freq[i] = freqs->new;
} else
cpu_cur_freq[freqs->cpu] = freqs->new;
#endif
break; break;
} }
up(&cpufreq_notifier_sem); up(&cpufreq_notifier_sem);
...@@ -992,9 +991,7 @@ int cpufreq_register(struct cpufreq_driver *driver_data) ...@@ -992,9 +991,7 @@ int cpufreq_register(struct cpufreq_driver *driver_data)
ret = cpufreq_set_policy(&default_policy); ret = cpufreq_set_policy(&default_policy);
#ifdef CONFIG_CPU_FREQ_26_API
cpufreq_proc_init(); cpufreq_proc_init();
#endif
#ifdef CONFIG_CPU_FREQ_24_API #ifdef CONFIG_CPU_FREQ_24_API
down(&cpufreq_driver_sem); down(&cpufreq_driver_sem);
...@@ -1042,9 +1039,7 @@ int cpufreq_unregister(void) ...@@ -1042,9 +1039,7 @@ int cpufreq_unregister(void)
up(&cpufreq_driver_sem); up(&cpufreq_driver_sem);
#ifdef CONFIG_CPU_FREQ_26_API
cpufreq_proc_exit(); cpufreq_proc_exit();
#endif
#ifdef CONFIG_CPU_FREQ_24_API #ifdef CONFIG_CPU_FREQ_24_API
cpufreq_sysctl_exit(); cpufreq_sysctl_exit();
...@@ -1086,13 +1081,7 @@ int cpufreq_restore(void) ...@@ -1086,13 +1081,7 @@ int cpufreq_restore(void)
policy.cpu = i; policy.cpu = i;
up(&cpufreq_driver_sem); up(&cpufreq_driver_sem);
#ifdef CONFIG_CPU_FREQ_26_API
cpufreq_set_policy(&policy); cpufreq_set_policy(&policy);
#endif
#ifdef CONFIG_CPU_FREQ_24_API
cpufreq_set(cpu_cur_freq[i], i);
#endif
} }
return 0; return 0;
......
...@@ -15,19 +15,13 @@ rxrpc-objs := \ ...@@ -15,19 +15,13 @@ rxrpc-objs := \
rxrpc_syms.o \ rxrpc_syms.o \
transport.o transport.o
#ifeq ($(CONFIG_PROC_FS),y) ifeq ($(CONFIG_PROC_FS),y)
rxrpc-objs += proc.o rxrpc-objs += proc.o
#endif endif
#ifeq ($(CONFIG_SYSCTL),y) ifeq ($(CONFIG_SYSCTL),y)
rxrpc-objs += sysctl.o rxrpc-objs += sysctl.o
#endif
obj-m := rxrpc.o
# superfluous for 2.5, but needed for 2.4..
ifeq "$(VERSION).$(PATCHLEVEL)" "2.4"
rxrpc.o: $(rxrpc-objs)
$(LD) -r -o $@ $(rxrpc-objs)
endif endif
obj-$(CONFIG_RXRPC) := rxrpc.o
include $(TOPDIR)/Rules.make include $(TOPDIR)/Rules.make
...@@ -26,10 +26,10 @@ __RXACCT_DECL(atomic_t rxrpc_message_count); ...@@ -26,10 +26,10 @@ __RXACCT_DECL(atomic_t rxrpc_message_count);
LIST_HEAD(rxrpc_calls); LIST_HEAD(rxrpc_calls);
DECLARE_RWSEM(rxrpc_calls_sem); DECLARE_RWSEM(rxrpc_calls_sem);
unsigned rxrpc_call_rcv_timeout = 30; unsigned rxrpc_call_rcv_timeout = HZ/3;
unsigned rxrpc_call_acks_timeout = 30; unsigned rxrpc_call_acks_timeout = HZ/3;
unsigned rxrpc_call_dfr_ack_timeout = 5; unsigned rxrpc_call_dfr_ack_timeout = HZ/20;
unsigned short rxrpc_call_max_resend = 10; unsigned short rxrpc_call_max_resend = HZ/10;
const char *rxrpc_call_states[] = { const char *rxrpc_call_states[] = {
"COMPLETE", "COMPLETE",
...@@ -129,6 +129,22 @@ static void __rxrpc_call_ackr_timeout(unsigned long _call) ...@@ -129,6 +129,22 @@ static void __rxrpc_call_ackr_timeout(unsigned long _call)
rxrpc_krxiod_queue_call(call); rxrpc_krxiod_queue_call(call);
} }
/*****************************************************************************/
/*
* calculate a timeout based on an RTT value
*/
static inline unsigned long __rxrpc_rtt_based_timeout(struct rxrpc_call *call, unsigned long val)
{
unsigned long expiry = call->conn->peer->rtt / (1000000/HZ);
expiry += 10;
if (expiry<HZ/25) expiry = HZ/25;
if (expiry>HZ) expiry = HZ;
_leave(" = %lu jiffies",expiry);
return jiffies + expiry;
} /* end __rxrpc_rtt_based_timeout() */
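To make the clamp above concrete, here is a userspace sketch of the same arithmetic, assuming HZ=100 for illustration: the RTT in microseconds is converted to jiffies, 10 jiffies of slack are added, and the result is bounded to the HZ/25..HZ window (40ms to 1s at HZ=100). The kernel version then adds jiffies to produce an absolute expiry.

/* Standalone model of __rxrpc_rtt_based_timeout(); HZ=100 is assumed. */
#include <stdio.h>

#define HZ 100

static unsigned long rtt_based_timeout(unsigned long rtt_usecs)
{
	unsigned long expiry = rtt_usecs / (1000000 / HZ);	/* usecs -> jiffies */
	expiry += 10;				/* fixed slack, as in the diff */
	if (expiry < HZ / 25)			/* floor: 40ms at HZ=100 */
		expiry = HZ / 25;
	if (expiry > HZ)			/* ceiling: one second */
		expiry = HZ;
	return expiry;	/* the kernel adds jiffies for an absolute time */
}

int main(void)
{
	printf("rtt=2000us   -> %lu jiffies\n", rtt_based_timeout(2000));	/* 10 */
	printf("rtt=500000us -> %lu jiffies\n", rtt_based_timeout(500000));	/* 60 */
	return 0;
}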
/*****************************************************************************/ /*****************************************************************************/
/* /*
* create a new call record * create a new call record
...@@ -321,7 +337,10 @@ int rxrpc_incoming_call(struct rxrpc_connection *conn, ...@@ -321,7 +337,10 @@ int rxrpc_incoming_call(struct rxrpc_connection *conn,
spin_lock(&conn->lock); spin_lock(&conn->lock);
if (!conn->channels[cix]) { if (!conn->channels[cix] ||
conn->channels[cix]->app_call_state == RXRPC_CSTATE_COMPLETE ||
conn->channels[cix]->app_call_state == RXRPC_CSTATE_ERROR
) {
conn->channels[cix] = call; conn->channels[cix] = call;
rxrpc_get_connection(conn); rxrpc_get_connection(conn);
ret = 0; ret = 0;
...@@ -329,9 +348,10 @@ int rxrpc_incoming_call(struct rxrpc_connection *conn, ...@@ -329,9 +348,10 @@ int rxrpc_incoming_call(struct rxrpc_connection *conn,
spin_unlock(&conn->lock); spin_unlock(&conn->lock);
if (ret<0) free_page((unsigned long)call); if (ret<0) {
free_page((unsigned long)call);
_leave(" = %p",call); call = NULL;
}
if (ret==0) { if (ret==0) {
down_write(&rxrpc_calls_sem); down_write(&rxrpc_calls_sem);
...@@ -341,6 +361,7 @@ int rxrpc_incoming_call(struct rxrpc_connection *conn, ...@@ -341,6 +361,7 @@ int rxrpc_incoming_call(struct rxrpc_connection *conn,
*_call = call; *_call = call;
} }
_leave(" = %d [%p]",ret,call);
return ret; return ret;
} /* end rxrpc_incoming_call() */ } /* end rxrpc_incoming_call() */
...@@ -367,7 +388,8 @@ void rxrpc_put_call(struct rxrpc_call *call) ...@@ -367,7 +388,8 @@ void rxrpc_put_call(struct rxrpc_call *call)
return; return;
} }
conn->channels[ntohl(call->chan_ix)] = NULL; if (conn->channels[ntohl(call->chan_ix)]==call)
conn->channels[ntohl(call->chan_ix)] = NULL;
spin_unlock(&conn->lock); spin_unlock(&conn->lock);
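The new guard pairs with the rxrpc_incoming_call() change above: once a call reaches COMPLETE or ERROR its channel slot may be handed to a newer call, so the dying call must only clear the slot if it still owns it. The pattern in isolation, as a hedged standalone sketch:

/* Generic compare-before-clear: only the current occupant may free the
 * slot (model code, not from the kernel). */
struct slot { void *owner; };

static void release_slot(struct slot *s, void *me)
{
	if (s->owner == me)	/* a newer owner may have replaced us */
		s->owner = (void *)0;
}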
...@@ -1005,7 +1027,7 @@ static void rxrpc_call_receive_data_packet(struct rxrpc_call *call, struct rxrpc ...@@ -1005,7 +1027,7 @@ static void rxrpc_call_receive_data_packet(struct rxrpc_call *call, struct rxrpc
} }
/* next in sequence - simply append into the call's ready queue */ /* next in sequence - simply append into the call's ready queue */
_debug("Call add packet %d to readyq (+%d => %d bytes)", _debug("Call add packet %d to readyq (+%Zd => %Zd bytes)",
msg->seq,msg->dsize,call->app_ready_qty); msg->seq,msg->dsize,call->app_ready_qty);
spin_lock(&call->lock); spin_lock(&call->lock);
...@@ -1021,7 +1043,7 @@ static void rxrpc_call_receive_data_packet(struct rxrpc_call *call, struct rxrpc ...@@ -1021,7 +1043,7 @@ static void rxrpc_call_receive_data_packet(struct rxrpc_call *call, struct rxrpc
break; break;
/* next in sequence - just move list-to-list */ /* next in sequence - just move list-to-list */
_debug("Call transfer packet %d to readyq (+%d => %d bytes)", _debug("Call transfer packet %d to readyq (+%Zd => %Zd bytes)",
pmsg->seq,pmsg->dsize,call->app_ready_qty); pmsg->seq,pmsg->dsize,call->app_ready_qty);
call->app_ready_seq = pmsg->seq; call->app_ready_seq = pmsg->seq;
...@@ -1156,7 +1178,7 @@ static void rxrpc_call_receive_data_packet(struct rxrpc_call *call, struct rxrpc ...@@ -1156,7 +1178,7 @@ static void rxrpc_call_receive_data_packet(struct rxrpc_call *call, struct rxrpc
/* otherwise just invoke the data function whenever we can satisfy its desire for more /* otherwise just invoke the data function whenever we can satisfy its desire for more
* data * data
*/ */
_proto("Rx Received Op Data: st=%u qty=%u mk=%u%s", _proto("Rx Received Op Data: st=%u qty=%Zu mk=%Zu%s",
call->app_call_state,call->app_ready_qty,call->app_mark, call->app_call_state,call->app_ready_qty,call->app_mark,
call->app_last_rcv ? " last-rcvd" : ""); call->app_last_rcv ? " last-rcvd" : "");
...@@ -1394,7 +1416,7 @@ static int rxrpc_call_record_ACK(struct rxrpc_call *call, ...@@ -1394,7 +1416,7 @@ static int rxrpc_call_record_ACK(struct rxrpc_call *call,
char resend, now_complete; char resend, now_complete;
u8 acks[16]; u8 acks[16];
_enter("%p{apc=%u ads=%u},%p,%u,%u", _enter("%p{apc=%u ads=%u},%p,%u,%Zu",
call,call->acks_pend_cnt,call->acks_dftv_seq,msg,seq,count); call,call->acks_pend_cnt,call->acks_dftv_seq,msg,seq,count);
/* handle re-ACK'ing of definitively ACK'd packets (may be out-of-order ACKs) */ /* handle re-ACK'ing of definitively ACK'd packets (may be out-of-order ACKs) */
...@@ -1443,7 +1465,7 @@ static int rxrpc_call_record_ACK(struct rxrpc_call *call, ...@@ -1443,7 +1465,7 @@ static int rxrpc_call_record_ACK(struct rxrpc_call *call,
} }
_proto("Rx ACK of packets #%u-#%u [%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c] (pend=%u)", _proto("Rx ACK of packets #%u-#%u [%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c] (pend=%u)",
seq,seq+chunk-1, seq,(unsigned)(seq+chunk-1),
_acktype[acks[0x0]], _acktype[acks[0x0]],
_acktype[acks[0x1]], _acktype[acks[0x1]],
_acktype[acks[0x2]], _acktype[acks[0x2]],
...@@ -1552,7 +1574,7 @@ static int __rxrpc_call_read_data(struct rxrpc_call *call) ...@@ -1552,7 +1574,7 @@ static int __rxrpc_call_read_data(struct rxrpc_call *call)
size_t qty; size_t qty;
int ret; int ret;
_enter("%p{as=%d buf=%p qty=%u/%u}", _enter("%p{as=%d buf=%p qty=%Zu/%Zu}",
call,call->app_async_read,call->app_read_buf,call->app_ready_qty,call->app_mark); call,call->app_async_read,call->app_read_buf,call->app_ready_qty,call->app_mark);
/* check the state */ /* check the state */
...@@ -1560,7 +1582,7 @@ static int __rxrpc_call_read_data(struct rxrpc_call *call) ...@@ -1560,7 +1582,7 @@ static int __rxrpc_call_read_data(struct rxrpc_call *call)
case RXRPC_CSTATE_SRVR_RCV_ARGS: case RXRPC_CSTATE_SRVR_RCV_ARGS:
case RXRPC_CSTATE_CLNT_RCV_REPLY: case RXRPC_CSTATE_CLNT_RCV_REPLY:
if (call->app_last_rcv) { if (call->app_last_rcv) {
printk("%s(%p,%p,%d): Inconsistent call state (%s, last pkt)", printk("%s(%p,%p,%Zd): Inconsistent call state (%s, last pkt)",
__FUNCTION__,call,call->app_read_buf,call->app_mark, __FUNCTION__,call,call->app_read_buf,call->app_mark,
rxrpc_call_states[call->app_call_state]); rxrpc_call_states[call->app_call_state]);
BUG(); BUG();
...@@ -1574,7 +1596,7 @@ static int __rxrpc_call_read_data(struct rxrpc_call *call) ...@@ -1574,7 +1596,7 @@ static int __rxrpc_call_read_data(struct rxrpc_call *call)
case RXRPC_CSTATE_SRVR_SND_REPLY: case RXRPC_CSTATE_SRVR_SND_REPLY:
if (!call->app_last_rcv) { if (!call->app_last_rcv) {
printk("%s(%p,%p,%d): Inconsistent call state (%s, not last pkt)", printk("%s(%p,%p,%Zd): Inconsistent call state (%s, not last pkt)",
__FUNCTION__,call,call->app_read_buf,call->app_mark, __FUNCTION__,call,call->app_read_buf,call->app_mark,
rxrpc_call_states[call->app_call_state]); rxrpc_call_states[call->app_call_state]);
BUG(); BUG();
...@@ -1616,11 +1638,11 @@ static int __rxrpc_call_read_data(struct rxrpc_call *call) ...@@ -1616,11 +1638,11 @@ static int __rxrpc_call_read_data(struct rxrpc_call *call)
/* drag as much data as we need out of this packet */ /* drag as much data as we need out of this packet */
qty = min(call->app_mark,msg->dsize); qty = min(call->app_mark,msg->dsize);
_debug("reading %u from skb=%p off=%lu",qty,msg->pkt,msg->offset); _debug("reading %Zu from skb=%p off=%lu",qty,msg->pkt,msg->offset);
if (call->app_read_buf) if (call->app_read_buf)
if (skb_copy_bits(msg->pkt,msg->offset,call->app_read_buf,qty)<0) if (skb_copy_bits(msg->pkt,msg->offset,call->app_read_buf,qty)<0)
panic("%s: Failed to copy data from packet: (%p,%p,%d)", panic("%s: Failed to copy data from packet: (%p,%p,%Zd)",
__FUNCTION__,call,call->app_read_buf,qty); __FUNCTION__,call,call->app_read_buf,qty);
/* if that packet is now empty, discard it */ /* if that packet is now empty, discard it */
...@@ -1673,7 +1695,7 @@ static int __rxrpc_call_read_data(struct rxrpc_call *call) ...@@ -1673,7 +1695,7 @@ static int __rxrpc_call_read_data(struct rxrpc_call *call)
} }
if (call->app_last_rcv) { if (call->app_last_rcv) {
_debug("Insufficient data (%u/%u)",call->app_ready_qty,call->app_mark); _debug("Insufficient data (%Zu/%Zu)",call->app_ready_qty,call->app_mark);
call->app_async_read = 0; call->app_async_read = 0;
call->app_mark = RXRPC_APP_MARK_EOF; call->app_mark = RXRPC_APP_MARK_EOF;
call->app_read_buf = NULL; call->app_read_buf = NULL;
...@@ -1703,7 +1725,7 @@ int rxrpc_call_read_data(struct rxrpc_call *call, void *buffer, size_t size, int ...@@ -1703,7 +1725,7 @@ int rxrpc_call_read_data(struct rxrpc_call *call, void *buffer, size_t size, int
{ {
int ret; int ret;
_enter("%p{arq=%u},%p,%d,%x",call,call->app_ready_qty,buffer,size,flags); _enter("%p{arq=%Zu},%p,%Zd,%x",call,call->app_ready_qty,buffer,size,flags);
spin_lock(&call->lock); spin_lock(&call->lock);
...@@ -1799,7 +1821,7 @@ int rxrpc_call_write_data(struct rxrpc_call *call, ...@@ -1799,7 +1821,7 @@ int rxrpc_call_write_data(struct rxrpc_call *call,
char *buf; char *buf;
int ret; int ret;
_enter("%p,%u,%p,%02x,%x,%d,%p",call,sioc,siov,rxhdr_flags,alloc_flags,dup_data,size_sent); _enter("%p,%Zu,%p,%02x,%x,%d,%p",call,sioc,siov,rxhdr_flags,alloc_flags,dup_data,size_sent);
*size_sent = 0; *size_sent = 0;
size = 0; size = 0;
...@@ -1827,7 +1849,7 @@ int rxrpc_call_write_data(struct rxrpc_call *call, ...@@ -1827,7 +1849,7 @@ int rxrpc_call_write_data(struct rxrpc_call *call,
size += sptr->iov_len; size += sptr->iov_len;
} }
_debug("- size=%u mtu=%u",size,call->conn->mtu_size); _debug("- size=%Zu mtu=%Zu",size,call->conn->mtu_size);
do { do {
/* make sure there's a message under construction */ /* make sure there's a message under construction */
...@@ -1837,7 +1859,7 @@ int rxrpc_call_write_data(struct rxrpc_call *call, ...@@ -1837,7 +1859,7 @@ int rxrpc_call_write_data(struct rxrpc_call *call,
0,NULL,alloc_flags,&call->snd_nextmsg); 0,NULL,alloc_flags,&call->snd_nextmsg);
if (ret<0) if (ret<0)
goto out; goto out;
_debug("- allocated new message [ds=%u]",call->snd_nextmsg->dsize); _debug("- allocated new message [ds=%Zu]",call->snd_nextmsg->dsize);
} }
msg = call->snd_nextmsg; msg = call->snd_nextmsg;
...@@ -1857,7 +1879,7 @@ int rxrpc_call_write_data(struct rxrpc_call *call, ...@@ -1857,7 +1879,7 @@ int rxrpc_call_write_data(struct rxrpc_call *call,
space = call->conn->mtu_size - msg->dsize; space = call->conn->mtu_size - msg->dsize;
chunk = min(space,size); chunk = min(space,size);
_debug("- [before] space=%u chunk=%u",space,chunk); _debug("- [before] space=%Zu chunk=%Zu",space,chunk);
while (!siov->iov_len) while (!siov->iov_len)
siov++; siov++;
...@@ -1916,7 +1938,7 @@ int rxrpc_call_write_data(struct rxrpc_call *call, ...@@ -1916,7 +1938,7 @@ int rxrpc_call_write_data(struct rxrpc_call *call,
} }
} }
_debug("- [loaded] chunk=%u size=%u",chunk,size); _debug("- [loaded] chunk=%Zu size=%Zu",chunk,size);
/* dispatch the message when full, final or requesting ACK */ /* dispatch the message when full, final or requesting ACK */
if (msg->dsize>=call->conn->mtu_size || rxhdr_flags) { if (msg->dsize>=call->conn->mtu_size || rxhdr_flags) {
...@@ -1929,7 +1951,7 @@ int rxrpc_call_write_data(struct rxrpc_call *call, ...@@ -1929,7 +1951,7 @@ int rxrpc_call_write_data(struct rxrpc_call *call,
ret = 0; ret = 0;
out: out:
_leave(" = %d (%d queued, %d rem)",ret,*size_sent,size); _leave(" = %d (%Zd queued, %Zd rem)",ret,*size_sent,size);
return ret; return ret;
} /* end rxrpc_call_write_data() */ } /* end rxrpc_call_write_data() */
...@@ -1960,7 +1982,7 @@ int rxrpc_call_flush(struct rxrpc_call *call) ...@@ -1960,7 +1982,7 @@ int rxrpc_call_flush(struct rxrpc_call *call)
msg->hdr.flags |= RXRPC_MORE_PACKETS; msg->hdr.flags |= RXRPC_MORE_PACKETS;
} }
_proto("Sending DATA message { ds=%u dc=%u df=%02lu }", _proto("Sending DATA message { ds=%Zu dc=%u df=%02lu }",
msg->dsize,msg->dcount,msg->dfree); msg->dsize,msg->dcount,msg->dfree);
/* queue and adjust call state */ /* queue and adjust call state */
...@@ -1993,7 +2015,8 @@ int rxrpc_call_flush(struct rxrpc_call *call) ...@@ -1993,7 +2015,8 @@ int rxrpc_call_flush(struct rxrpc_call *call)
call->acks_pend_cnt++; call->acks_pend_cnt++;
mod_timer(&call->acks_timeout,jiffies + rxrpc_call_acks_timeout); mod_timer(&call->acks_timeout,
__rxrpc_rtt_based_timeout(call,rxrpc_call_acks_timeout));
spin_unlock(&call->lock); spin_unlock(&call->lock);
...@@ -2061,7 +2084,7 @@ static void rxrpc_call_resend(struct rxrpc_call *call, rxrpc_seq_t highest) ...@@ -2061,7 +2084,7 @@ static void rxrpc_call_resend(struct rxrpc_call *call, rxrpc_seq_t highest)
spin_unlock(&call->lock); spin_unlock(&call->lock);
/* send each message again (and ignore any errors we might incur) */ /* send each message again (and ignore any errors we might incur) */
_proto("Resending DATA message { ds=%u dc=%u df=%02lu }", _proto("Resending DATA message { ds=%Zu dc=%u df=%02lu }",
msg->dsize,msg->dcount,msg->dfree); msg->dsize,msg->dcount,msg->dfree);
if (rxrpc_conn_sendmsg(call->conn,msg)==0) if (rxrpc_conn_sendmsg(call->conn,msg)==0)
...@@ -2073,7 +2096,7 @@ static void rxrpc_call_resend(struct rxrpc_call *call, rxrpc_seq_t highest) ...@@ -2073,7 +2096,7 @@ static void rxrpc_call_resend(struct rxrpc_call *call, rxrpc_seq_t highest)
} }
/* reset the timeout */ /* reset the timeout */
mod_timer(&call->acks_timeout,jiffies + rxrpc_call_acks_timeout); mod_timer(&call->acks_timeout,__rxrpc_rtt_based_timeout(call,rxrpc_call_acks_timeout));
spin_unlock(&call->lock); spin_unlock(&call->lock);
......
...@@ -121,7 +121,7 @@ int rxrpc_create_connection(struct rxrpc_transport *trans, ...@@ -121,7 +121,7 @@ int rxrpc_create_connection(struct rxrpc_transport *trans,
conn->out_epoch = rxrpc_epoch; conn->out_epoch = rxrpc_epoch;
conn->in_clientflag = 0; conn->in_clientflag = 0;
conn->out_clientflag = RXRPC_CLIENT_INITIATED; conn->out_clientflag = RXRPC_CLIENT_INITIATED;
conn->conn_id = htonl((unsigned) conn & RXRPC_CIDMASK); conn->conn_id = htonl((unsigned long) conn & RXRPC_CIDMASK);
conn->service_id = htons(service_id); conn->service_id = htons(service_id);
/* attach to peer */ /* attach to peer */
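A standalone illustration of the cast fixed above: on LP64 targets, casting a pointer straight to unsigned both truncates the value to 32 bits and draws a "cast from pointer to integer of different size" warning from gcc; going through unsigned long keeps the full pointer before masking. The RXRPC_CIDMASK value below is an assumption for illustration only.

#include <stdio.h>

#define RXRPC_CIDMASK 0xfffffffcUL	/* assumed value, low bits reserved */

int main(void)
{
	int obj;
	unsigned long full  = (unsigned long)&obj;
	unsigned int  trunc = (unsigned int)(unsigned long)&obj;	/* loses high bits on LP64 */

	printf("full=%#lx masked=%#lx trunc=%#x\n",
	       full, full & RXRPC_CIDMASK, trunc);
	return 0;
}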
...@@ -547,7 +547,7 @@ int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, struct rxrpc_message *msg) ...@@ -547,7 +547,7 @@ int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, struct rxrpc_message *msg)
msghdr.msg_controllen = 0; msghdr.msg_controllen = 0;
msghdr.msg_flags = MSG_CONFIRM|MSG_DONTWAIT; msghdr.msg_flags = MSG_CONFIRM|MSG_DONTWAIT;
_net("Sending message type %d of %d bytes to %08x:%d", _net("Sending message type %d of %Zd bytes to %08x:%d",
msg->hdr.type, msg->hdr.type,
msg->dsize, msg->dsize,
htonl(conn->addr.sin_addr.s_addr), htonl(conn->addr.sin_addr.s_addr),
......
...@@ -29,24 +29,24 @@ __RXACCT_DECL(extern atomic_t rxrpc_message_count); ...@@ -29,24 +29,24 @@ __RXACCT_DECL(extern atomic_t rxrpc_message_count);
/* /*
* debug tracing * debug tracing
*/ */
#define kenter(FMT,...) printk("==> %s("FMT")\n",__FUNCTION__,##__VA_ARGS__) #define kenter(FMT, a...) printk("==> %s("FMT")\n",__FUNCTION__ , ##a)
#define kleave(FMT,...) printk("<== %s()"FMT"\n",__FUNCTION__,##__VA_ARGS__) #define kleave(FMT, a...) printk("<== %s()"FMT"\n",__FUNCTION__ , ##a)
#define kdebug(FMT,...) printk(" "FMT"\n",##__VA_ARGS__) #define kdebug(FMT, a...) printk(" "FMT"\n" , ##a)
#define kproto(FMT,...) printk("### "FMT"\n",##__VA_ARGS__) #define kproto(FMT, a...) printk("### "FMT"\n" , ##a)
#define knet(FMT,...) printk(" "FMT"\n",##__VA_ARGS__) #define knet(FMT, a...) printk(" "FMT"\n" , ##a)
#if 0 #if 0
#define _enter(FMT,...) kenter(FMT,##__VA_ARGS__) #define _enter(FMT, a...) kenter(FMT , ##a)
#define _leave(FMT,...) kleave(FMT,##__VA_ARGS__) #define _leave(FMT, a...) kleave(FMT , ##a)
#define _debug(FMT,...) kdebug(FMT,##__VA_ARGS__) #define _debug(FMT, a...) kdebug(FMT , ##a)
#define _proto(FMT,...) kproto(FMT,##__VA_ARGS__) #define _proto(FMT, a...) kproto(FMT , ##a)
#define _net(FMT,...) knet(FMT,##__VA_ARGS__) #define _net(FMT, a...) knet(FMT , ##a)
#else #else
#define _enter(FMT,...) do { if (rxrpc_ktrace) kenter(FMT,##__VA_ARGS__); } while(0) #define _enter(FMT, a...) do { if (rxrpc_ktrace) kenter(FMT , ##a); } while(0)
#define _leave(FMT,...) do { if (rxrpc_ktrace) kleave(FMT,##__VA_ARGS__); } while(0) #define _leave(FMT, a...) do { if (rxrpc_ktrace) kleave(FMT , ##a); } while(0)
#define _debug(FMT,...) do { if (rxrpc_kdebug) kdebug(FMT,##__VA_ARGS__); } while(0) #define _debug(FMT, a...) do { if (rxrpc_kdebug) kdebug(FMT , ##a); } while(0)
#define _proto(FMT,...) do { if (rxrpc_kproto) kproto(FMT,##__VA_ARGS__); } while(0) #define _proto(FMT, a...) do { if (rxrpc_kproto) kproto(FMT , ##a); } while(0)
#define _net(FMT,...) do { if (rxrpc_knet) knet (FMT,##__VA_ARGS__); } while(0) #define _net(FMT, a...) do { if (rxrpc_knet) knet (FMT , ##a); } while(0)
#endif #endif
static inline void rxrpc_discard_my_signals(void) static inline void rxrpc_discard_my_signals(void)
......
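The macros move from C99 __VA_ARGS__ to the GNU named-variadic form "a...", presumably for compatibility with older compilers; the " , ##a" idiom deletes the trailing comma when the argument list is empty, so zero-argument calls stay legal. A minimal demo that compiles with gcc:

/* GNU named-variadic macro demo, mirroring the style adopted above. */
#include <stdio.h>

#define kdebug(FMT, a...) printf("    " FMT "\n" , ##a)

int main(void)
{
	kdebug("no arguments");		/* comma is swallowed by ##a */
	kdebug("answer=%d", 42);
	return 0;
}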
...@@ -49,11 +49,7 @@ static int rxrpc_krxiod(void *arg) ...@@ -49,11 +49,7 @@ static int rxrpc_krxiod(void *arg)
/* only certain signals are of interest */ /* only certain signals are of interest */
spin_lock_irq(&current->sig->siglock); spin_lock_irq(&current->sig->siglock);
siginitsetinv(&current->blocked,0); siginitsetinv(&current->blocked,0);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,3)
recalc_sigpending(); recalc_sigpending();
#else
recalc_sigpending(current);
#endif
spin_unlock_irq(&current->sig->siglock); spin_unlock_irq(&current->sig->siglock);
/* loop around waiting for work to do */ /* loop around waiting for work to do */
......
...@@ -61,11 +61,7 @@ static int rxrpc_krxsecd(void *arg) ...@@ -61,11 +61,7 @@ static int rxrpc_krxsecd(void *arg)
/* only certain signals are of interest */ /* only certain signals are of interest */
spin_lock_irq(&current->sig->siglock); spin_lock_irq(&current->sig->siglock);
siginitsetinv(&current->blocked,0); siginitsetinv(&current->blocked,0);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,3)
recalc_sigpending(); recalc_sigpending();
#else
recalc_sigpending(current);
#endif
spin_unlock_irq(&current->sig->siglock); spin_unlock_irq(&current->sig->siglock);
/* loop around waiting for work to do */ /* loop around waiting for work to do */
......
...@@ -79,11 +79,7 @@ static int krxtimod(void *arg) ...@@ -79,11 +79,7 @@ static int krxtimod(void *arg)
/* only certain signals are of interest */ /* only certain signals are of interest */
spin_lock_irq(&current->sig->siglock); spin_lock_irq(&current->sig->siglock);
siginitsetinv(&current->blocked,0); siginitsetinv(&current->blocked,0);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,3)
recalc_sigpending(); recalc_sigpending();
#else
recalc_sigpending(current);
#endif
spin_unlock_irq(&current->sig->siglock); spin_unlock_irq(&current->sig->siglock);
/* loop around looking for things to attend to */ /* loop around looking for things to attend to */
......
...@@ -370,11 +370,13 @@ void rxrpc_peer_calculate_rtt(struct rxrpc_peer *peer, ...@@ -370,11 +370,13 @@ void rxrpc_peer_calculate_rtt(struct rxrpc_peer *peer,
if (peer->rtt_usage<RXRPC_RTT_CACHE_SIZE) peer->rtt_usage++; if (peer->rtt_usage<RXRPC_RTT_CACHE_SIZE) peer->rtt_usage++;
/* recalculate RTT */ /* recalculate RTT */
rtt = 0;
for (loop=peer->rtt_usage-1; loop>=0; loop--) for (loop=peer->rtt_usage-1; loop>=0; loop--)
rtt += peer->rtt_cache[loop]; rtt += peer->rtt_cache[loop];
peer->rtt = do_div(rtt,peer->rtt_usage); do_div(rtt,peer->rtt_usage);
peer->rtt = rtt;
_leave(" RTT=%lu.%lums",peer->rtt/1000,peer->rtt%1000); _leave(" RTT=%lu.%lums",(long)(peer->rtt/1000),(long)(peer->rtt%1000));
} /* end rxrpc_peer_calculate_rtt() */ } /* end rxrpc_peer_calculate_rtt() */
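The fix above hinges on the do_div() contract: do_div(n, base) divides n in place and returns the remainder, so the old code was storing the remainder as the RTT, and the accumulator was never reset between recalculations. A plain-C model of the corrected flow:

/* Userspace model of the fixed averaging; my_do_div mimics the kernel
 * macro's divide-in-place, return-remainder behaviour. */
#include <stdio.h>

static unsigned long my_do_div(unsigned long long *n, unsigned base)
{
	unsigned long rem = (unsigned long)(*n % base);
	*n /= base;		/* quotient left in n, like the kernel macro */
	return rem;
}

int main(void)
{
	unsigned long long rtt = 0;	/* the fix also adds this reset */
	unsigned long samples[3] = { 900, 1100, 1000 };
	int i;

	for (i = 0; i < 3; i++)
		rtt += samples[i];
	my_do_div(&rtt, 3);		/* discard remainder, keep quotient */
	printf("mean RTT = %llu us\n", rtt);	/* prints 1000 */
	return 0;
}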
...@@ -22,13 +22,6 @@ ...@@ -22,13 +22,6 @@
#include <rxrpc/message.h> #include <rxrpc/message.h>
#include "internal.h" #include "internal.h"
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
static inline struct proc_dir_entry *PDE(const struct inode *inode)
{
return (struct proc_dir_entry *)inode->u.generic_ip;
}
#endif
static struct proc_dir_entry *proc_rxrpc; static struct proc_dir_entry *proc_rxrpc;
static int rxrpc_proc_transports_open(struct inode *inode, struct file *file); static int rxrpc_proc_transports_open(struct inode *inode, struct file *file);
...@@ -379,14 +372,14 @@ static int rxrpc_proc_peers_show(struct seq_file *m, void *v) ...@@ -379,14 +372,14 @@ static int rxrpc_proc_peers_show(struct seq_file *m, void *v)
if (!list_empty(&peer->timeout.link)) if (!list_empty(&peer->timeout.link))
timeout = (signed long)peer->timeout.timo_jif - (signed long)jiffies; timeout = (signed long)peer->timeout.timo_jif - (signed long)jiffies;
seq_printf(m,"%5hu %08x %5d %5d %8ld %5u %7lu\n", seq_printf(m,"%5hu %08x %5d %5d %8ld %5Zu %7lu\n",
peer->trans->port, peer->trans->port,
ntohl(peer->addr.s_addr), ntohl(peer->addr.s_addr),
atomic_read(&peer->usage), atomic_read(&peer->usage),
atomic_read(&peer->conn_count), atomic_read(&peer->conn_count),
timeout, timeout,
peer->if_mtu, peer->if_mtu,
peer->rtt (long) peer->rtt
); );
return 0; return 0;
...@@ -484,7 +477,7 @@ static int rxrpc_proc_conns_show(struct seq_file *m, void *v) ...@@ -484,7 +477,7 @@ static int rxrpc_proc_conns_show(struct seq_file *m, void *v)
if (!list_empty(&conn->timeout.link)) if (!list_empty(&conn->timeout.link))
timeout = (signed long)conn->timeout.timo_jif - (signed long)jiffies; timeout = (signed long)conn->timeout.timo_jif - (signed long)jiffies;
seq_printf(m,"%5hu %08x %5hu %04hx %08x %-3.3s %08x %08x %5u %8ld\n", seq_printf(m,"%5hu %08x %5hu %04hx %08x %-3.3s %08x %08x %5Zu %8ld\n",
conn->trans->port, conn->trans->port,
ntohl(conn->addr.sin_addr.s_addr), ntohl(conn->addr.sin_addr.s_addr),
ntohs(conn->addr.sin_port), ntohs(conn->addr.sin_port),
......
...@@ -691,12 +691,12 @@ static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans) ...@@ -691,12 +691,12 @@ static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans)
msg.msg_controllen = (char*)msg.msg_control - (char*)&emsg; msg.msg_controllen = (char*)msg.msg_control - (char*)&emsg;
if (msg.msg_controllen<sizeof(emsg.cmsg) || msg.msg_namelen<sizeof(sin)) { if (msg.msg_controllen<sizeof(emsg.cmsg) || msg.msg_namelen<sizeof(sin)) {
printk("%s: short control message (nlen=%u clen=%u fl=%x)\n", printk("%s: short control message (nlen=%u clen=%Zu fl=%x)\n",
__FUNCTION__,msg.msg_namelen,msg.msg_controllen,msg.msg_flags); __FUNCTION__,msg.msg_namelen,msg.msg_controllen,msg.msg_flags);
continue; continue;
} }
_net("Rx Received control message { len=%u level=%u type=%u }", _net("Rx Received control message { len=%Zu level=%u type=%u }",
emsg.cmsg.cmsg_len,emsg.cmsg.cmsg_level,emsg.cmsg.cmsg_type); emsg.cmsg.cmsg_len,emsg.cmsg.cmsg_level,emsg.cmsg.cmsg_type);
if (sin.sin_family!=AF_INET) { if (sin.sin_family!=AF_INET) {
...@@ -715,7 +715,7 @@ static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans) ...@@ -715,7 +715,7 @@ static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans)
} }
if (msg.msg_controllen<sizeof(emsg.cmsg)+sizeof(emsg.ee)) { if (msg.msg_controllen<sizeof(emsg.cmsg)+sizeof(emsg.ee)) {
printk("%s: short error message (%u)\n",__FUNCTION__,msg.msg_controllen); printk("%s: short error message (%Zu)\n",__FUNCTION__,msg.msg_controllen);
_leave(""); _leave("");
return; return;
} }
......