Commit fed4d59b authored by Linus Torvalds

Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] Reserve elfcorehdr memory in CONFIG_CRASH_DUMP
  [IA64] fix boot panic caused by offline CPUs
  [IA64] reorder Kconfig options to match x86
  [IA64] Build VT-D iommu support into generic kernel
  [IA64] remove dead BIO_VMERGE_BOUNDARY definition
  [IA64] remove duplicated #include from pci-dma.c
  [IA64] use common header for software IO/TLB
  [IA64] fix the difference between node_mem_map and node_start_pfn
  [IA64] Add error_recovery_info field to SAL section header
  [IA64] Add UV watchlist support.
  [IA64] Simplify SGI uv vs. sn2 driver issues
parents 8ec96e7b 17c1f07e
@@ -148,6 +148,7 @@ config IA64_GENERIC
 	select ACPI_NUMA
 	select SWIOTLB
 	select PCI_MSI
+	select DMAR
 	help
 	  This selects the system type of your hardware. A "generic" kernel
 	  will run on any supported IA-64 system. However, if you configure
@@ -585,7 +586,7 @@ source "fs/Kconfig.binfmt"
 endmenu
-menu "Power management and ACPI"
+menu "Power management and ACPI options"
 source "kernel/power/Kconfig"
@@ -641,6 +642,8 @@ source "net/Kconfig"
 source "drivers/Kconfig"
+source "arch/ia64/hp/sim/Kconfig"
 config MSPEC
 	tristate "Memory special operations driver"
 	depends on IA64
@@ -652,6 +655,12 @@ config MSPEC
 source "fs/Kconfig"
+source "arch/ia64/Kconfig.debug"
+source "security/Kconfig"
+source "crypto/Kconfig"
 source "arch/ia64/kvm/Kconfig"
 source "lib/Kconfig"
@@ -678,11 +687,3 @@ config IRQ_PER_CPU
 config IOMMU_HELPER
 	def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC || SWIOTLB)
-source "arch/ia64/hp/sim/Kconfig"
-source "arch/ia64/Kconfig.debug"
-source "security/Kconfig"
-source "crypto/Kconfig"
@@ -13,19 +13,12 @@
  */
 #include <linux/device.h>
+#include <linux/swiotlb.h>
 #include <asm/machvec.h>
 /* swiotlb declarations & definitions: */
 extern int swiotlb_late_init_with_default_size (size_t size);
-extern ia64_mv_dma_alloc_coherent swiotlb_alloc_coherent;
-extern ia64_mv_dma_free_coherent swiotlb_free_coherent;
-extern ia64_mv_dma_map_single_attrs swiotlb_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs swiotlb_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs swiotlb_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs swiotlb_unmap_sg_attrs;
-extern ia64_mv_dma_supported swiotlb_dma_supported;
-extern ia64_mv_dma_mapping_error swiotlb_dma_mapping_error;
 /* hwiommu declarations & definitions: */
...
@@ -434,28 +434,4 @@ extern void memset_io(volatile void __iomem *s, int c, long n);
 # endif /* __KERNEL__ */
-/*
- * Enabling BIO_VMERGE_BOUNDARY forces us to turn off I/O MMU bypassing. It is said that
- * BIO-level virtual merging can give up to 4% performance boost (not verified for ia64).
- * On the other hand, we know that I/O MMU bypassing gives ~8% performance improvement on
- * SPECweb-like workloads on zx1-based machines. Thus, for now we favor I/O MMU bypassing
- * over BIO-level virtual merging.
- */
-extern unsigned long ia64_max_iommu_merge_mask;
-#if 1
-#define BIO_VMERGE_BOUNDARY 0
-#else
-/*
- * It makes no sense at all to have this BIO_VMERGE_BOUNDARY macro here. Should be
- * replaced by dma_merge_mask() or something of that sort. Note: the only way
- * BIO_VMERGE_BOUNDARY is used is to mask off bits. Effectively, our definition gets
- * expanded into:
- *
- *	addr & ((ia64_max_iommu_merge_mask + 1) - 1) == (addr & ia64_max_iommu_vmerge_mask)
- *
- * which is precisely what we want.
- */
-#define BIO_VMERGE_BOUNDARY (ia64_max_iommu_merge_mask + 1)
-#endif
 #endif /* _ASM_IA64_IO_H */
@@ -11,6 +11,7 @@
 #define _ASM_IA64_MACHVEC_H
 #include <linux/types.h>
+#include <linux/swiotlb.h>
 /* forward declarations: */
 struct device;
@@ -297,27 +298,6 @@ extern void machvec_init_from_cmdline(const char *cmdline);
 # error Unknown configuration. Update arch/ia64/include/asm/machvec.h.
 # endif /* CONFIG_IA64_GENERIC */
-/*
- * Declare default routines which aren't declared anywhere else:
- */
-extern ia64_mv_dma_init swiotlb_init;
-extern ia64_mv_dma_alloc_coherent swiotlb_alloc_coherent;
-extern ia64_mv_dma_free_coherent swiotlb_free_coherent;
-extern ia64_mv_dma_map_single swiotlb_map_single;
-extern ia64_mv_dma_map_single_attrs swiotlb_map_single_attrs;
-extern ia64_mv_dma_unmap_single swiotlb_unmap_single;
-extern ia64_mv_dma_unmap_single_attrs swiotlb_unmap_single_attrs;
-extern ia64_mv_dma_map_sg swiotlb_map_sg;
-extern ia64_mv_dma_map_sg_attrs swiotlb_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg swiotlb_unmap_sg;
-extern ia64_mv_dma_unmap_sg_attrs swiotlb_unmap_sg_attrs;
-extern ia64_mv_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu;
-extern ia64_mv_dma_sync_sg_for_cpu swiotlb_sync_sg_for_cpu;
-extern ia64_mv_dma_sync_single_for_device swiotlb_sync_single_for_device;
-extern ia64_mv_dma_sync_sg_for_device swiotlb_sync_sg_for_device;
-extern ia64_mv_dma_mapping_error swiotlb_dma_mapping_error;
-extern ia64_mv_dma_supported swiotlb_dma_supported;
 /*
  * Define default versions so we can extend machvec for new platforms without having
  * to update the machvec files for all existing platforms.
...
@@ -48,7 +48,6 @@ extern int reserve_elfcorehdr(unsigned long *start, unsigned long *end);
  */
 #define GRANULEROUNDDOWN(n)	((n) & ~(IA64_GRANULE_SIZE-1))
 #define GRANULEROUNDUP(n)	(((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))
-#define ORDERROUNDDOWN(n)	((n) & ~((PAGE_SIZE<<MAX_ORDER)-1))
 #ifdef CONFIG_NUMA
 extern void call_pernode_memory (unsigned long start, unsigned long len, void *func);
...
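As a quick illustration of what the two surviving rounding macros compute, here is a stand-alone user-space sanity check. It is a sketch only: the 16 MB value for IA64_GRANULE_SIZE is an assumption for the example (the kernel config also allows a 64 MB granule), and none of this code is part of the patch.

/* Illustrative only: granule rounding, assuming a 16 MB granule. */
#include <stdio.h>

#define IA64_GRANULE_SIZE	0x1000000UL	/* 16 MB, assumed for this sketch */
#define GRANULEROUNDDOWN(n)	((n) & ~(IA64_GRANULE_SIZE-1))
#define GRANULEROUNDUP(n)	(((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))

int main(void)
{
	unsigned long addr = 0x1234567UL;

	/* prints "0x1000000 0x2000000": down to the containing granule,
	 * up to the next granule boundary */
	printf("%#lx %#lx\n", GRANULEROUNDDOWN(addr), GRANULEROUNDUP(addr));
	return 0;
}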
@@ -337,11 +337,24 @@ typedef struct sal_log_record_header {
 #define sal_log_severity_fatal		1
 #define sal_log_severity_corrected	2
+/*
+ * Error Recovery Info (ERI) bit decode. From SAL Spec section B.2.2 Table B-3
+ * Error Section Error_Recovery_Info Field Definition.
+ */
+#define ERI_NOT_VALID		0x0	/* Error Recovery Field is not valid */
+#define ERI_NOT_ACCESSIBLE	0x30	/* Resource not accessible */
+#define ERI_CONTAINMENT_WARN	0x22	/* Corrupt data propagated */
+#define ERI_UNCORRECTED_ERROR	0x20	/* Uncorrected error */
+#define ERI_COMPONENT_RESET	0x24	/* Component must be reset */
+#define ERI_CORR_ERROR_LOG	0x21	/* Corrected error, needs logging */
+#define ERI_CORR_ERROR_THRESH	0x29	/* Corrected error threshold exceeded */
 /* Definition of log section header structures */
 typedef struct sal_log_sec_header {
 	efi_guid_t guid;		/* Unique Section ID */
 	sal_log_revision_t revision;	/* Major and Minor revision of Section */
-	u16 reserved;
+	u8 error_recovery_info;		/* Platform error recovery status */
+	u8 reserved;
 	u32 len;			/* Section length */
 } sal_log_section_hdr_t;
...
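A minimal sketch of how a consumer might read the new field. sal_section_needs_reset() is a hypothetical helper, not something this patch adds, and it assumes the ERI_* values are compared as whole codes, as listed in the table above.

/* Hypothetical helper, not part of this patch: does a SAL error section
 * report that the affected component must be reset? */
static inline int sal_section_needs_reset(const sal_log_section_hdr_t *sh)
{
	return sh->error_recovery_info == ERI_COMPONENT_RESET;
}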
@@ -90,6 +90,8 @@
 #define  SN_SAL_SET_CPU_NUMBER			0x02000068
 #define  SN_SAL_KERNEL_LAUNCH_EVENT		0x02000069
+#define  SN_SAL_WATCHLIST_ALLOC			0x02000070
+#define  SN_SAL_WATCHLIST_FREE			0x02000071
 /*
  * Service-specific constants
@@ -1185,4 +1187,47 @@ ia64_sn_kernel_launch_event(void)
 	SAL_CALL_NOLOCK(rv, SN_SAL_KERNEL_LAUNCH_EVENT, 0, 0, 0, 0, 0, 0, 0);
 	return rv.status;
 }
+
+union sn_watchlist_u {
+	u64	val;
+	struct {
+		u64	blade	: 16,
+			size	: 32,
+			filler	: 16;
+	};
+};
+
+static inline int
+sn_mq_watchlist_alloc(int blade, void *mq, unsigned int mq_size,
+		      unsigned long *intr_mmr_offset)
+{
+	struct ia64_sal_retval rv;
+	unsigned long addr;
+	union sn_watchlist_u size_blade;
+	int watchlist;
+
+	addr = (unsigned long)mq;
+	size_blade.size = mq_size;
+	size_blade.blade = blade;
+
+	/*
+	 * bios returns watchlist number or negative error number.
+	 */
+	ia64_sal_oemcall_nolock(&rv, SN_SAL_WATCHLIST_ALLOC, addr,
+			size_blade.val, (u64)intr_mmr_offset,
+			(u64)&watchlist, 0, 0, 0);
+	if (rv.status < 0)
+		return rv.status;
+
+	return watchlist;
+}
+
+static inline int
+sn_mq_watchlist_free(int blade, int watchlist_num)
+{
+	struct ia64_sal_retval rv;
+
+	ia64_sal_oemcall_nolock(&rv, SN_SAL_WATCHLIST_FREE, blade,
+			watchlist_num, 0, 0, 0, 0, 0);
+	return rv.status;
+}
 #endif /* _ASM_IA64_SN_SN_SAL_H */
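A hedged usage sketch of the two new inlines. example_register_mq(), mq and mq_bytes are illustrative names only; a real caller would keep the returned watchlist number and MMR offset around for the lifetime of the queue rather than freeing immediately.

/* Hypothetical caller, not part of this patch: register a message queue with
 * the BIOS watchlist on a blade, then release it again. */
#include <asm/sn/sn_sal.h>

static int example_register_mq(int blade, void *mq, unsigned int mq_bytes)
{
	unsigned long mmr_offset;
	int wl;

	wl = sn_mq_watchlist_alloc(blade, mq, mq_bytes, &mmr_offset);
	if (wl < 0)
		return wl;		/* negative SAL status from the BIOS */

	/* ... hand mq and mmr_offset to the hardware user here ... */

	return sn_mq_watchlist_free(blade, wl);
}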
@@ -678,6 +678,30 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
 	return 0;
 }
+
+int __init early_acpi_boot_init(void)
+{
+	int ret;
+
+	/*
+	 * do a partial walk of MADT to determine how many CPUs
+	 * we have including offline CPUs
+	 */
+	if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
+		printk(KERN_ERR PREFIX "Can't find MADT\n");
+		return 0;
+	}
+
+	ret = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
+		acpi_parse_lsapic, NR_CPUS);
+	if (ret < 1)
+		printk(KERN_ERR PREFIX
+		       "Error parsing MADT - no LAPIC entries\n");
+
+	return 0;
+}
+
 int __init acpi_boot_init(void)
 {
@@ -701,11 +725,6 @@ int __init acpi_boot_init(void)
 		printk(KERN_ERR PREFIX
 		       "Error parsing LAPIC address override entry\n");
-	if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, acpi_parse_lsapic, NR_CPUS)
-	    < 1)
-		printk(KERN_ERR PREFIX
-		       "Error parsing MADT - no LAPIC entries\n");
-
 	if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0)
 	    < 0)
 		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
...
@@ -12,13 +12,11 @@
 #include <asm/machvec.h>
 #include <linux/dma-mapping.h>
-#include <asm/machvec.h>
 #include <asm/system.h>
 #ifdef CONFIG_DMAR
 #include <linux/kernel.h>
-#include <linux/string.h>
 #include <asm/page.h>
 #include <asm/iommu.h>
...
@@ -359,7 +359,7 @@ reserve_memory (void)
 	}
 #endif
-#ifdef CONFIG_CRASH_KERNEL
+#ifdef CONFIG_CRASH_DUMP
 	if (reserve_elfcorehdr(&rsvd_region[n].start,
 			       &rsvd_region[n].end) == 0)
 		n++;
@@ -561,8 +561,12 @@ setup_arch (char **cmdline_p)
 #ifdef CONFIG_ACPI
 	/* Initialize the ACPI boot-time table parser */
 	acpi_table_init();
+	early_acpi_boot_init();
 # ifdef CONFIG_ACPI_NUMA
 	acpi_numa_init();
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+	prefill_possible_map();
+#endif
 	per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
 		32 : cpus_weight(early_cpu_possible_map)),
 		additional_cpus > 0 ? additional_cpus : 0);
@@ -853,9 +857,6 @@ void __init
 setup_per_cpu_areas (void)
 {
 	/* start_kernel() requires this... */
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
-	prefill_possible_map();
-#endif
 }
 /*
...
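To make the offline-CPU fix easier to follow, here is the boot-time ordering that setup_arch() ends up with after the hunks above, reduced to stand-alone stubs. This is an illustration of the call order only, written for this page; it is not the literal kernel code, and the *_stub names are invented.

/* Illustrative stubs only: the point is the call order, in which the MADT is
 * walked and the possible map filled before per-CPU areas are sized. */
static void acpi_table_init_stub(void)       { /* map the ACPI tables */ }
static void early_acpi_boot_init_stub(void)  { /* walk MADT, count offline CPUs too */ }
static void acpi_numa_init_stub(void)        { }
static void prefill_possible_map_stub(void)  { /* size cpu_possible_map */ }
static void per_cpu_scan_finalize_stub(void) { /* allocate per-CPU areas */ }

static void setup_arch_ordering_sketch(void)
{
	acpi_table_init_stub();
	early_acpi_boot_init_stub();	/* new: runs right after table init */
	acpi_numa_init_stub();
	prefill_possible_map_stub();	/* moved up from setup_per_cpu_areas() */
	per_cpu_scan_finalize_stub();	/* now sees every possible CPU */
}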
@@ -635,7 +635,6 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
 			(min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT;
 #endif
 	start = GRANULEROUNDDOWN(start);
-	start = ORDERROUNDDOWN(start);
 	end = GRANULEROUNDUP(end);
 	mem_data[node].max_pfn = max(mem_data[node].max_pfn,
 				     end >> PAGE_SHIFT);
...
@@ -19,6 +19,12 @@ EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);
 #ifdef CONFIG_IA64_SGI_UV
 int sn_prom_type;
+long sn_partition_id;
+EXPORT_SYMBOL(sn_partition_id);
+long sn_coherency_id;
+EXPORT_SYMBOL_GPL(sn_coherency_id);
+long sn_region_size;
+EXPORT_SYMBOL(sn_region_size);
 #endif
 struct redir_addr {
...