Commit a947e23a authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, asm: Clean up desc.h a bit
  x86, amd: Do not enable ARAT feature on AMD processors below family 0x12
  x86: Move do_page_fault()'s error path under unlikely()
  x86, efi: Retain boot service code until after switching to virtual mode
  x86: Remove unnecessary check in detect_ht()
  x86: Reorder mm_context_t to remove x86_64 alignment padding and thus shrink mm_struct
  x86, UV: Clean up uv_tlb.c
  x86, UV: Add support for SGI UV2 hub chip
  x86, cpufeature: Update CPU feature RDRND to RDRAND
parents 08a8b796 9a3865b1
@@ -125,7 +125,7 @@
 #define X86_FEATURE_OSXSAVE	(4*32+27) /* "" XSAVE enabled in the OS */
 #define X86_FEATURE_AVX	(4*32+28) /* Advanced Vector Extensions */
 #define X86_FEATURE_F16C	(4*32+29) /* 16-bit fp conversions */
-#define X86_FEATURE_RDRND	(4*32+30) /* The RDRAND instruction */
+#define X86_FEATURE_RDRAND	(4*32+30) /* The RDRAND instruction */
 #define X86_FEATURE_HYPERVISOR	(4*32+31) /* Running on a hypervisor */

 /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
...
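The rename matters because the instruction mnemonic is RDRAND; the old flag name RDRND did not match it. For reference, a minimal userspace sketch (not part of this commit; x86-64 GCC/Clang inline asm, assumes RDRAND-capable hardware) of driving the instruction directly, including the carry-flag retry convention:

	#include <stdio.h>
	#include <stdint.h>

	/* Retry a few times: RDRAND clears CF when no entropy is available. */
	static int rdrand64(uint64_t *out)
	{
		unsigned char ok;
		int i;

		for (i = 0; i < 10; i++) {
			asm volatile("rdrand %0; setc %1" : "=r" (*out), "=qm" (ok));
			if (ok)
				return 1;
		}
		return 0;
	}

	int main(void)
	{
		uint64_t r;

		if (rdrand64(&r))
			printf("rdrand: %016llx\n", (unsigned long long)r);
		else
			printf("rdrand: no entropy after retries\n");
		return 0;
	}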
@@ -4,30 +4,33 @@
 #include <asm/desc_defs.h>
 #include <asm/ldt.h>
 #include <asm/mmu.h>
 #include <linux/smp.h>

-static inline void fill_ldt(struct desc_struct *desc,
-			    const struct user_desc *info)
+static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info)
 {
-	desc->limit0 = info->limit & 0x0ffff;
-	desc->base0 = info->base_addr & 0x0000ffff;
-	desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
-	desc->type = (info->read_exec_only ^ 1) << 1;
-	desc->type |= info->contents << 2;
-	desc->s = 1;
-	desc->dpl = 0x3;
-	desc->p = info->seg_not_present ^ 1;
-	desc->limit = (info->limit & 0xf0000) >> 16;
-	desc->avl = info->useable;
-	desc->d = info->seg_32bit;
-	desc->g = info->limit_in_pages;
-	desc->base2 = (info->base_addr & 0xff000000) >> 24;
+	desc->limit0		= info->limit & 0x0ffff;
+
+	desc->base0		= (info->base_addr & 0x0000ffff);
+	desc->base1		= (info->base_addr & 0x00ff0000) >> 16;
+
+	desc->type		= (info->read_exec_only ^ 1) << 1;
+	desc->type	       |= info->contents << 2;
+
+	desc->s			= 1;
+	desc->dpl		= 0x3;
+	desc->p			= info->seg_not_present ^ 1;
+	desc->limit		= (info->limit & 0xf0000) >> 16;
+	desc->avl		= info->useable;
+	desc->d			= info->seg_32bit;
+	desc->g			= info->limit_in_pages;
+	desc->base2		= (info->base_addr & 0xff000000) >> 24;
+
 	/*
 	 * Don't allow setting of the lm bit. It is useless anyway
 	 * because 64bit system calls require __USER_CS:
 	 */
-	desc->l = 0;
+	desc->l			= 0;
 }

 extern struct desc_ptr idt_descr;
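For readers unfamiliar with the layout fill_ldt() fills in: an 8-byte segment descriptor scatters the 32-bit base and 20-bit limit across several bitfields. A hypothetical userspace mirror of that packing (field order follows struct desc_struct; purely illustrative, not kernel code):

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	/* Hypothetical mirror of the 8-byte segment descriptor layout. */
	struct seg_desc {
		uint64_t limit0 : 16;
		uint64_t base0  : 16;
		uint64_t base1  :  8;
		uint64_t type   :  4;
		uint64_t s      :  1;
		uint64_t dpl    :  2;
		uint64_t p      :  1;
		uint64_t limit  :  4;
		uint64_t avl    :  1;
		uint64_t l      :  1;
		uint64_t d      :  1;
		uint64_t g      :  1;
		uint64_t base2  :  8;
	};

	int main(void)
	{
		struct seg_desc desc;
		uint64_t raw;
		uint32_t base = 0x12345678, limit = 0xabcde;

		memset(&desc, 0, sizeof(desc));

		/* Split the base and limit exactly as fill_ldt() does. */
		desc.limit0 = limit & 0x0ffff;
		desc.limit  = (limit & 0xf0000) >> 16;
		desc.base0  = base & 0x0000ffff;
		desc.base1  = (base & 0x00ff0000) >> 16;
		desc.base2  = (base & 0xff000000) >> 24;
		desc.s      = 1;	/* code/data, not system */
		desc.dpl    = 3;	/* user privilege */
		desc.p      = 1;	/* present */

		memcpy(&raw, &desc, sizeof(raw));
		printf("packed descriptor: %#018llx\n", (unsigned long long)raw);
		return 0;
	}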
@@ -36,6 +39,7 @@ extern gate_desc idt_table[];
 struct gdt_page {
 	struct desc_struct gdt[GDT_ENTRIES];
 } __attribute__((aligned(PAGE_SIZE)));
+
 DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);

 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
@@ -48,16 +52,16 @@ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
 static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func,
 			     unsigned dpl, unsigned ist, unsigned seg)
 {
-	gate->offset_low = PTR_LOW(func);
-	gate->segment = __KERNEL_CS;
-	gate->ist = ist;
-	gate->p = 1;
-	gate->dpl = dpl;
-	gate->zero0 = 0;
-	gate->zero1 = 0;
-	gate->type = type;
-	gate->offset_middle = PTR_MIDDLE(func);
-	gate->offset_high = PTR_HIGH(func);
+	gate->offset_low	= PTR_LOW(func);
+	gate->segment		= __KERNEL_CS;
+	gate->ist		= ist;
+	gate->p			= 1;
+	gate->dpl		= dpl;
+	gate->zero0		= 0;
+	gate->zero1		= 0;
+	gate->type		= type;
+	gate->offset_middle	= PTR_MIDDLE(func);
+	gate->offset_high	= PTR_HIGH(func);
 }

 #else
@@ -66,8 +70,7 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
 			    unsigned short seg)
 {
 	gate->a = (seg << 16) | (base & 0xffff);
-	gate->b = (base & 0xffff0000) |
-		  (((0x80 | type | (dpl << 5)) & 0xff) << 8);
+	gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
 }

 #endif
@@ -75,31 +78,29 @@
 static inline int desc_empty(const void *ptr)
 {
 	const u32 *desc = ptr;
+
 	return !(desc[0] | desc[1]);
 }

 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
-#define load_TR_desc() native_load_tr_desc()
-#define load_gdt(dtr) native_load_gdt(dtr)
-#define load_idt(dtr) native_load_idt(dtr)
-#define load_tr(tr) asm volatile("ltr %0"::"m" (tr))
-#define load_ldt(ldt) asm volatile("lldt %0"::"m" (ldt))
-#define store_gdt(dtr) native_store_gdt(dtr)
-#define store_idt(dtr) native_store_idt(dtr)
-#define store_tr(tr) (tr = native_store_tr())
-#define load_TLS(t, cpu) native_load_tls(t, cpu)
-#define set_ldt native_set_ldt
-#define write_ldt_entry(dt, entry, desc) \
-	native_write_ldt_entry(dt, entry, desc)
-#define write_gdt_entry(dt, entry, desc, type) \
-	native_write_gdt_entry(dt, entry, desc, type)
-#define write_idt_entry(dt, entry, g) \
-	native_write_idt_entry(dt, entry, g)
+#define load_TR_desc()				native_load_tr_desc()
+#define load_gdt(dtr)				native_load_gdt(dtr)
+#define load_idt(dtr)				native_load_idt(dtr)
+#define load_tr(tr)				asm volatile("ltr %0"::"m" (tr))
+#define load_ldt(ldt)				asm volatile("lldt %0"::"m" (ldt))
+#define store_gdt(dtr)				native_store_gdt(dtr)
+#define store_idt(dtr)				native_store_idt(dtr)
+#define store_tr(tr)				(tr = native_store_tr())
+#define load_TLS(t, cpu)			native_load_tls(t, cpu)
+#define set_ldt					native_set_ldt
+#define write_ldt_entry(dt, entry, desc)	native_write_ldt_entry(dt, entry, desc)
+#define write_gdt_entry(dt, entry, desc, type)	native_write_gdt_entry(dt, entry, desc, type)
+#define write_idt_entry(dt, entry, g)		native_write_idt_entry(dt, entry, g)

 static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
 {
@@ -112,33 +113,27 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
 #define store_ldt(ldt) asm("sldt %0" : "=m"(ldt))

-static inline void native_write_idt_entry(gate_desc *idt, int entry,
-					  const gate_desc *gate)
+static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
 {
 	memcpy(&idt[entry], gate, sizeof(*gate));
 }

-static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
-					  const void *desc)
+static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
 {
 	memcpy(&ldt[entry], desc, 8);
 }

-static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
-					   const void *desc, int type)
+static inline void
+native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type)
 {
 	unsigned int size;
+
 	switch (type) {
-	case DESC_TSS:
-		size = sizeof(tss_desc);
-		break;
-	case DESC_LDT:
-		size = sizeof(ldt_desc);
-		break;
-	default:
-		size = sizeof(struct desc_struct);
-		break;
+	case DESC_TSS:	size = sizeof(tss_desc);	break;
+	case DESC_LDT:	size = sizeof(ldt_desc);	break;
+	default:	size = sizeof(*gdt);		break;
 	}
+
 	memcpy(&gdt[entry], desc, size);
 }
@@ -154,20 +149,21 @@ static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
 }

-static inline void set_tssldt_descriptor(void *d, unsigned long addr,
-					 unsigned type, unsigned size)
+static inline void set_tssldt_descriptor(void *d, unsigned long addr, unsigned type, unsigned size)
 {
 #ifdef CONFIG_X86_64
 	struct ldttss_desc64 *desc = d;
+
 	memset(desc, 0, sizeof(*desc));
-	desc->limit0 = size & 0xFFFF;
-	desc->base0 = PTR_LOW(addr);
-	desc->base1 = PTR_MIDDLE(addr) & 0xFF;
-	desc->type = type;
-	desc->p = 1;
-	desc->limit1 = (size >> 16) & 0xF;
-	desc->base2 = (PTR_MIDDLE(addr) >> 8) & 0xFF;
-	desc->base3 = PTR_HIGH(addr);
+
+	desc->limit0		= size & 0xFFFF;
+	desc->base0		= PTR_LOW(addr);
+	desc->base1		= PTR_MIDDLE(addr) & 0xFF;
+	desc->type		= type;
+	desc->p			= 1;
+	desc->limit1		= (size >> 16) & 0xF;
+	desc->base2		= (PTR_MIDDLE(addr) >> 8) & 0xFF;
+	desc->base3		= PTR_HIGH(addr);
 #else
 	pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0);
 #endif
@@ -237,14 +233,16 @@ static inline void native_store_idt(struct desc_ptr *dtr)
 static inline unsigned long native_store_tr(void)
 {
 	unsigned long tr;
+
 	asm volatile("str %0":"=r" (tr));
+
 	return tr;
 }

 static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
 {
-	unsigned int i;
 	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+	unsigned int i;

 	for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
 		gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
@@ -313,6 +311,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
 			     unsigned dpl, unsigned ist, unsigned seg)
 {
 	gate_desc s;
+
 	pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
 	/*
 	 * does not need to be atomic because it is only done once at
@@ -343,8 +342,9 @@ static inline void alloc_system_vector(int vector)
 		set_bit(vector, used_vectors);
 		if (first_system_vector > vector)
 			first_system_vector = vector;
-	} else
+	} else {
 		BUG();
+	}
 }

 static inline void alloc_intr_gate(unsigned int n, void *addr)
...
@@ -11,14 +11,14 @@
 typedef struct {
 	void *ldt;
 	int size;
-	struct mutex lock;
-	void *vdso;

 #ifdef CONFIG_X86_64
 	/* True if mm supports a task running in 32 bit compatibility mode. */
 	unsigned short ia32_compat;
 #endif
+
+	struct mutex lock;
+	void *vdso;
 } mm_context_t;

 #ifdef CONFIG_SMP
...
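The shrink works because moving the pointer-sized members after the short ia32_compat lets the short pack into the alignment hole behind the int, instead of forcing padding both before the mutex and at the struct tail on x86_64. A standalone sketch of the before/after sizes (fake_mutex is a stand-in; the real struct mutex is larger, which only widens the effect):

	#include <stdio.h>

	struct fake_mutex { void *owner; };	/* stand-in for struct mutex */

	struct ctx_before {
		void *ldt;
		int size;			/* 4 bytes + 4 padding before lock */
		struct fake_mutex lock;
		void *vdso;
		unsigned short ia32_compat;	/* 2 bytes + 6 bytes tail padding */
	};

	struct ctx_after {
		void *ldt;
		int size;
		unsigned short ia32_compat;	/* packs into the hole after size */
		struct fake_mutex lock;
		void *vdso;
	};

	int main(void)
	{
		/* On x86_64 this prints 40 vs 32: one pointer of padding saved. */
		printf("before: %zu bytes, after: %zu bytes\n",
		       sizeof(struct ctx_before), sizeof(struct ctx_after));
		return 0;
	}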
@@ -77,8 +77,9 @@
  *
  *		1111110000000000
  *		5432109876543210
- *		pppppppppplc0cch	Nehalem-EX
- *		ppppppppplcc0cch	Westmere-EX
+ *		pppppppppplc0cch	Nehalem-EX (12 bits in hdw reg)
+ *		ppppppppplcc0cch	Westmere-EX (12 bits in hdw reg)
+ *		pppppppppppcccch	SandyBridge (15 bits in hdw reg)
  *		sssssssssss
  *
  * p = pnode bits
@@ -87,7 +88,7 @@
  * h = hyperthread
  * s = bits that are in the SOCKET_ID CSR
  *
- * Note: Processor only supports 12 bits in the APICID register. The ACPI
+ * Note: Processor may support fewer bits in the APICID register. The ACPI
  *       tables hold all 16 bits. Software needs to be aware of this.
  *
  * Unless otherwise specified, all references to APICID refer to
@@ -138,6 +139,8 @@ struct uv_hub_info_s {
 	unsigned long global_mmr_base;
 	unsigned long gpa_mask;
 	unsigned int gnode_extra;
+	unsigned char hub_revision;
+	unsigned char apic_pnode_shift;
 	unsigned long gnode_upper;
 	unsigned long lowmem_remap_top;
 	unsigned long lowmem_remap_base;
@@ -149,13 +152,31 @@ struct uv_hub_info_s {
 	unsigned char m_val;
 	unsigned char n_val;
 	struct uv_scir_s scir;
-	unsigned char apic_pnode_shift;
 };

 DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
 #define uv_hub_info (&__get_cpu_var(__uv_hub_info))
 #define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu))

+/*
+ * Hub revisions less than UV2_HUB_REVISION_BASE are UV1 hubs. All UV2
+ * hubs have revision numbers greater than or equal to UV2_HUB_REVISION_BASE.
+ * This is a software convention - NOT the hardware revision numbers in
+ * the hub chip.
+ */
+#define UV1_HUB_REVISION_BASE	1
+#define UV2_HUB_REVISION_BASE	3
+
+static inline int is_uv1_hub(void)
+{
+	return uv_hub_info->hub_revision < UV2_HUB_REVISION_BASE;
+}
+
+static inline int is_uv2_hub(void)
+{
+	return uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE;
+}
+
 union uvh_apicid {
 	unsigned long v;
 	struct uvh_apicid_s {
@@ -180,11 +201,25 @@ union uvh_apicid {
 #define UV_PNODE_TO_GNODE(p) ((p) |uv_hub_info->gnode_extra)
 #define UV_PNODE_TO_NASID(p) (UV_PNODE_TO_GNODE(p) << 1)

-#define UV_LOCAL_MMR_BASE	0xf4000000UL
-#define UV_GLOBAL_MMR32_BASE	0xf8000000UL
+#define UV1_LOCAL_MMR_BASE	0xf4000000UL
+#define UV1_GLOBAL_MMR32_BASE	0xf8000000UL
+#define UV1_LOCAL_MMR_SIZE	(64UL * 1024 * 1024)
+#define UV1_GLOBAL_MMR32_SIZE	(64UL * 1024 * 1024)
+
+#define UV2_LOCAL_MMR_BASE	0xfa000000UL
+#define UV2_GLOBAL_MMR32_BASE	0xfc000000UL
+#define UV2_LOCAL_MMR_SIZE	(32UL * 1024 * 1024)
+#define UV2_GLOBAL_MMR32_SIZE	(32UL * 1024 * 1024)
+
+#define UV_LOCAL_MMR_BASE	(is_uv1_hub() ? UV1_LOCAL_MMR_BASE	\
+				: UV2_LOCAL_MMR_BASE)
+#define UV_GLOBAL_MMR32_BASE	(is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE	\
+				: UV2_GLOBAL_MMR32_BASE)
+#define UV_LOCAL_MMR_SIZE	(is_uv1_hub() ? UV1_LOCAL_MMR_SIZE :	\
+				UV2_LOCAL_MMR_SIZE)
+#define UV_GLOBAL_MMR32_SIZE	(is_uv1_hub() ? UV1_GLOBAL_MMR32_SIZE :	\
+				UV2_GLOBAL_MMR32_SIZE)
 #define UV_GLOBAL_MMR64_BASE	(uv_hub_info->global_mmr_base)
-#define UV_LOCAL_MMR_SIZE	(64UL * 1024 * 1024)
-#define UV_GLOBAL_MMR32_SIZE	(64UL * 1024 * 1024)

 #define UV_GLOBAL_GRU_MMR_BASE	0x4000000
@@ -300,6 +335,17 @@ static inline int uv_apicid_to_pnode(int apicid)
 	return (apicid >> uv_hub_info->apic_pnode_shift);
 }

+/*
+ * Convert an apicid to the socket number on the blade
+ */
+static inline int uv_apicid_to_socket(int apicid)
+{
+	if (is_uv1_hub())
+		return (apicid >> (uv_hub_info->apic_pnode_shift - 1)) & 1;
+	else
+		return 0;
+}
+
 /*
  * Access global MMRs using the low memory MMR32 space. This region supports
  * faster MMR access but not all MMRs are accessible in this space.
@@ -519,14 +565,13 @@ static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)

 /*
  * Get the minimum revision number of the hub chips within the partition.
- *	1 - initial rev 1.0 silicon
- *	2 - rev 2.0 production silicon
+ *	1 - UV1 rev 1.0 initial silicon
+ *	2 - UV1 rev 2.0 production silicon
+ *	3 - UV2 rev 1.0 initial silicon
  */
 static inline int uv_get_min_hub_revision_id(void)
 {
-	extern int uv_min_hub_revision_id;
-
-	return uv_min_hub_revision_id;
+	return uv_hub_info->hub_revision;
 }

 #endif /* CONFIG_X86_64 */
...
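The revision-base convention above decouples the generation test from raw hardware revision values: 1-2 denote UV1 silicon, 3 and up denote UV2. A trivial standalone sketch of the same mapping (hub_generation() is a hypothetical helper; values follow the comment block and uv_get_min_hub_revision_id()):

	#include <stdio.h>

	/* Software revision bases, as defined in the patch above. */
	#define UV1_HUB_REVISION_BASE 1
	#define UV2_HUB_REVISION_BASE 3

	/* Hypothetical mirror of is_uv1_hub()/is_uv2_hub(). */
	static const char *hub_generation(int hub_revision)
	{
		return hub_revision < UV2_HUB_REVISION_BASE ? "UV1" : "UV2";
	}

	int main(void)
	{
		int rev;

		/* 1, 2 = UV1 silicon revs; 3 = UV2 rev 1.0 initial silicon */
		for (rev = 1; rev <= 3; rev++)
			printf("hub_revision %d -> %s\n", rev, hub_generation(rev));
		return 0;
	}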
@@ -91,6 +91,10 @@ static int __init early_get_pnodeid(void)
 	m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_CONFIG_MMR);
 	uv_min_hub_revision_id = node_id.s.revision;

+	if (node_id.s.part_number == UV2_HUB_PART_NUMBER)
+		uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
+
+	uv_hub_info->hub_revision = uv_min_hub_revision_id;
 	pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1);
 	return pnode;
 }
@@ -112,17 +116,25 @@ static void __init early_get_apic_pnode_shift(void)
  */
 static void __init uv_set_apicid_hibit(void)
 {
-	union uvh_lb_target_physical_apic_id_mask_u apicid_mask;
+	union uv1h_lb_target_physical_apic_id_mask_u apicid_mask;

-	apicid_mask.v = uv_early_read_mmr(UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK);
-	uv_apicid_hibits = apicid_mask.s.bit_enables & UV_APICID_HIBIT_MASK;
+	if (is_uv1_hub()) {
+		apicid_mask.v =
+			uv_early_read_mmr(UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK);
+		uv_apicid_hibits =
+			apicid_mask.s1.bit_enables & UV_APICID_HIBIT_MASK;
+	}
 }

 static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
-	int pnodeid;
+	int pnodeid, is_uv1, is_uv2;

-	if (!strcmp(oem_id, "SGI")) {
+	is_uv1 = !strcmp(oem_id, "SGI");
+	is_uv2 = !strcmp(oem_id, "SGI2");
+	if (is_uv1 || is_uv2) {
+		uv_hub_info->hub_revision =
+			is_uv1 ? UV1_HUB_REVISION_BASE : UV2_HUB_REVISION_BASE;
 		pnodeid = early_get_pnodeid();
 		early_get_apic_pnode_shift();
 		x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
@@ -484,12 +496,19 @@ static __init void map_mmr_high(int max_pnode)
 static __init void map_mmioh_high(int max_pnode)
 {
 	union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
-	int shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
+	int shift;

 	mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
-	if (mmioh.s.enable)
-		map_high("MMIOH", mmioh.s.base, shift, mmioh.s.m_io,
+	if (is_uv1_hub() && mmioh.s1.enable) {
+		shift = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
+		map_high("MMIOH", mmioh.s1.base, shift, mmioh.s1.m_io,
+			max_pnode, map_uc);
+	}
+	if (is_uv2_hub() && mmioh.s2.enable) {
+		shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
+		map_high("MMIOH", mmioh.s2.base, shift, mmioh.s2.m_io,
 			max_pnode, map_uc);
+	}
 }

 static __init void map_low_mmrs(void)
static __init void map_low_mmrs(void) static __init void map_low_mmrs(void)
...@@ -736,13 +755,14 @@ void __init uv_system_init(void) ...@@ -736,13 +755,14 @@ void __init uv_system_init(void)
unsigned long mmr_base, present, paddr; unsigned long mmr_base, present, paddr;
unsigned short pnode_mask, pnode_io_mask; unsigned short pnode_mask, pnode_io_mask;
printk(KERN_INFO "UV: Found %s hub\n", is_uv1_hub() ? "UV1" : "UV2");
map_low_mmrs(); map_low_mmrs();
m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR ); m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR );
m_val = m_n_config.s.m_skt; m_val = m_n_config.s.m_skt;
n_val = m_n_config.s.n_skt; n_val = m_n_config.s.n_skt;
mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR); mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
n_io = mmioh.s.n_io; n_io = is_uv1_hub() ? mmioh.s1.n_io : mmioh.s2.n_io;
mmr_base = mmr_base =
uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) & uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
~UV_MMR_ENABLE; ~UV_MMR_ENABLE;
@@ -811,6 +831,8 @@ void __init uv_system_init(void)
 		 */
 		uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
 		uv_cpu_hub_info(cpu)->apic_pnode_shift = uvh_apicid.s.pnode_shift;
+		uv_cpu_hub_info(cpu)->hub_revision = uv_hub_info->hub_revision;
+
 		pnode = uv_apicid_to_pnode(apicid);
 		blade = boot_pnode_to_blade(pnode);
 		lcpu = uv_blade_info[blade].nr_possible_cpus;
...
@@ -612,8 +612,11 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	}
 #endif

-	/* As a rule processors have APIC timer running in deep C states */
-	if (c->x86 > 0xf && !cpu_has_amd_erratum(amd_erratum_400))
+	/*
+	 * Family 0x12 and above processors have APIC timer
+	 * running in deep C states.
+	 */
+	if (c->x86 > 0x11)
 		set_cpu_cap(c, X86_FEATURE_ARAT);

 	/*
...
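The new test relies on c->x86 holding the display family, i.e. the base family from CPUID leaf 1 plus the extended family when the base reads 0xf. A userspace sketch of the same computation (uses GCC/Clang's cpuid.h helper; x86 only):

	#include <stdio.h>
	#include <cpuid.h>	/* provides __get_cpuid() on GCC/Clang */

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx, family;

		if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
			return 1;

		/* Display family = base family, plus extended family when the
		 * base field reads 0xf -- this mirrors how the kernel fills
		 * c->x86 for the check above. */
		family = (eax >> 8) & 0xf;
		if (family == 0xf)
			family += (eax >> 20) & 0xff;

		printf("display family: 0x%x\n", family);
		return 0;
	}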
@@ -477,13 +477,6 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 	if (smp_num_siblings <= 1)
 		goto out;

-	if (smp_num_siblings > nr_cpu_ids) {
-		pr_warning("CPU: Unsupported number of siblings %d",
-			   smp_num_siblings);
-		smp_num_siblings = 1;
-		return;
-	}
-
 	index_msb = get_count_order(smp_num_siblings);
 	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
...
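With the check gone, detect_ht() proceeds straight to get_count_order(), which yields the number of APIC-ID bits needed to index the siblings, rounding up for non-power-of-two counts. A standalone sketch of that rounding (count_order() is a userspace stand-in for the kernel helper; assumes count > 0):

	#include <stdio.h>

	/* ceil(log2(count)): bits needed to index `count` items. */
	static int count_order(unsigned int count)
	{
		int order = 31 - __builtin_clz(count);	/* floor(log2(count)) */

		if (count & (count - 1))	/* not a power of two? round up */
			order++;
		return order;
	}

	int main(void)
	{
		unsigned int siblings;

		/* e.g. 2 siblings -> 1 index bit, 6 -> 3, 8 -> 3 */
		for (siblings = 2; siblings <= 8; siblings++)
			printf("siblings=%u -> index_msb=%d\n",
			       siblings, count_order(siblings));
		return 0;
	}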
@@ -910,6 +910,13 @@ void __init setup_arch(char **cmdline_p)
 	memblock.current_limit = get_max_mapped();
 	memblock_x86_fill();

+	/*
+	 * The EFI specification says that boot service code won't be called
+	 * after ExitBootServices(). This is, in fact, a lie.
+	 */
+	if (efi_enabled)
+		efi_reserve_boot_services();
+
 	/* preallocate 4k for mptable mpc */
 	early_reserve_e820_mpc_new();
...
@@ -823,16 +823,30 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
 	force_sig_info_fault(SIGBUS, code, address, tsk, fault);
 }

-static noinline void
+static noinline int
 mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 	       unsigned long address, unsigned int fault)
 {
+	/*
+	 * Pagefault was interrupted by SIGKILL. We have no reason to
+	 * continue pagefault.
+	 */
+	if (fatal_signal_pending(current)) {
+		if (!(fault & VM_FAULT_RETRY))
+			up_read(&current->mm->mmap_sem);
+		if (!(error_code & PF_USER))
+			no_context(regs, error_code, address);
+		return 1;
+	}
+
+	if (!(fault & VM_FAULT_ERROR))
+		return 0;
+
 	if (fault & VM_FAULT_OOM) {
 		/* Kernel mode? Handle exceptions or die: */
 		if (!(error_code & PF_USER)) {
 			up_read(&current->mm->mmap_sem);
 			no_context(regs, error_code, address);
-			return;
+			return 1;
 		}

 		out_of_memory(regs, error_code, address);
@@ -843,6 +857,7 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 		else
 			BUG();
 	}
+	return 1;
 }

 static int spurious_fault_check(unsigned long error_code, pte_t *pte)
@@ -1133,19 +1148,9 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	 */
 	fault = handle_mm_fault(mm, vma, address, flags);

-	if (unlikely(fault & VM_FAULT_ERROR)) {
-		mm_fault_error(regs, error_code, address, fault);
-		return;
-	}
-
-	/*
-	 * Pagefault was interrupted by SIGKILL. We have no reason to
-	 * continue pagefault.
-	 */
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
-		if (!(error_code & PF_USER))
-			no_context(regs, error_code, address);
-		return;
+	if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
+		if (mm_fault_error(regs, error_code, address, fault))
+			return;
 	}

 	/*
...
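unlikely() expands to __builtin_expect(), which lets the compiler lay the error path out of line so the common no-fault path stays straight-line code. A minimal userspace sketch of the pattern (the likely()/unlikely() macros match the kernel's definitions; the rest is illustrative):

	#include <stdio.h>

	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	static int handle_error(int fault)
	{
		fprintf(stderr, "rare error path, fault=%d\n", fault);
		return 1;
	}

	int process(int fault)
	{
		if (unlikely(fault != 0)) {	/* cold: moved off the fast path */
			if (handle_error(fault))
				return -1;
		}
		return 0;			/* hot: falls straight through */
	}

	int main(void)
	{
		printf("ok=%d err=%d\n", process(0), process(42));
		return 0;
	}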
@@ -304,6 +304,40 @@ static void __init print_efi_memmap(void)
 }
 #endif  /*  EFI_DEBUG  */

+void __init efi_reserve_boot_services(void)
+{
+	void *p;
+
+	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+		efi_memory_desc_t *md = p;
+		unsigned long long start = md->phys_addr;
+		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+
+		if (md->type != EFI_BOOT_SERVICES_CODE &&
+		    md->type != EFI_BOOT_SERVICES_DATA)
+			continue;
+
+		memblock_x86_reserve_range(start, start + size, "EFI Boot");
+	}
+}
+
+static void __init efi_free_boot_services(void)
+{
+	void *p;
+
+	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+		efi_memory_desc_t *md = p;
+		unsigned long long start = md->phys_addr;
+		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+
+		if (md->type != EFI_BOOT_SERVICES_CODE &&
+		    md->type != EFI_BOOT_SERVICES_DATA)
+			continue;
+
+		free_bootmem_late(start, size);
+	}
+}
+
 void __init efi_init(void)
 {
 	efi_config_table_t *config_tables;
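Note that both loops stride by memmap.desc_size rather than sizeof(efi_memory_desc_t): firmware may hand back per-entry sizes larger than the C struct. A self-contained sketch of that walk over a fake memory map (all types and sizes here are made up for illustration):

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	/* Simplified stand-ins for the EFI types used above (assumed layout). */
	#define EFI_BOOT_SERVICES_CODE	3
	#define EFI_BOOT_SERVICES_DATA	4
	#define EFI_PAGE_SHIFT		12

	typedef struct {
		uint32_t type;
		uint64_t phys_addr;
		uint64_t num_pages;
	} efi_memory_desc_t;

	int main(void)
	{
		/* Pretend the firmware reported desc_size = 48, which is
		 * larger than sizeof(efi_memory_desc_t) -- so the walk must
		 * stride by desc_size, exactly as the kernel loops above do. */
		unsigned char map[3][48];
		size_t desc_size = sizeof(map[0]);
		efi_memory_desc_t descs[3] = {
			{ EFI_BOOT_SERVICES_CODE, 0x100000, 16 },
			{ 7 /* conventional */,   0x200000, 64 },
			{ EFI_BOOT_SERVICES_DATA, 0x300000,  8 },
		};
		unsigned char *p, *end = (unsigned char *)map + sizeof(map);
		unsigned long long reserved = 0;
		int i;

		for (i = 0; i < 3; i++)
			memcpy(map[i], &descs[i], sizeof(descs[i]));

		for (p = (unsigned char *)map; p < end; p += desc_size) {
			efi_memory_desc_t md;

			memcpy(&md, p, sizeof(md));
			if (md.type != EFI_BOOT_SERVICES_CODE &&
			    md.type != EFI_BOOT_SERVICES_DATA)
				continue;
			reserved += md.num_pages << EFI_PAGE_SHIFT;
		}

		printf("would reserve %llu bytes of boot services memory\n",
		       reserved);
		return 0;
	}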
@@ -536,7 +570,9 @@ void __init efi_enter_virtual_mode(void)
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
 		md = p;
-		if (!(md->attribute & EFI_MEMORY_RUNTIME))
+		if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
+		    md->type != EFI_BOOT_SERVICES_CODE &&
+		    md->type != EFI_BOOT_SERVICES_DATA)
 			continue;

 		size = md->num_pages << EFI_PAGE_SHIFT;
@@ -592,6 +628,13 @@ void __init efi_enter_virtual_mode(void)
 		panic("EFI call to SetVirtualAddressMap() failed!");
 	}

+	/*
+	 * Thankfully, it does seem that no runtime services other than
+	 * SetVirtualAddressMap() will touch boot services code, so we can
+	 * get rid of it all at this point
+	 */
+	efi_free_boot_services();
+
 	/*
 	 * Now that EFI is in virtual mode, update the function
 	 * pointers in the runtime service table to the new virtual addresses.
...
@@ -49,10 +49,11 @@ static void __init early_code_mapping_set_exec(int executable)
 	if (!(__supported_pte_mask & _PAGE_NX))
 		return;

-	/* Make EFI runtime service code area executable */
+	/* Make EFI service code area executable */
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
 		md = p;
-		if (md->type == EFI_RUNTIME_SERVICES_CODE)
+		if (md->type == EFI_RUNTIME_SERVICES_CODE ||
+		    md->type == EFI_BOOT_SERVICES_CODE)
 			efi_set_executable(md, executable);
 	}
 }
...
@@ -99,8 +99,12 @@ static void uv_rtc_send_IPI(int cpu)
 /* Check for an RTC interrupt pending */
 static int uv_intr_pending(int pnode)
 {
-	return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) &
-		UVH_EVENT_OCCURRED0_RTC1_MASK;
+	if (is_uv1_hub())
+		return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) &
+			UV1H_EVENT_OCCURRED0_RTC1_MASK;
+	else
+		return uv_read_global_mmr64(pnode, UV2H_EVENT_OCCURRED2) &
+			UV2H_EVENT_OCCURRED2_RTC_1_MASK;
 }

 /* Setup interrupt and return non-zero if early expiration occurred. */
@@ -114,8 +118,12 @@ static int uv_setup_intr(int cpu, u64 expires)
 		UVH_RTC1_INT_CONFIG_M_MASK);
 	uv_write_global_mmr64(pnode, UVH_INT_CMPB, -1L);

-	uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
-		UVH_EVENT_OCCURRED0_RTC1_MASK);
+	if (is_uv1_hub())
+		uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
+				UV1H_EVENT_OCCURRED0_RTC1_MASK);
+	else
+		uv_write_global_mmr64(pnode, UV2H_EVENT_OCCURRED2_ALIAS,
+				UV2H_EVENT_OCCURRED2_RTC_1_MASK);

 	val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
 		((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);
...
@@ -299,6 +299,7 @@ extern void efi_initialize_iomem_resources(struct resource *code_resource,
 		struct resource *data_resource, struct resource *bss_resource);
 extern unsigned long efi_get_time(void);
 extern int efi_set_rtc_mmss(unsigned long nowtime);
+extern void efi_reserve_boot_services(void);
 extern struct efi_memory_map memmap;

 /**
...