Commit c12489ad authored by Steve French's avatar Steve French

Merge with /pub/scm/linux/kernel/git/torvalds/linux-2.6.git

parents ff7feac9 1e185b97
......@@ -386,7 +386,7 @@ X!Edrivers/pnp/system.c
<chapter id="blkdev">
<title>Block Devices</title>
!Edrivers/block/ll_rw_blk.c
!Eblock/ll_rw_blk.c
</chapter>
<chapter id="miscdev">
......
......@@ -1063,8 +1063,8 @@ Aside:
4.4 I/O contexts
I/O contexts provide a dynamically allocated per process data area. They may
be used in I/O schedulers, and in the block layer (could be used for IO stats,
priorities for example). See *io_context in drivers/block/ll_rw_blk.c, and
as-iosched.c for an example of usage in an i/o scheduler.
priorities for example). See *io_context in block/ll_rw_blk.c, and as-iosched.c
for an example of usage in an i/o scheduler.
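A rough userspace sketch of the idea — a lazily allocated, per-process area that an I/O scheduler can hang its state off — with illustrative field names rather than the kernel's actual io_context layout:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical, simplified stand-ins for the kernel structures. */
struct io_context {
	int batching;	/* e.g. anticipatory-scheduler batching state */
	int ioprio;	/* e.g. a per-process I/O priority */
};

struct task {
	struct io_context *io_context;	/* lazily allocated, one per process */
};

/* Return the task's I/O context, allocating it on first use. */
static struct io_context *get_io_context(struct task *tsk)
{
	if (!tsk->io_context)
		tsk->io_context = calloc(1, sizeof(*tsk->io_context));
	return tsk->io_context;
}

int main(void)
{
	struct task t = { 0 };
	struct io_context *ioc = get_io_context(&t);

	ioc->ioprio = 4;	/* an I/O scheduler could keep its state here */
	printf("ioprio=%d batching=%d\n", ioc->ioprio, ioc->batching);
	free(ioc);
	return 0;
}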
5. Scalability related changes
......
......@@ -140,3 +140,12 @@ What: EXPORT_SYMBOL(lookup_hash)
When: January 2006
Why: Too low-level interface. Use lookup_one_len or lookup_create instead.
Who: Christoph Hellwig <hch@lst.de>
---------------------------
What: START_ARRAY ioctl for md
When: July 2006
Files: drivers/md/md.c
Why: Not reliable by design - can fail when most needed.
Alternatives exist
Who: NeilBrown <neilb@suse.de>
......@@ -32,7 +32,10 @@ the disk is not available then you have three options :-
has restarted. Messy but it is the only option if you have not
planned for a crash. Alternatively, you can take a picture of
the screen with a digital camera - not nice, but better than
nothing.
nothing. If the messages scroll off the top of the console, you
may find that booting with a higher resolution (eg, vga=791)
will allow you to read more of the text. (Caveat: This needs vesafb,
so won't help for 'early' oopses)
(2) Boot with a serial console (see Documentation/serial-console.txt),
run a null modem to a second machine and capture the output there
......
......@@ -7,10 +7,12 @@ Machine check
mce=off disable machine check
mce=bootlog Enable logging of machine checks left over from booting.
Disabled by default because some BIOS leave bogus ones.
Disabled by default on AMD because some BIOS leave bogus ones.
If your BIOS doesn't do that, it's a good idea to enable it anyway
to make sure you log even machine check events that result
in a reboot.
in a reboot. On Intel systems it is enabled by default.
mce=nobootlog
Disable boot machine check logging.
mce=tolerancelevel (number)
0: always panic, 1: panic if deadlock possible,
2: try to avoid panic, 3: never panic or exit (for testing)
......@@ -122,6 +124,9 @@ SMP
cpumask=MASK only use cpus with bits set in mask
additional_cpus=NUM Allow NUM more CPUs for hotplug
(defaults are specified by the BIOS or half the available CPUs)
NUMA
numa=off Only set up a single NUMA node spanning all memory.
......@@ -188,6 +193,9 @@ Debugging
kstack=N Print that many words from the kernel stack in oops dumps.
pagefaulttrace Dump all page faults. Only useful for extreme debugging
and will create a lot of output.
Misc
noreplacement Don't replace instructions with more appropriate ones
......
......@@ -6,7 +6,7 @@ Virtual memory map with 4 level page tables:
0000000000000000 - 00007fffffffffff (=47bits) user space, different per mm
hole caused by [48:63] sign extension
ffff800000000000 - ffff80ffffffffff (=40bits) guard hole
ffff810000000000 - ffffc0ffffffffff (=46bits) direct mapping of phys. memory
ffff810000000000 - ffffc0ffffffffff (=46bits) direct mapping of all phys. memory
ffffc10000000000 - ffffc1ffffffffff (=40bits) hole
ffffc20000000000 - ffffe1ffffffffff (=45bits) vmalloc/ioremap space
... unused hole ...
......@@ -14,6 +14,10 @@ ffffffff80000000 - ffffffff82800000 (=40MB) kernel text mapping, from phys 0
... unused hole ...
ffffffff88000000 - fffffffffff00000 (=1919MB) module mapping space
The direct mapping covers all memory in the system up to the highest
memory address (this means in some cases it can also include PCI memory
holes)
vmalloc space is lazily synchronized into the different PML4 pages of
the processes using the page fault handler, with init_level4_pgt as
reference.
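For reference, translation within that direct mapping is plain offset arithmetic, which is what the kernel's __va()/__pa() helpers boil down to on this layout; a standalone sketch using the base address from the table above:

#include <stdio.h>
#include <stdint.h>

/* Base of the direct (linear) mapping from the table above. */
#define DIRECT_MAP_BASE 0xffff810000000000ULL

/* Under the direct mapping, phys<->virt is pure offset arithmetic. */
static uint64_t phys_to_virt(uint64_t phys) { return phys + DIRECT_MAP_BASE; }
static uint64_t virt_to_phys(uint64_t virt) { return virt - DIRECT_MAP_BASE; }

int main(void)
{
	uint64_t phys = 0x100000;	/* 1 MiB, for illustration */
	uint64_t virt = phys_to_virt(phys);

	printf("phys %#llx -> virt %#llx -> phys %#llx\n",
	       (unsigned long long)phys,
	       (unsigned long long)virt,
	       (unsigned long long)virt_to_phys(virt));
	return 0;
}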
......
......@@ -652,25 +652,11 @@ endmenu
menu "Power management options"
config PM
bool "Power Management support"
---help---
"Power Management" means that parts of your computer are shut
off or put into a power conserving "sleep" mode if they are not
being used. There are two competing standards for doing this: APM
and ACPI. If you want to use either one, say Y here and then also
to the requisite support below.
Power Management is most important for battery powered laptop
computers; if you have a laptop, check out the Linux Laptop home
page on the WWW at <http://www.linux-on-laptops.com/> or
Tuxmobil - Linux on Mobile Computers at <http://www.tuxmobil.org/>
and the Battery Powered Linux mini-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
source "kernel/power/Kconfig"
config APM
tristate "Advanced Power Management Emulation"
depends on PM
depends on PM_LEGACY
---help---
APM is a BIOS specification for saving power using several different
techniques. This is mostly useful for battery powered laptops with
......
......@@ -1266,7 +1266,7 @@ static void __exit sa1111_exit(void)
bus_unregister(&sa1111_bus_type);
}
module_init(sa1111_init);
subsys_initcall(sa1111_init);
module_exit(sa1111_exit);
MODULE_DESCRIPTION("Intel Corporation SA1111 core driver");
......
......@@ -39,17 +39,14 @@
#ifdef CONFIG_X86_64
static inline void acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
}
extern void __init clustered_apic_check(void);
static inline int ioapic_setup_disabled(void)
{
return 0;
}
extern int gsi_irq_sharing(int gsi);
#include <asm/proto.h>
static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; }
#else /* X86 */
#ifdef CONFIG_X86_LOCAL_APIC
......@@ -57,6 +54,8 @@ static inline int ioapic_setup_disabled(void)
#include <mach_mpparse.h>
#endif /* CONFIG_X86_LOCAL_APIC */
static inline int gsi_irq_sharing(int gsi) { return gsi; }
#endif /* X86 */
#define BAD_MADT_ENTRY(entry, end) ( \
......@@ -459,7 +458,7 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
*irq = IO_APIC_VECTOR(gsi);
else
#endif
*irq = gsi;
*irq = gsi_irq_sharing(gsi);
return 0;
}
......@@ -543,7 +542,7 @@ acpi_scan_rsdp(unsigned long start, unsigned long length)
* RSDP signature.
*/
for (offset = 0; offset < length; offset += 16) {
if (strncmp((char *)(start + offset), "RSD PTR ", sig_len))
if (strncmp((char *)(phys_to_virt(start) + offset), "RSD PTR ", sig_len))
continue;
return (start + offset);
}
......
......@@ -206,9 +206,9 @@ static void __init init_amd(struct cpuinfo_x86 *c)
display_cacheinfo(c);
if (cpuid_eax(0x80000000) >= 0x80000008) {
c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
if (c->x86_num_cores & (c->x86_num_cores - 1))
c->x86_num_cores = 1;
c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
if (c->x86_max_cores & (c->x86_max_cores - 1))
c->x86_max_cores = 1;
}
#ifdef CONFIG_X86_HT
......@@ -217,15 +217,15 @@ static void __init init_amd(struct cpuinfo_x86 *c)
* distinguish the cores. Assumes number of cores is a power
* of two.
*/
if (c->x86_num_cores > 1) {
if (c->x86_max_cores > 1) {
int cpu = smp_processor_id();
unsigned bits = 0;
while ((1 << bits) < c->x86_num_cores)
while ((1 << bits) < c->x86_max_cores)
bits++;
cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1);
phys_proc_id[cpu] >>= bits;
printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
cpu, c->x86_num_cores, cpu_core_id[cpu]);
cpu, c->x86_max_cores, cpu_core_id[cpu]);
}
#endif
}
......
......@@ -231,10 +231,10 @@ static void __init early_cpu_detect(void)
cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
c->x86 = (tfms >> 8) & 15;
c->x86_model = (tfms >> 4) & 15;
if (c->x86 == 0xf) {
if (c->x86 == 0xf)
c->x86 += (tfms >> 20) & 0xff;
if (c->x86 >= 0x6)
c->x86_model += ((tfms >> 16) & 0xF) << 4;
}
c->x86_mask = tfms & 15;
if (cap0 & (1<<19))
c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
......@@ -333,7 +333,7 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c)
c->x86_model = c->x86_mask = 0; /* So far unknown... */
c->x86_vendor_id[0] = '\0'; /* Unset */
c->x86_model_id[0] = '\0'; /* Unset */
c->x86_num_cores = 1;
c->x86_max_cores = 1;
memset(&c->x86_capability, 0, sizeof c->x86_capability);
if (!have_cpuid_p()) {
......@@ -443,52 +443,44 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c)
void __devinit detect_ht(struct cpuinfo_x86 *c)
{
u32 eax, ebx, ecx, edx;
int index_msb, tmp;
int index_msb, core_bits;
int cpu = smp_processor_id();
cpuid(1, &eax, &ebx, &ecx, &edx);
c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
return;
cpuid(1, &eax, &ebx, &ecx, &edx);
smp_num_siblings = (ebx & 0xff0000) >> 16;
if (smp_num_siblings == 1) {
printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
} else if (smp_num_siblings > 1 ) {
index_msb = 31;
if (smp_num_siblings > NR_CPUS) {
printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
smp_num_siblings = 1;
return;
}
tmp = smp_num_siblings;
while ((tmp & 0x80000000 ) == 0) {
tmp <<=1 ;
index_msb--;
}
if (smp_num_siblings & (smp_num_siblings - 1))
index_msb++;
index_msb = get_count_order(smp_num_siblings);
phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
phys_proc_id[cpu]);
smp_num_siblings = smp_num_siblings / c->x86_num_cores;
smp_num_siblings = smp_num_siblings / c->x86_max_cores;
tmp = smp_num_siblings;
index_msb = 31;
while ((tmp & 0x80000000) == 0) {
tmp <<=1 ;
index_msb--;
}
index_msb = get_count_order(smp_num_siblings) ;
if (smp_num_siblings & (smp_num_siblings - 1))
index_msb++;
core_bits = get_count_order(c->x86_max_cores);
cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
((1 << core_bits) - 1);
if (c->x86_num_cores > 1)
if (c->x86_max_cores > 1)
printk(KERN_INFO "CPU: Processor Core ID: %d\n",
cpu_core_id[cpu]);
}
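The reworked detect_ht() above derives the package and core IDs by shifting and masking the initial APIC ID, with each field width taken as get_count_order() of the corresponding count. A standalone sketch of that arithmetic (the CPUID-derived counts and the APIC ID are made-up examples):

#include <stdio.h>

/* Equivalent of the kernel's get_count_order(): smallest n with (1 << n) >= count. */
static int count_order(unsigned int count)
{
	int order = 0;
	while ((1u << order) < count)
		order++;
	return order;
}

int main(void)
{
	unsigned int apicid       = 0x0d; /* example initial APIC ID */
	unsigned int siblings_pkg = 4;    /* logical CPUs per package (CPUID.1 EBX[23:16]) */
	unsigned int max_cores    = 2;    /* cores per package */

	int pkg_bits  = count_order(siblings_pkg);              /* bits below the package field */
	int smt_bits  = count_order(siblings_pkg / max_cores);  /* bits for the thread field */
	int core_bits = count_order(max_cores);                 /* bits for the core field */

	unsigned int phys_proc_id = apicid >> pkg_bits;
	unsigned int cpu_core_id  = (apicid >> smt_bits) & ((1u << core_bits) - 1);

	printf("package %u, core %u\n", phys_proc_id, cpu_core_id);
	return 0;
}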
......
......@@ -158,7 +158,7 @@ static void __devinit init_intel(struct cpuinfo_x86 *c)
if ( p )
strcpy(c->x86_model_id, p);
c->x86_num_cores = num_cpu_cores(c);
c->x86_max_cores = num_cpu_cores(c);
detect_ht(c);
......
......@@ -293,29 +293,45 @@ static struct _cpuid4_info *cpuid4_info[NR_CPUS];
#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
struct _cpuid4_info *this_leaf;
struct _cpuid4_info *this_leaf, *sibling_leaf;
unsigned long num_threads_sharing;
#ifdef CONFIG_X86_HT
struct cpuinfo_x86 *c = cpu_data + cpu;
#endif
int index_msb, i;
struct cpuinfo_x86 *c = cpu_data;
this_leaf = CPUID4_INFO_IDX(cpu, index);
num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
if (num_threads_sharing == 1)
cpu_set(cpu, this_leaf->shared_cpu_map);
#ifdef CONFIG_X86_HT
else if (num_threads_sharing == smp_num_siblings)
this_leaf->shared_cpu_map = cpu_sibling_map[cpu];
else if (num_threads_sharing == (c->x86_num_cores * smp_num_siblings))
this_leaf->shared_cpu_map = cpu_core_map[cpu];
else
printk(KERN_DEBUG "Number of CPUs sharing cache didn't match "
"any known set of CPUs\n");
#endif
else {
index_msb = get_count_order(num_threads_sharing);
for_each_online_cpu(i) {
if (c[i].apicid >> index_msb ==
c[cpu].apicid >> index_msb) {
cpu_set(i, this_leaf->shared_cpu_map);
if (i != cpu && cpuid4_info[i]) {
sibling_leaf = CPUID4_INFO_IDX(i, index);
cpu_set(cpu, sibling_leaf->shared_cpu_map);
}
}
}
}
}
static void __devinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
struct _cpuid4_info *this_leaf, *sibling_leaf;
int sibling;
this_leaf = CPUID4_INFO_IDX(cpu, index);
for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
sibling_leaf = CPUID4_INFO_IDX(sibling, index);
cpu_clear(cpu, sibling_leaf->shared_cpu_map);
}
}
#else
static void __init cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __init cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
#endif
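The rewritten sharing setup above replaces the fixed sibling-map/core-map cases with a rule based purely on APIC IDs: two CPUs share a cache leaf when their APIC IDs agree above the bits that enumerate the threads sharing it. A small userspace illustration of that rule (the APIC IDs and thread count are hypothetical):

#include <stdio.h>

static int count_order(unsigned int count)
{
	int order = 0;
	while ((1u << order) < count)
		order++;
	return order;
}

int main(void)
{
	unsigned int apicid[4] = { 0, 1, 2, 3 };	/* hypothetical APIC IDs of four CPUs */
	unsigned int num_threads_sharing = 2;		/* from CPUID.4 EAX[25:14] + 1 */
	int index_msb = count_order(num_threads_sharing);

	/* CPUs share the cache leaf when their APIC IDs match above index_msb. */
	for (int i = 0; i < 4; i++)
		for (int j = 0; j < 4; j++)
			if (j != i && (apicid[i] >> index_msb) == (apicid[j] >> index_msb))
				printf("cpu%d shares this cache with cpu%d\n", i, j);
	return 0;
}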
static void free_cache_attributes(unsigned int cpu)
......@@ -574,8 +590,10 @@ static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
unsigned int cpu = sys_dev->id;
unsigned long i;
for (i = 0; i < num_cache_leaves; i++)
for (i = 0; i < num_cache_leaves; i++) {
cache_remove_shared_cpu_map(cpu, i);
kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
}
kobject_unregister(cache_kobject[cpu]);
cpuid4_cache_sysfs_exit(cpu);
return;
......
......@@ -626,6 +626,14 @@ void __init mtrr_bp_init(void)
if (cpuid_eax(0x80000000) >= 0x80000008) {
u32 phys_addr;
phys_addr = cpuid_eax(0x80000008) & 0xff;
/* CPUID workaround for Intel 0F33/0F34 CPU */
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
boot_cpu_data.x86 == 0xF &&
boot_cpu_data.x86_model == 0x3 &&
(boot_cpu_data.x86_mask == 0x3 ||
boot_cpu_data.x86_mask == 0x4))
phys_addr = 36;
size_or_mask = ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
size_and_mask = ~size_or_mask & 0xfff00000;
} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
......
......@@ -94,12 +94,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (c->x86_cache_size >= 0)
seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
#ifdef CONFIG_X86_HT
if (c->x86_num_cores * smp_num_siblings > 1) {
if (c->x86_max_cores * smp_num_siblings > 1) {
seq_printf(m, "physical id\t: %d\n", phys_proc_id[n]);
seq_printf(m, "siblings\t: %d\n",
c->x86_num_cores * smp_num_siblings);
seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[n]));
seq_printf(m, "core id\t\t: %d\n", cpu_core_id[n]);
seq_printf(m, "cpu cores\t: %d\n", c->x86_num_cores);
seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
}
#endif
......
......@@ -21,6 +21,7 @@
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <mach_ipi.h>
......@@ -147,6 +148,7 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
regs = &fixed_regs;
}
crash_save_this_cpu(regs, cpu);
disable_local_APIC();
atomic_dec(&waiting_for_crash_ipi);
/* Assume hlt works */
halt();
......@@ -186,6 +188,7 @@ static void nmi_shootdown_cpus(void)
}
/* Leave the nmi callback set */
disable_local_APIC();
}
#else
static void nmi_shootdown_cpus(void)
......@@ -210,5 +213,9 @@ void machine_crash_shutdown(struct pt_regs *regs)
/* Make a note of crashing cpu. Will be used in NMI callback.*/
crashing_cpu = smp_processor_id();
nmi_shootdown_cpus();
lapic_shutdown();
#if defined(CONFIG_X86_IO_APIC)
disable_IO_APIC();
#endif
crash_save_self(regs);
}
......@@ -72,9 +72,11 @@ int phys_proc_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
/* Core ID of each logical CPU */
int cpu_core_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
/* representing HT siblings of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);
/* representing HT and core siblings of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
......@@ -442,36 +444,61 @@ static void __devinit smp_callin(void)
static int cpucount;
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
static inline void
set_cpu_sibling_map(int cpu)
{
int i;
struct cpuinfo_x86 *c = cpu_data;
cpu_set(cpu, cpu_sibling_setup_map);
if (smp_num_siblings > 1) {
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_isset(i, cpu_callout_map))
continue;
if (cpu_core_id[cpu] == cpu_core_id[i]) {
for_each_cpu_mask(i, cpu_sibling_setup_map) {
if (phys_proc_id[cpu] == phys_proc_id[i] &&
cpu_core_id[cpu] == cpu_core_id[i]) {
cpu_set(i, cpu_sibling_map[cpu]);
cpu_set(cpu, cpu_sibling_map[i]);
cpu_set(i, cpu_core_map[cpu]);
cpu_set(cpu, cpu_core_map[i]);
}
}
} else {
cpu_set(cpu, cpu_sibling_map[cpu]);
}
if (current_cpu_data.x86_num_cores > 1) {
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_isset(i, cpu_callout_map))
continue;
if (current_cpu_data.x86_max_cores == 1) {
cpu_core_map[cpu] = cpu_sibling_map[cpu];
c[cpu].booted_cores = 1;
return;
}
for_each_cpu_mask(i, cpu_sibling_setup_map) {
if (phys_proc_id[cpu] == phys_proc_id[i]) {
cpu_set(i, cpu_core_map[cpu]);
cpu_set(cpu, cpu_core_map[i]);
/*
* Does this new cpu bringup a new core?
*/
if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
/*
* for each core in package, increment
* the booted_cores for this new cpu
*/
if (first_cpu(cpu_sibling_map[i]) == i)
c[cpu].booted_cores++;
/*
* increment the core count for all
* the other cpus in this package
*/
if (i != cpu)
c[i].booted_cores++;
} else if (i != cpu && !c[cpu].booted_cores)
c[cpu].booted_cores = c[i].booted_cores;
}
}
} else {
cpu_core_map[cpu] = cpu_sibling_map[cpu];
}
}
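The booted_cores accounting above counts each core exactly once, when the first thread of its sibling group comes up. Reduced to its essence (a toy sketch, not the kernel data structures):

#include <stdio.h>

int main(void)
{
	/* Hypothetical 4-CPU package: cpus 0/1 are HT siblings on one core,
	 * cpus 2/3 on another.  Each entry is the first cpu of that group. */
	int sibling_leader[4] = { 0, 0, 2, 2 };
	int booted_cores = 0;

	/* A core is counted once, when its leader thread comes up. */
	for (int cpu = 0; cpu < 4; cpu++)
		if (sibling_leader[cpu] == cpu)
			booted_cores++;

	printf("booted_cores = %d\n", booted_cores);	/* 2 */
	return 0;
}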
/*
......@@ -1095,11 +1122,8 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
current_thread_info()->cpu = 0;
smp_tune_scheduling();
cpus_clear(cpu_sibling_map[0]);
cpu_set(0, cpu_sibling_map[0]);
cpus_clear(cpu_core_map[0]);
cpu_set(0, cpu_core_map[0]);
set_cpu_sibling_map(0);
/*
* If we couldn't find an SMP configuration at boot time,
......@@ -1278,15 +1302,24 @@ static void
remove_siblinginfo(int cpu)
{
int sibling;
struct cpuinfo_x86 *c = cpu_data;
for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
cpu_clear(cpu, cpu_core_map[sibling]);
/*
* last thread sibling in this cpu core going down
*/
if (cpus_weight(cpu_sibling_map[cpu]) == 1)
c[sibling].booted_cores--;
}
for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
cpu_clear(cpu, cpu_sibling_map[sibling]);
for_each_cpu_mask(sibling, cpu_core_map[cpu])
cpu_clear(cpu, cpu_core_map[sibling]);
cpus_clear(cpu_sibling_map[cpu]);
cpus_clear(cpu_core_map[cpu]);
phys_proc_id[cpu] = BAD_APICID;
cpu_core_id[cpu] = BAD_APICID;
cpu_clear(cpu, cpu_sibling_setup_map);
}
int __cpu_disable(void)
......
......@@ -137,8 +137,8 @@ static void __init parse_memory_affinity_structure (char *sratp)
"enabled and removable" : "enabled" ) );
}
#if MAX_NR_ZONES != 3
#error "MAX_NR_ZONES != 3, chunk_to_zone requires review"
#if MAX_NR_ZONES != 4
#error "MAX_NR_ZONES != 4, chunk_to_zone requires review"
#endif
/* Take a chunk of pages from page frame cstart to cend and count the number
* of pages in each zone, returned via zones[].
......
......@@ -58,6 +58,10 @@ config IA64_UNCACHED_ALLOCATOR
bool
select GENERIC_ALLOCATOR
config ZONE_DMA_IS_DMA32
bool
default y
choice
prompt "System type"
default IA64_GENERIC
......
......@@ -202,12 +202,9 @@ default_idle (void)
{
local_irq_enable();
while (!need_resched()) {
if (can_do_pal_halt) {
local_irq_disable();
if (!need_resched())
if (can_do_pal_halt)
safe_halt();
local_irq_enable();
} else
else
cpu_relax();
}
}
......@@ -272,10 +269,14 @@ cpu_idle (void)
{
void (*mark_idle)(int) = ia64_mark_idle;
int cpu = smp_processor_id();
set_thread_flag(TIF_POLLING_NRFLAG);
/* endless idle loop with no priority at all */
while (1) {
if (can_do_pal_halt)
clear_thread_flag(TIF_POLLING_NRFLAG);
else
set_thread_flag(TIF_POLLING_NRFLAG);
if (!need_resched()) {
void (*idle)(void);
#ifdef CONFIG_SMP
......
......@@ -261,7 +261,7 @@ config PPC_ISERIES
config EMBEDDED6xx
bool "Embedded 6xx/7xx/7xxx-based board"
depends on PPC32
depends on PPC32 && BROKEN
config APUS
bool "Amiga-APUS"
......@@ -305,7 +305,7 @@ config PPC_PMAC64
config PPC_PREP
bool " PowerPC Reference Platform (PReP) based machines"
depends on PPC_MULTIPLATFORM && PPC32
depends on PPC_MULTIPLATFORM && PPC32 && BROKEN
select PPC_I8259
select PPC_INDIRECT_PCI
default y
......@@ -932,6 +932,7 @@ source "arch/powerpc/oprofile/Kconfig"
config KPROBES
bool "Kprobes (EXPERIMENTAL)"
depends on PPC64
help
Kprobes allows you to trap at almost any kernel address and
execute a callback function. register_kprobe() establishes
......
......@@ -187,7 +187,7 @@ archprepare: checkbin
# Temporary hack until we have migrated to asm-powerpc
include/asm: arch/$(ARCH)/include/asm
arch/$(ARCH)/include/asm:
arch/$(ARCH)/include/asm: FORCE
$(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi
$(Q)ln -fsn $(srctree)/include/asm-$(OLDARCH) arch/$(ARCH)/include/asm
......
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.14-rc4
# Thu Oct 20 08:32:17 2005
# Linux kernel version: 2.6.15-rc1
# Mon Nov 14 15:27:00 2005
#
CONFIG_PPC64=y
CONFIG_64BIT=y
CONFIG_PPC_MERGE=y
CONFIG_MMU=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_ISA_DMA=y
CONFIG_PPC=y
CONFIG_EARLY_PRINTK=y
CONFIG_COMPAT=y
CONFIG_SYSVIPC_COMPAT=y
CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
CONFIG_ARCH_MAY_HAVE_PC_FDC=y
CONFIG_FORCE_MAX_ZONEORDER=13
#
# Processor support
#
# CONFIG_POWER4_ONLY is not set
CONFIG_POWER3=y
CONFIG_POWER4=y
CONFIG_PPC_FPU=y
CONFIG_ALTIVEC=y
CONFIG_PPC_STD_MMU=y
CONFIG_SMP=y
CONFIG_NR_CPUS=128
#
# Code maturity level options
......@@ -68,75 +83,103 @@ CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_KMOD=y
CONFIG_STOP_MACHINE=y
CONFIG_SYSVIPC_COMPAT=y
#
# Block layer
#
#
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
CONFIG_DEFAULT_AS=y
# CONFIG_DEFAULT_DEADLINE is not set
# CONFIG_DEFAULT_CFQ is not set
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="anticipatory"
#
# Platform support
#
# CONFIG_PPC_ISERIES is not set
CONFIG_PPC_MULTIPLATFORM=y
# CONFIG_PPC_ISERIES is not set
# CONFIG_EMBEDDED6xx is not set
# CONFIG_APUS is not set
CONFIG_PPC_PSERIES=y
# CONFIG_PPC_BPA is not set
# CONFIG_PPC_PMAC is not set
# CONFIG_PPC_MAPLE is not set
CONFIG_PPC=y
CONFIG_PPC64=y
# CONFIG_PPC_CELL is not set
CONFIG_PPC_OF=y
CONFIG_XICS=y
# CONFIG_U3_DART is not set
CONFIG_MPIC=y
CONFIG_ALTIVEC=y
CONFIG_PPC_SPLPAR=y
CONFIG_KEXEC=y
CONFIG_PPC_RTAS=y
CONFIG_RTAS_ERROR_LOGGING=y
CONFIG_RTAS_PROC=y
CONFIG_RTAS_FLASH=m
# CONFIG_MMIO_NVRAM is not set
CONFIG_IBMVIO=y
# CONFIG_U3_DART is not set
# CONFIG_BOOTX_TEXT is not set
# CONFIG_POWER4_ONLY is not set
# CONFIG_PPC_MPC106 is not set
# CONFIG_GENERIC_TBSYNC is not set
# CONFIG_CPU_FREQ is not set
# CONFIG_WANT_EARLY_SERIAL is not set
#
# Kernel options
#
# CONFIG_HZ_100 is not set
CONFIG_HZ_250=y
# CONFIG_HZ_1000 is not set
CONFIG_HZ=250
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
# CONFIG_PREEMPT_BKL is not set
CONFIG_BINFMT_ELF=y
# CONFIG_BINFMT_MISC is not set
CONFIG_FORCE_MAX_ZONEORDER=13
CONFIG_IOMMU_VMERGE=y
CONFIG_SMP=y
CONFIG_NR_CPUS=128
CONFIG_HOTPLUG_CPU=y
CONFIG_KEXEC=y
# CONFIG_IRQ_ALL_CPUS is not set
CONFIG_PPC_SPLPAR=y
CONFIG_EEH=y
CONFIG_SCANLOG=m
CONFIG_LPARCFG=y
CONFIG_NUMA=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_SELECT_MEMORY_MODEL=y
# CONFIG_FLATMEM_MANUAL is not set
CONFIG_DISCONTIGMEM_MANUAL=y
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_DISCONTIGMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
# CONFIG_DISCONTIGMEM_MANUAL is not set
CONFIG_SPARSEMEM_MANUAL=y
CONFIG_SPARSEMEM=y
CONFIG_NEED_MULTIPLE_NODES=y
CONFIG_HAVE_MEMORY_PRESENT=y
# CONFIG_SPARSEMEM_STATIC is not set
CONFIG_SPARSEMEM_EXTREME=y
# CONFIG_MEMORY_HOTPLUG is not set
CONFIG_SPLIT_PTLOCK_CPUS=4096
CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
CONFIG_NODES_SPAN_OTHER_NODES=y
CONFIG_NUMA=y
# CONFIG_PPC_64K_PAGES is not set
CONFIG_SCHED_SMT=y
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
# CONFIG_PREEMPT_BKL is not set
# CONFIG_HZ_100 is not set
CONFIG_HZ_250=y
# CONFIG_HZ_1000 is not set
CONFIG_HZ=250
CONFIG_EEH=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_PPC_RTAS=y
CONFIG_RTAS_PROC=y
CONFIG_RTAS_FLASH=m
CONFIG_SCANLOG=m
CONFIG_LPARCFG=y
CONFIG_SECCOMP=y
CONFIG_BINFMT_ELF=y
# CONFIG_BINFMT_MISC is not set
CONFIG_HOTPLUG_CPU=y
CONFIG_PROC_DEVICETREE=y
# CONFIG_CMDLINE_BOOL is not set
# CONFIG_PM is not set
CONFIG_SECCOMP=y
CONFIG_ISA_DMA_API=y
#
# Bus Options
# Bus options
#
CONFIG_GENERIC_ISA_DMA=y
CONFIG_PPC_I8259=y
# CONFIG_PPC_INDIRECT_PCI is not set
CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y
CONFIG_PCI_LEGACY_PROC=y
......@@ -156,6 +199,7 @@ CONFIG_HOTPLUG_PCI=m
# CONFIG_HOTPLUG_PCI_SHPC is not set
CONFIG_HOTPLUG_PCI_RPA=m
CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
CONFIG_KERNEL_START=0xc000000000000000
#
# Networking
......@@ -197,6 +241,10 @@ CONFIG_TCP_CONG_BIC=y
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_DEBUG is not set
#
# Core Netfilter Configuration
#
CONFIG_NETFILTER_NETLINK=y
CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NETFILTER_NETLINK_LOG=m
......@@ -299,6 +347,10 @@ CONFIG_LLC=y
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
#
# QoS and/or fair queueing
#
# CONFIG_NET_SCHED is not set
CONFIG_NET_CLS_ROUTE=y
......@@ -368,14 +420,6 @@ CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=65536
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CDROM_PKTCDVD is not set
#
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
# CONFIG_ATA_OVER_ETH is not set
#
......@@ -473,6 +517,7 @@ CONFIG_SCSI_ISCSI_ATTRS=m
#
# SCSI low-level drivers
#
# CONFIG_ISCSI_TCP is not set
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
# CONFIG_SCSI_3W_9XXX is not set
# CONFIG_SCSI_ACARD is not set
......@@ -559,6 +604,7 @@ CONFIG_DM_MULTIPATH_EMC=m
#
# Macintosh device drivers
#
# CONFIG_WINDFARM is not set
#
# Network device support
......@@ -645,7 +691,6 @@ CONFIG_IXGB=m
# CONFIG_IXGB_NAPI is not set
CONFIG_S2IO=m
# CONFIG_S2IO_NAPI is not set
# CONFIG_2BUFF_MODE is not set
#
# Token Ring devices
......@@ -674,6 +719,7 @@ CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
# CONFIG_PPP_MPPE is not set
CONFIG_PPPOE=m
# CONFIG_SLIP is not set
# CONFIG_NET_FC is not set
......@@ -784,6 +830,8 @@ CONFIG_HVCS=m
#
# CONFIG_WATCHDOG is not set
# CONFIG_RTC is not set
CONFIG_GEN_RTC=y
# CONFIG_GEN_RTC_X is not set
# CONFIG_DTLK is not set
# CONFIG_R3964 is not set
# CONFIG_APPLICOM is not set
......@@ -801,6 +849,7 @@ CONFIG_MAX_RAW_DEVS=1024
# TPM devices
#
# CONFIG_TCG_TPM is not set
# CONFIG_TELCLOCK is not set
#
# I2C support
......@@ -852,6 +901,7 @@ CONFIG_I2C_ALGOBIT=y
# CONFIG_SENSORS_PCF8591 is not set
# CONFIG_SENSORS_RTC8564 is not set
# CONFIG_SENSORS_MAX6875 is not set
# CONFIG_RTC_X1205_I2C is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
......@@ -893,7 +943,6 @@ CONFIG_FB=y
CONFIG_FB_CFB_FILLRECT=y
CONFIG_FB_CFB_COPYAREA=y
CONFIG_FB_CFB_IMAGEBLIT=y
CONFIG_FB_SOFT_CURSOR=y
CONFIG_FB_MACMODES=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
......@@ -905,6 +954,7 @@ CONFIG_FB_OF=y
# CONFIG_FB_ASILIANT is not set
# CONFIG_FB_IMSTT is not set
# CONFIG_FB_VGA16 is not set
# CONFIG_FB_S1D13XXX is not set
# CONFIG_FB_NVIDIA is not set
# CONFIG_FB_RIVA is not set
CONFIG_FB_MATROX=y
......@@ -927,7 +977,6 @@ CONFIG_FB_RADEON_I2C=y
# CONFIG_FB_VOODOO1 is not set
# CONFIG_FB_CYBLA is not set
# CONFIG_FB_TRIDENT is not set
# CONFIG_FB_S1D13XXX is not set
# CONFIG_FB_VIRTUAL is not set
#
......@@ -936,6 +985,7 @@ CONFIG_FB_RADEON_I2C=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_DUMMY_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE=y
# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
# CONFIG_FONTS is not set
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
......@@ -990,12 +1040,15 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
#
# USB Device Class drivers
#
# CONFIG_USB_BLUETOOTH_TTY is not set
# CONFIG_USB_ACM is not set
# CONFIG_USB_PRINTER is not set
#
# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
#
#
# may also be needed; see USB_STORAGE Help for more information
#
CONFIG_USB_STORAGE=y
# CONFIG_USB_STORAGE_DEBUG is not set
......@@ -1106,6 +1159,7 @@ CONFIG_INFINIBAND_MTHCA=m
# CONFIG_INFINIBAND_MTHCA_DEBUG is not set
CONFIG_INFINIBAND_IPOIB=m
# CONFIG_INFINIBAND_IPOIB_DEBUG is not set
# CONFIG_INFINIBAND_SRP is not set
#
# SN Devices
......@@ -1288,10 +1342,25 @@ CONFIG_NLS_ISO8859_1=y
# CONFIG_NLS_UTF8 is not set
#
# Profiling support
# Library routines
#
CONFIG_CRC_CCITT=m
# CONFIG_CRC16 is not set
CONFIG_CRC32=y
CONFIG_LIBCRC32C=m
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=m
CONFIG_TEXTSEARCH=y
CONFIG_TEXTSEARCH_KMP=m
CONFIG_TEXTSEARCH_BM=m
CONFIG_TEXTSEARCH_FSM=m
#
# Instrumentation Support
#
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
# CONFIG_KPROBES is not set
#
# Kernel hacking
......@@ -1308,14 +1377,15 @@ CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_DEBUG_KOBJECT is not set
# CONFIG_DEBUG_INFO is not set
CONFIG_DEBUG_FS=y
# CONFIG_DEBUG_VM is not set
# CONFIG_RCU_TORTURE_TEST is not set
CONFIG_DEBUG_STACKOVERFLOW=y
# CONFIG_KPROBES is not set
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUGGER=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
# CONFIG_PPCDBG is not set
CONFIG_IRQSTACKS=y
# CONFIG_BOOTX_TEXT is not set
#
# Security options
......@@ -1355,17 +1425,3 @@ CONFIG_CRYPTO_TEST=m
#
# Hardware crypto devices
#
#
# Library routines
#
CONFIG_CRC_CCITT=m
# CONFIG_CRC16 is not set
CONFIG_CRC32=y
CONFIG_LIBCRC32C=m
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=m
CONFIG_TEXTSEARCH=y
CONFIG_TEXTSEARCH_KMP=m
CONFIG_TEXTSEARCH_BM=m
CONFIG_TEXTSEARCH_FSM=m
......@@ -25,7 +25,7 @@ obj-$(CONFIG_PPC_OF) += of_device.o
procfs-$(CONFIG_PPC64) := proc_ppc64.o
obj-$(CONFIG_PROC_FS) += $(procfs-y)
rtaspci-$(CONFIG_PPC64) := rtas_pci.o
obj-$(CONFIG_PPC_RTAS) += rtas.o $(rtaspci-y)
obj-$(CONFIG_PPC_RTAS) += rtas.o rtas-rtc.o $(rtaspci-y)
obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
obj-$(CONFIG_RTAS_PROC) += rtas-proc.o
obj-$(CONFIG_LPARCFG) += lparcfg.o
......@@ -49,12 +49,23 @@ extra-y += vmlinux.lds
obj-y += process.o init_task.o time.o \
prom.o traps.o setup-common.o
obj-$(CONFIG_PPC32) += entry_32.o setup_32.o misc_32.o systbl.o
obj-$(CONFIG_PPC64) += misc_64.o
obj-$(CONFIG_PPC64) += misc_64.o dma_64.o iommu.o
obj-$(CONFIG_PPC_OF) += prom_init.o
obj-$(CONFIG_MODULES) += ppc_ksyms.o
obj-$(CONFIG_BOOTX_TEXT) += btext.o
obj-$(CONFIG_6xx) += idle_6xx.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_KPROBES) += kprobes.o
module-$(CONFIG_PPC64) += module_64.o
obj-$(CONFIG_MODULES) += $(module-y)
pci64-$(CONFIG_PPC64) += pci_64.o pci_dn.o pci_iommu.o \
pci_direct_iommu.o iomap.o
obj-$(CONFIG_PCI) += $(pci64-y)
kexec64-$(CONFIG_PPC64) += machine_kexec_64.o
obj-$(CONFIG_KEXEC) += $(kexec64-y)
ifeq ($(CONFIG_PPC_ISERIES),y)
$(obj)/head_64.o: $(obj)/lparmap.s
......@@ -62,11 +73,8 @@ AFLAGS_head_64.o += -I$(obj)
endif
else
# stuff used from here for ARCH=ppc or ARCH=ppc64
# stuff used from here for ARCH=ppc
smpobj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PPC64) += traps.o process.o init_task.o time.o \
setup-common.o $(smpobj-y)
endif
......
......@@ -270,13 +270,15 @@ int main(void)
DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec));
DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec));
DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec));
DEFINE(TSPC64_TV_SEC, offsetof(struct timespec, tv_sec));
DEFINE(TSPC64_TV_NSEC, offsetof(struct timespec, tv_nsec));
DEFINE(TSPC32_TV_SEC, offsetof(struct compat_timespec, tv_sec));
DEFINE(TSPC32_TV_NSEC, offsetof(struct compat_timespec, tv_nsec));
#else
DEFINE(TVAL32_TV_SEC, offsetof(struct timeval, tv_sec));
DEFINE(TVAL32_TV_USEC, offsetof(struct timeval, tv_usec));
DEFINE(TSPEC32_TV_SEC, offsetof(struct timespec, tv_sec));
DEFINE(TSPEC32_TV_NSEC, offsetof(struct timespec, tv_nsec));
DEFINE(TSPC32_TV_SEC, offsetof(struct timespec, tv_sec));
DEFINE(TSPC32_TV_NSEC, offsetof(struct timespec, tv_nsec));
#endif
/* timeval/timezone offsets for use by vdso */
DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
......
......@@ -71,6 +71,11 @@
#include <asm/paca.h>
#endif
int __irq_offset_value;
#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
#endif
static int ppc_spurious_interrupts;
#if defined(CONFIG_PPC_ISERIES) && defined(CONFIG_SMP)
......@@ -98,7 +103,6 @@ extern atomic_t ipi_sent;
EXPORT_SYMBOL(irq_desc);
int distribute_irqs = 1;
int __irq_offset_value;
u64 ppc64_interrupt_controller;
#endif /* CONFIG_PPC64 */
......@@ -311,7 +315,6 @@ void __init init_IRQ(void)
}
#ifdef CONFIG_PPC64
#ifndef CONFIG_PPC_ISERIES
/*
* Virtual IRQ mapping code, used on systems with XICS interrupt controllers.
*/
......@@ -420,8 +423,6 @@ unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)
}
#endif /* CONFIG_PPC_ISERIES */
#ifdef CONFIG_IRQSTACKS
struct thread_info *softirq_ctx[NR_CPUS];
struct thread_info *hardirq_ctx[NR_CPUS];
......
......@@ -42,32 +42,6 @@
/* #define LPARCFG_DEBUG */
/* find a better place for this function... */
static void log_plpar_hcall_return(unsigned long rc, char *tag)
{
if (rc == 0) /* success, return */
return;
/* check for null tag ? */
if (rc == H_Hardware)
printk(KERN_INFO
"plpar-hcall (%s) failed with hardware fault\n", tag);
else if (rc == H_Function)
printk(KERN_INFO
"plpar-hcall (%s) failed; function not allowed\n", tag);
else if (rc == H_Authority)
printk(KERN_INFO
"plpar-hcall (%s) failed; not authorized to this function\n",
tag);
else if (rc == H_Parameter)
printk(KERN_INFO "plpar-hcall (%s) failed; Bad parameter(s)\n",
tag);
else
printk(KERN_INFO
"plpar-hcall (%s) failed with unexpected rc(0x%lx)\n",
tag, rc);
}
static struct proc_dir_entry *proc_ppc64_lparcfg;
#define LPARCFG_BUFF_SIZE 4096
......@@ -172,6 +146,31 @@ static int lparcfg_data(struct seq_file *m, void *v)
/*
* Methods used to fetch LPAR data when running on a pSeries platform.
*/
/* find a better place for this function... */
static void log_plpar_hcall_return(unsigned long rc, char *tag)
{
if (rc == 0) /* success, return */
return;
/* check for null tag ? */
if (rc == H_Hardware)
printk(KERN_INFO
"plpar-hcall (%s) failed with hardware fault\n", tag);
else if (rc == H_Function)
printk(KERN_INFO
"plpar-hcall (%s) failed; function not allowed\n", tag);
else if (rc == H_Authority)
printk(KERN_INFO
"plpar-hcall (%s) failed; not authorized to this function\n",
tag);
else if (rc == H_Parameter)
printk(KERN_INFO "plpar-hcall (%s) failed; Bad parameter(s)\n",
tag);
else
printk(KERN_INFO
"plpar-hcall (%s) failed with unexpected rc(0x%lx)\n",
tag, rc);
}
/*
* H_GET_PPP hcall returns info in 4 parms.
......
......@@ -185,8 +185,8 @@ void kexec_copy_flush(struct kimage *image)
*/
void kexec_smp_down(void *arg)
{
if (ppc_md.cpu_irq_down)
ppc_md.cpu_irq_down(1);
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(0, 1);
local_irq_disable();
kexec_smp_wait();
......@@ -233,8 +233,8 @@ static void kexec_prepare_cpus(void)
}
/* after we tell the others to go down */
if (ppc_md.cpu_irq_down)
ppc_md.cpu_irq_down(0);
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(0, 0);
put_cpu();
......@@ -255,8 +255,8 @@ static void kexec_prepare_cpus(void)
* UP to an SMP kernel.
*/
smp_release_cpus();
if (ppc_md.cpu_irq_down)
ppc_md.cpu_irq_down(0);
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(0, 0);
local_irq_disable();
}
......@@ -305,3 +305,54 @@ void machine_kexec(struct kimage *image)
ppc_md.hpte_clear_all);
/* NOTREACHED */
}
/* Values we need to export to the second kernel via the device tree. */
static unsigned long htab_base, htab_size, kernel_end;
static struct property htab_base_prop = {
.name = "linux,htab-base",
.length = sizeof(unsigned long),
.value = (unsigned char *)&htab_base,
};
static struct property htab_size_prop = {
.name = "linux,htab-size",
.length = sizeof(unsigned long),
.value = (unsigned char *)&htab_size,
};
static struct property kernel_end_prop = {
.name = "linux,kernel-end",
.length = sizeof(unsigned long),
.value = (unsigned char *)&kernel_end,
};
static void __init export_htab_values(void)
{
struct device_node *node;
node = of_find_node_by_path("/chosen");
if (!node)
return;
kernel_end = __pa(_end);
prom_add_property(node, &kernel_end_prop);
/* On machines with no htab htab_address is NULL */
if (NULL == htab_address)
goto out;
htab_base = __pa(htab_address);
prom_add_property(node, &htab_base_prop);
htab_size = 1UL << ppc64_pft_size;
prom_add_property(node, &htab_size_prop);
out:
of_node_put(node);
}
void __init kexec_setup(void)
{
export_htab_values();
}
......@@ -105,6 +105,13 @@ EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__strnlen_user);
#ifndef __powerpc64__
EXPORT_SYMBOL(__ide_mm_insl);
EXPORT_SYMBOL(__ide_mm_outsw);
EXPORT_SYMBOL(__ide_mm_insw);
EXPORT_SYMBOL(__ide_mm_outsl);
#endif
EXPORT_SYMBOL(_insb);
EXPORT_SYMBOL(_outsb);
EXPORT_SYMBOL(_insw);
......
......@@ -1368,6 +1368,7 @@ prom_n_addr_cells(struct device_node* np)
/* No #address-cells property for the root node, default to 1 */
return 1;
}
EXPORT_SYMBOL(prom_n_addr_cells);
int
prom_n_size_cells(struct device_node* np)
......@@ -1383,6 +1384,7 @@ prom_n_size_cells(struct device_node* np)
/* No #size-cells property for the root node, default to 1 */
return 1;
}
EXPORT_SYMBOL(prom_n_size_cells);
/**
* Work out the sense (active-low level / active-high edge)
......
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/rtc.h>
#include <linux/delay.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/time.h>
#define MAX_RTC_WAIT 5000 /* 5 sec */
#define RTAS_CLOCK_BUSY (-2)
unsigned long __init rtas_get_boot_time(void)
{
int ret[8];
int error, wait_time;
unsigned long max_wait_tb;
max_wait_tb = get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
do {
error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);
if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
wait_time = rtas_extended_busy_delay_time(error);
/* This is boot time so we spin. */
udelay(wait_time*1000);
error = RTAS_CLOCK_BUSY;
}
} while (error == RTAS_CLOCK_BUSY && (get_tb() < max_wait_tb));
if (error != 0 && printk_ratelimit()) {
printk(KERN_WARNING "error: reading the clock failed (%d)\n",
error);
return 0;
}
return mktime(ret[0], ret[1], ret[2], ret[3], ret[4], ret[5]);
}
/* NOTE: get_rtc_time will get an error if executed in interrupt context
* and if a delay is needed to read the clock. In this case we just
* silently return without updating rtc_tm.
*/
void rtas_get_rtc_time(struct rtc_time *rtc_tm)
{
int ret[8];
int error, wait_time;
unsigned long max_wait_tb;
max_wait_tb = get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
do {
error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);
if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
if (in_interrupt() && printk_ratelimit()) {
memset(&rtc_tm, 0, sizeof(struct rtc_time));
printk(KERN_WARNING "error: reading clock"
" would delay interrupt\n");
return; /* delay not allowed */
}
wait_time = rtas_extended_busy_delay_time(error);
msleep(wait_time);
error = RTAS_CLOCK_BUSY;
}
} while (error == RTAS_CLOCK_BUSY && (get_tb() < max_wait_tb));
if (error != 0 && printk_ratelimit()) {
printk(KERN_WARNING "error: reading the clock failed (%d)\n",
error);
return;
}
rtc_tm->tm_sec = ret[5];
rtc_tm->tm_min = ret[4];
rtc_tm->tm_hour = ret[3];
rtc_tm->tm_mday = ret[2];
rtc_tm->tm_mon = ret[1] - 1;
rtc_tm->tm_year = ret[0] - 1900;
}
int rtas_set_rtc_time(struct rtc_time *tm)
{
int error, wait_time;
unsigned long max_wait_tb;
max_wait_tb = get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
do {
error = rtas_call(rtas_token("set-time-of-day"), 7, 1, NULL,
tm->tm_year + 1900, tm->tm_mon + 1,
tm->tm_mday, tm->tm_hour, tm->tm_min,
tm->tm_sec, 0);
if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
if (in_interrupt())
return 1; /* probably decrementer */
wait_time = rtas_extended_busy_delay_time(error);
msleep(wait_time);
error = RTAS_CLOCK_BUSY;
}
} while (error == RTAS_CLOCK_BUSY && (get_tb() < max_wait_tb));
if (error != 0 && printk_ratelimit())
printk(KERN_WARNING "error: setting the clock failed (%d)\n",
error);
return 0;
}
......@@ -57,10 +57,6 @@ extern void power4_idle(void);
boot_infos_t *boot_infos;
struct ide_machdep_calls ppc_ide_md;
/* XXX should go elsewhere */
int __irq_offset_value;
EXPORT_SYMBOL(__irq_offset_value);
int boot_cpuid;
EXPORT_SYMBOL_GPL(boot_cpuid);
int boot_cpuid_phys;
......
......@@ -59,6 +59,7 @@
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include "setup.h"
......@@ -415,6 +416,10 @@ void __init setup_system(void)
*/
unflatten_device_tree();
#ifdef CONFIG_KEXEC
kexec_setup(); /* requires unflattened device tree. */
#endif
/*
* Fill the ppc64_caches & systemcfg structures with information
* retrieved from the device-tree. Need to be called before
......
......@@ -403,8 +403,6 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
ELF_NFPREG * sizeof(double)))
return 1;
current->thread.fpscr.val = 0; /* turn off all fp exceptions */
#ifdef CONFIG_ALTIVEC
/* save altivec registers */
if (current->thread.used_vr) {
......@@ -818,6 +816,9 @@ static int handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
goto badframe;
regs->link = (unsigned long) frame->tramp;
}
current->thread.fpscr.val = 0; /* turn off all fp exceptions */
if (put_user(regs->gpr[1], (u32 __user *)newsp))
goto badframe;
regs->gpr[1] = newsp;
......@@ -1097,6 +1098,8 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
regs->link = (unsigned long) frame->mctx.tramp;
}
current->thread.fpscr.val = 0; /* turn off all fp exceptions */
if (put_user(regs->gpr[1], (u32 __user *)newsp))
goto badframe;
regs->gpr[1] = newsp;
......
......@@ -131,9 +131,6 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
flush_fp_to_thread(current);
/* Make sure signal doesn't get spurious FP exceptions */
current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
err |= __put_user(v_regs, &sc->v_regs);
......@@ -423,6 +420,9 @@ static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
if (err)
goto badframe;
/* Make sure signal handler doesn't get spurious FP exceptions */
current->thread.fpscr.val = 0;
/* Set up to return from userspace. */
if (vdso64_rt_sigtramp && current->thread.vdso_base) {
regs->link = current->thread.vdso_base + vdso64_rt_sigtramp;
......
......@@ -77,8 +77,9 @@ V_FUNCTION_BEGIN(__kernel_get_tbfreq)
mflr r12
.cfi_register lr,r12
bl __get_datapage@local
lwz r3,CFG_TB_TICKS_PER_SEC(r3)
lwz r4,(CFG_TB_TICKS_PER_SEC + 4)(r3)
lwz r3,CFG_TB_TICKS_PER_SEC(r3)
mtlr r12
blr
.cfi_endproc
V_FUNCTION_END(__kernel_get_tbfreq)
......@@ -83,7 +83,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
/* Check for supported clock IDs */
cmpli cr0,r3,CLOCK_REALTIME
cmpli cr1,r3,CLOCK_MONOTONIC
cror cr0,cr0,cr1
cror cr0*4+eq,cr0*4+eq,cr1*4+eq
bne cr0,99f
mflr r12 /* r12 saves lr */
......@@ -91,7 +91,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
mr r10,r3 /* r10 saves id */
mr r11,r4 /* r11 saves tp */
bl __get_datapage@local /* get data page */
mr r9, r3 /* datapage ptr in r9 */
mr r9,r3 /* datapage ptr in r9 */
beq cr1,50f /* if monotonic -> jump there */
/*
......@@ -173,10 +173,14 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
add r4,r4,r7
lis r5,NSEC_PER_SEC@h
ori r5,r5,NSEC_PER_SEC@l
cmpli cr0,r4,r5
cmpl cr0,r4,r5
cmpli cr1,r4,0
blt 1f
subf r4,r5,r4
addi r3,r3,1
1: bge cr1,1f
addi r3,r3,-1
add r4,r4,r5
1: stw r3,TSPC32_TV_SEC(r11)
stw r4,TSPC32_TV_NSEC(r11)
......@@ -210,7 +214,7 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
/* Check for supported clock IDs */
cmpwi cr0,r3,CLOCK_REALTIME
cmpwi cr1,r3,CLOCK_MONOTONIC
cror cr0,cr0,cr1
cror cr0*4+eq,cr0*4+eq,cr1*4+eq
bne cr0,99f
li r3,0
......
......@@ -80,5 +80,6 @@ V_FUNCTION_BEGIN(__kernel_get_tbfreq)
bl V_LOCAL_FUNC(__get_datapage)
ld r3,CFG_TB_TICKS_PER_SEC(r3)
mtlr r12
blr
.cfi_endproc
V_FUNCTION_END(__kernel_get_tbfreq)
/*
* Userland implementation of gettimeofday() for 64 bits processes in a
* ppc64 kernel for use in the vDSO
*
......@@ -68,7 +69,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
/* Check for supported clock IDs */
cmpwi cr0,r3,CLOCK_REALTIME
cmpwi cr1,r3,CLOCK_MONOTONIC
cror cr0,cr0,cr1
cror cr0*4+eq,cr0*4+eq,cr1*4+eq
bne cr0,99f
mflr r12 /* r12 saves lr */
......@@ -84,16 +85,17 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
bl V_LOCAL_FUNC(__do_get_xsec) /* get xsec from tb & kernel */
lis r7,0x3b9a /* r7 = 1000000000 = NSEC_PER_SEC */
ori r7,r7,0xca00
lis r7,15 /* r7 = 1000000 = USEC_PER_SEC */
ori r7,r7,16960
rldicl r5,r4,44,20 /* r5 = sec = xsec / XSEC_PER_SEC */
rldicr r6,r5,20,43 /* r6 = sec * XSEC_PER_SEC */
std r5,TSPC64_TV_SEC(r11) /* store sec in tv */
subf r0,r6,r4 /* r0 = xsec = (xsec - r6) */
mulld r0,r0,r7 /* nsec = (xsec * NSEC_PER_SEC) /
mulld r0,r0,r7 /* usec = (xsec * USEC_PER_SEC) /
* XSEC_PER_SEC
*/
rldicl r0,r0,44,20
mulli r0,r0,1000 /* nsec = usec * 1000 */
std r0,TSPC64_TV_NSEC(r11) /* store nsec in tp */
mtlr r12
......@@ -106,15 +108,16 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
50: bl V_LOCAL_FUNC(__do_get_xsec) /* get xsec from tb & kernel */
lis r7,0x3b9a /* r7 = 1000000000 = NSEC_PER_SEC */
ori r7,r7,0xca00
lis r7,15 /* r7 = 1000000 = USEC_PER_SEC */
ori r7,r7,16960
rldicl r5,r4,44,20 /* r5 = sec = xsec / XSEC_PER_SEC */
rldicr r6,r5,20,43 /* r6 = sec * XSEC_PER_SEC */
subf r0,r6,r4 /* r0 = xsec = (xsec - r6) */
mulld r0,r0,r7 /* nsec = (xsec * NSEC_PER_SEC) /
mulld r0,r0,r7 /* usec = (xsec * USEC_PER_SEC) /
* XSEC_PER_SEC
*/
rldicl r6,r0,44,20
mulli r6,r6,1000 /* nsec = usec * 1000 */
/* now we must fixup using wall to monotonic. We need to snapshot
* that value and do the counter trick again. Fortunately, we still
......@@ -123,8 +126,8 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
* can be used
*/
lwz r4,WTOM_CLOCK_SEC(r9)
lwz r7,WTOM_CLOCK_NSEC(r9)
lwa r4,WTOM_CLOCK_SEC(r3)
lwa r7,WTOM_CLOCK_NSEC(r3)
/* We now have our result in r4,r7. We create a fake dependency
* on that result and re-check the counter
......@@ -144,10 +147,14 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
add r7,r7,r6
lis r9,NSEC_PER_SEC@h
ori r9,r9,NSEC_PER_SEC@l
cmpli cr0,r7,r9
cmpl cr0,r7,r9
cmpli cr1,r7,0
blt 1f
subf r7,r9,r7
addi r4,r4,1
1: bge cr1,1f
addi r4,r4,-1
add r7,r7,r9
1: std r4,TSPC64_TV_SEC(r11)
std r7,TSPC64_TV_NSEC(r11)
......@@ -181,7 +188,7 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
/* Check for supported clock IDs */
cmpwi cr0,r3,CLOCK_REALTIME
cmpwi cr1,r3,CLOCK_MONOTONIC
cror cr0,cr0,cr1
cror cr0*4+eq,cr0*4+eq,cr1*4+eq
bne cr0,99f
li r3,0
......
......@@ -42,13 +42,6 @@
#include "irq.h"
#include "call_pci.h"
/* This maps virtual irq numbers to real irqs */
unsigned int virt_irq_to_real_map[NR_IRQS];
/* The next available virtual irq number */
/* Note: the pcnet32 driver assumes irq numbers < 2 aren't valid. :( */
static int next_virtual_irq = 2;
static long Pci_Interrupt_Count;
static long Pci_Event_Count;
......@@ -350,26 +343,14 @@ static hw_irq_controller iSeries_IRQ_handler = {
int __init iSeries_allocate_IRQ(HvBusNumber busNumber,
HvSubBusNumber subBusNumber, HvAgentId deviceId)
{
unsigned int realirq, virtirq;
int virtirq;
unsigned int realirq;
u8 idsel = (deviceId >> 4);
u8 function = deviceId & 7;
virtirq = next_virtual_irq++;
realirq = ((busNumber - 1) << 6) + ((idsel - 1) << 3) + function;
virt_irq_to_real_map[virtirq] = realirq;
virtirq = virt_irq_create_mapping(realirq);
irq_desc[virtirq].handler = &iSeries_IRQ_handler;
return virtirq;
}
int virt_irq_create_mapping(unsigned int real_irq)
{
BUG(); /* Don't call this on iSeries, yet */
return 0;
}
void virt_irq_init(void)
{
return;
}
......@@ -39,7 +39,6 @@
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/firmware.h>
#include <asm/systemcfg.h>
#include <asm/system.h>
#include <asm/time.h>
#include <asm/paca.h>
......@@ -548,8 +547,6 @@ static unsigned long __init build_iSeries_Memory_Map(void)
*/
static void __init iSeries_setup_arch(void)
{
unsigned procIx = get_paca()->lppaca.dyn_hv_phys_proc_index;
if (get_paca()->lppaca.shared_proc) {
ppc_md.idle_loop = iseries_shared_idle;
printk(KERN_INFO "Using shared processor idle loop\n");
......@@ -565,9 +562,6 @@ static void __init iSeries_setup_arch(void)
itVpdAreas.xSlicMaxLogicalProcs);
printk("Max physical processors = %d\n",
itVpdAreas.xSlicMaxPhysicalProcs);
_systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR;
printk("Processor version = %x\n", _systemcfg->processor);
}
static void iSeries_show_cpuinfo(struct seq_file *m)
......
......@@ -102,7 +102,7 @@ static unsigned long from_rtc_time(struct rtc_time *tm)
static unsigned long cuda_get_time(void)
{
struct adb_request req;
unsigned long now;
unsigned int now;
if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
return 0;
......@@ -113,7 +113,7 @@ static unsigned long cuda_get_time(void)
req.reply_len);
now = (req.reply[3] << 24) + (req.reply[4] << 16)
+ (req.reply[5] << 8) + req.reply[6];
return now - RTC_OFFSET;
return ((unsigned long)now) - RTC_OFFSET;
}
#define cuda_get_rtc_time(tm) to_rtc_time(cuda_get_time(), (tm))
......@@ -146,7 +146,7 @@ static int cuda_set_rtc_time(struct rtc_time *tm)
static unsigned long pmu_get_time(void)
{
struct adb_request req;
unsigned long now;
unsigned int now;
if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
return 0;
......@@ -156,7 +156,7 @@ static unsigned long pmu_get_time(void)
req.reply_len);
now = (req.reply[0] << 24) + (req.reply[1] << 16)
+ (req.reply[2] << 8) + req.reply[3];
return now - RTC_OFFSET;
return ((unsigned long)now) - RTC_OFFSET;
}
#define pmu_get_rtc_time(tm) to_rtc_time(pmu_get_time(), (tm))
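The cuda/pmu change above assembles the four RTC reply bytes into a 32-bit unsigned value and only widens it afterwards, so timestamps with the top bit set are zero-extended rather than sign-extended now that unsigned long is 64-bit. A standalone C illustration (the reply bytes are made up; the offset is the usual 1904-to-1970 Mac epoch delta):

#include <stdio.h>

#define RTC_OFFSET 2082844800UL	/* seconds between the 1904 Mac epoch and 1970 */

int main(void)
{
	/* Hypothetical RTC reply bytes with the top bit set. */
	unsigned char reply[4] = { 0xc0, 0x12, 0x34, 0x56 };

	/* Assemble the four bytes into a 32-bit unsigned value first ... */
	unsigned int now = ((unsigned int)reply[0] << 24) | (reply[1] << 16)
			 | (reply[2] << 8) | reply[3];

	/* ... then widen explicitly before the 64-bit subtraction, so the
	 * value is zero-extended rather than sign-extended. */
	unsigned long secs_since_1970 = (unsigned long)now - RTC_OFFSET;

	printf("raw=%#x unix=%lu\n", now, secs_since_1970);
	return 0;
}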
......@@ -199,6 +199,7 @@ static unsigned long smu_get_time(void)
#define smu_set_rtc_time(tm, spin) 0
#endif
/* Can't be __init, it's called when suspending and resuming */
unsigned long pmac_get_boot_time(void)
{
/* Get the time from the RTC, used only at boot time */
......
......@@ -5,3 +5,6 @@ obj-$(CONFIG_IBMVIO) += vio.o
obj-$(CONFIG_XICS) += xics.o
obj-$(CONFIG_SCANLOG) += scanlog.o
obj-$(CONFIG_EEH) += eeh.o eeh_event.o
obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
obj-$(CONFIG_HVCS) += hvcserver.o
......@@ -200,14 +200,12 @@ static void __init pSeries_setup_arch(void)
if (ppc64_interrupt_controller == IC_OPEN_PIC) {
ppc_md.init_IRQ = pSeries_init_mpic;
ppc_md.get_irq = mpic_get_irq;
ppc_md.cpu_irq_down = mpic_teardown_this_cpu;
/* Allocate the mpic now, so that find_and_init_phbs() can
* fill the ISUs */
pSeries_setup_mpic();
} else {
ppc_md.init_IRQ = xics_init_IRQ;
ppc_md.get_irq = xics_get_irq;
ppc_md.cpu_irq_down = xics_teardown_cpu;
}
#ifdef CONFIG_SMP
......@@ -595,6 +593,27 @@ static int pSeries_pci_probe_mode(struct pci_bus *bus)
return PCI_PROBE_NORMAL;
}
#ifdef CONFIG_KEXEC
static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
{
/* Don't risk a hypervisor call if we're crashing */
if (!crash_shutdown) {
unsigned long vpa = __pa(&get_paca()->lppaca);
if (unregister_vpa(hard_smp_processor_id(), vpa)) {
printk("VPA deregistration of cpu %u (hw_cpu_id %d) "
"failed\n", smp_processor_id(),
hard_smp_processor_id());
}
}
if (ppc64_interrupt_controller == IC_OPEN_PIC)
mpic_teardown_this_cpu(secondary);
else
xics_teardown_cpu(secondary);
}
#endif
struct machdep_calls __initdata pSeries_md = {
.probe = pSeries_probe,
.setup_arch = pSeries_setup_arch,
......@@ -617,4 +636,7 @@ struct machdep_calls __initdata pSeries_md = {
.check_legacy_ioport = pSeries_check_legacy_ioport,
.system_reset_exception = pSeries_system_reset_exception,
.machine_check_exception = pSeries_machine_check_exception,
#ifdef CONFIG_KEXEC
.kexec_cpu_down = pseries_kexec_cpu_down,
#endif
};
......@@ -2,45 +2,6 @@
# Makefile for the linux ppc64 kernel.
#
ifneq ($(CONFIG_PPC_MERGE),y)
EXTRA_CFLAGS += -mno-minimal-toc
extra-y := head.o vmlinux.lds
obj-y := misc.o prom.o
endif
obj-y += idle.o dma.o \
align.o \
rtc.o \
iommu.o
pci-obj-$(CONFIG_PPC_MULTIPLATFORM) += pci_dn.o pci_direct_iommu.o
obj-$(CONFIG_PCI) += pci.o pci_iommu.o iomap.o $(pci-obj-y)
obj-y += idle.o align.o
obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o
ifneq ($(CONFIG_PPC_MERGE),y)
obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o
endif
obj-$(CONFIG_KEXEC) += machine_kexec.o
obj-$(CONFIG_MODULES) += module.o
ifneq ($(CONFIG_PPC_MERGE),y)
obj-$(CONFIG_MODULES) += ppc_ksyms.o
endif
obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
ifneq ($(CONFIG_PPC_MERGE),y)
obj-$(CONFIG_BOOTX_TEXT) += btext.o
endif
obj-$(CONFIG_HVCS) += hvcserver.o
obj-$(CONFIG_KPROBES) += kprobes.o
ifneq ($(CONFIG_PPC_MERGE),y)
ifeq ($(CONFIG_PPC_ISERIES),y)
arch/ppc64/kernel/head.o: arch/powerpc/kernel/lparmap.s
AFLAGS_head.o += -Iarch/powerpc/kernel
endif
endif
/*
* This program is used to generate definitions needed by
* assembly language modules.
*
* We use the technique used in the OSF Mach kernel code:
* generate asm statements containing #defines,
* compile this file to assembler, and then extract the
* #defines from the assembly-language output.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/hardirq.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/paca.h>
#include <asm/lppaca.h>
#include <asm/iseries/hv_lp_event.h>
#include <asm/rtas.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/systemcfg.h>
#include <asm/compat.h>
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define BLANK() asm volatile("\n->" : : )
int main(void)
{
/* thread struct on stack */
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
/* task_struct->thread */
DEFINE(THREAD, offsetof(struct task_struct, thread));
DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
DEFINE(KSP, offsetof(struct thread_struct, ksp));
DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid));
#ifdef CONFIG_ALTIVEC
DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
#endif /* CONFIG_ALTIVEC */
DEFINE(MM, offsetof(struct task_struct, mm));
DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size));
DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_dline_size));
DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, dlines_per_page));
DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
DEFINE(PLATFORM_LPAR, PLATFORM_LPAR);
/* paca */
DEFINE(PACA_SIZE, sizeof(struct paca_struct));
DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
DEFINE(PACACURRENT, offsetof(struct paca_struct, __current));
DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled));
DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
#ifdef CONFIG_PPC_64K_PAGES
DEFINE(PACAPGDIR, offsetof(struct paca_struct, pgdir));
#endif
#ifdef CONFIG_HUGETLB_PAGE
DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
#endif /* CONFIG_HUGETLB_PAGE */
DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr));
DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
DEFINE(PACA_EXDSI, offsetof(struct paca_struct, exdsi));
DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
DEFINE(PACALPPACA, offsetof(struct paca_struct, lppaca));
DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
/* RTAS */
DEFINE(RTASBASE, offsetof(struct rtas_t, base));
DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));
/* Interrupt register frame */
DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
/* 288 = # of volatile regs, int & fp, for leaf routines */
/* which do not stack a frame. See the PPC64 ABI. */
DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 288);
/* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0]));
DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1]));
DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2]));
DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3]));
DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4]));
DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5]));
DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6]));
DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7]));
DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8]));
DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9]));
DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10]));
DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11]));
DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12]));
DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13]));
/*
* Note: these symbols include _ because they overlap with special
* register names
*/
DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip));
DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr));
DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr));
DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link));
DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr));
DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer));
DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe));
/* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */
DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs));
DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8);
DEFINE(CLONE_VM, CLONE_VM);
DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
/* About the CPU features table */
DEFINE(CPU_SPEC_ENTRY_SIZE, sizeof(struct cpu_spec));
DEFINE(CPU_SPEC_PVR_MASK, offsetof(struct cpu_spec, pvr_mask));
DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value));
DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
/* systemcfg offsets for use by vdso */
DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct systemcfg, tb_orig_stamp));
DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct systemcfg, tb_ticks_per_sec));
DEFINE(CFG_TB_TO_XS, offsetof(struct systemcfg, tb_to_xs));
DEFINE(CFG_STAMP_XSEC, offsetof(struct systemcfg, stamp_xsec));
DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct systemcfg, tb_update_count));
DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct systemcfg, tz_minuteswest));
DEFINE(CFG_TZ_DSTTIME, offsetof(struct systemcfg, tz_dsttime));
DEFINE(CFG_SYSCALL_MAP32, offsetof(struct systemcfg, syscall_map_32));
DEFINE(CFG_SYSCALL_MAP64, offsetof(struct systemcfg, syscall_map_64));
/* timeval/timezone offsets for use by vdso */
DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec));
DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec));
DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec));
DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec));
DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
return 0;
}
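The DEFINE() entries above never execute at run time: the file is only compiled to assembler, and the "->SYM value" markers it emits are post-processed by the build system into a generated header of #define constants that hand-written assembly can include. A hedged sketch of that round trip (the symbol values shown are invented for illustration; the real numbers depend on the structure layouts):
/*
 * Illustrative only -- not part of this file.  Compiling asm-offsets.c with
 * "gcc -S" leaves marker lines like these in the assembler output:
 *
 *	->TI_FLAGS 16
 *	->PACA_SIZE 1024
 *
 * which a sed rule in the kernel build rewrites into a generated header:
 *
 *	#define TI_FLAGS 16
 *	#define PACA_SIZE 1024
 *
 * so entry/head assembly can name structure offsets instead of hard-coding them.
 */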
/*
* (C) 2001 PPC 64 Team, IBM Corp
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/console.h>
#include <net/checksum.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/hw_irq.h>
#include <asm/abs_addr.h>
#include <asm/cacheflush.h>
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strncpy);
EXPORT_SYMBOL(strcat);
EXPORT_SYMBOL(strncat);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(strpbrk);
EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strnlen);
EXPORT_SYMBOL(strcmp);
EXPORT_SYMBOL(strncmp);
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_generic);
EXPORT_SYMBOL(ip_fast_csum);
EXPORT_SYMBOL(csum_tcpudp_magic);
EXPORT_SYMBOL(__copy_tofrom_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__strnlen_user);
EXPORT_SYMBOL(reloc_offset);
EXPORT_SYMBOL(_insb);
EXPORT_SYMBOL(_outsb);
EXPORT_SYMBOL(_insw);
EXPORT_SYMBOL(_outsw);
EXPORT_SYMBOL(_insl);
EXPORT_SYMBOL(_outsl);
EXPORT_SYMBOL(_insw_ns);
EXPORT_SYMBOL(_outsw_ns);
EXPORT_SYMBOL(_insl_ns);
EXPORT_SYMBOL(_outsl_ns);
EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL(giveup_fpu);
#ifdef CONFIG_ALTIVEC
EXPORT_SYMBOL(giveup_altivec);
#endif
EXPORT_SYMBOL(__flush_icache_range);
EXPORT_SYMBOL(flush_dcache_range);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memscan);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(timer_interrupt);
EXPORT_SYMBOL(console_drivers);
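These EXPORT_SYMBOL() lines are what make the routines above reachable from loadable modules. A hedged sketch of a hypothetical module using one of them (shown only to illustrate why the exports exist; it is not part of this file):
/* Illustrative sketch only -- not part of this file. */
#include <linux/module.h>
#include <linux/init.h>
#include <asm/cacheflush.h>

static char demo_buf[128];

static int __init demo_init(void)
{
	/* Callable from a module only because of EXPORT_SYMBOL(flush_dcache_range) above. */
	flush_dcache_range((unsigned long)demo_buf,
			   (unsigned long)demo_buf + sizeof(demo_buf));
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");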
/*
* Real Time Clock interface for PPC64.
*
* Based on rtc.c by Paul Gortmaker
*
* This driver allows use of the real time clock
* from user space. It exports the /dev/rtc
* interface supporting various ioctl() and also the
* /proc/driver/rtc pseudo-file for status information.
*
* The interface does not support RTC interrupts or an alarm.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* 1.0 Mike Corrigan: IBM iSeries rtc support
* 1.1 Dave Engebretsen: IBM pSeries rtc support
*/
#define RTC_VERSION "1.1"
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/mc146818rtc.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/bcd.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/time.h>
#include <asm/rtas.h>
#include <asm/machdep.h>
/*
* We sponge a minor off of the misc major. There is no need to slurp
* up another valuable major dev number for this. If you add
* an ioctl, make sure you don't conflict with SPARC's RTC
* ioctls.
*/
static ssize_t rtc_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos);
static int rtc_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg);
static int rtc_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data);
/*
* If this driver ever becomes modularised, it will be really nice
* to make the epoch retain its value across module reload...
*/
static unsigned long epoch = 1900; /* year corresponding to 0x00 */
static const unsigned char days_in_mo[] =
{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
/*
* Now all the various file operations that we export.
*/
static ssize_t rtc_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
return -EIO;
}
static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
unsigned long arg)
{
struct rtc_time wtime;
switch (cmd) {
case RTC_RD_TIME: /* Read the time/date from RTC */
{
memset(&wtime, 0, sizeof(struct rtc_time));
ppc_md.get_rtc_time(&wtime);
break;
}
case RTC_SET_TIME: /* Set the RTC */
{
struct rtc_time rtc_tm;
unsigned char mon, day, hrs, min, sec, leap_yr;
unsigned int yrs;
if (!capable(CAP_SYS_TIME))
return -EACCES;
if (copy_from_user(&rtc_tm, (struct rtc_time __user *)arg,
sizeof(struct rtc_time)))
return -EFAULT;
yrs = rtc_tm.tm_year;
mon = rtc_tm.tm_mon + 1; /* tm_mon starts at zero */
day = rtc_tm.tm_mday;
hrs = rtc_tm.tm_hour;
min = rtc_tm.tm_min;
sec = rtc_tm.tm_sec;
if (yrs < 70)
return -EINVAL;
/* yrs counts years since 1900, so test the full calendar year for leapness */
leap_yr = ((!((yrs + 1900) % 4) && ((yrs + 1900) % 100)) || !((yrs + 1900) % 400));
if ((mon > 12) || (day == 0))
return -EINVAL;
if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
return -EINVAL;
if ((hrs >= 24) || (min >= 60) || (sec >= 60))
return -EINVAL;
if ( yrs > 169 )
return -EINVAL;
ppc_md.set_rtc_time(&rtc_tm);
return 0;
}
case RTC_EPOCH_READ: /* Read the epoch. */
{
return put_user (epoch, (unsigned long __user *)arg);
}
case RTC_EPOCH_SET: /* Set the epoch. */
{
/*
* There were no RTC clocks before 1900.
*/
if (arg < 1900)
return -EINVAL;
if (!capable(CAP_SYS_TIME))
return -EACCES;
epoch = arg;
return 0;
}
default:
return -EINVAL;
}
return copy_to_user((void __user *)arg, &wtime, sizeof wtime) ? -EFAULT : 0;
}
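As the header comment says, the driver is used from user space through /dev/rtc ioctls. A hedged user-space sketch of the standard RTC_RD_TIME call (illustrative only, not code from this commit):
/* Illustrative user-space sketch only -- not part of this driver. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_time tm;
	int fd = open("/dev/rtc", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, RTC_RD_TIME, &tm) == 0)
		printf("%04d-%02d-%02d %02d:%02d:%02d\n",
		       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		       tm.tm_hour, tm.tm_min, tm.tm_sec);
	close(fd);
	return 0;
}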
static int rtc_open(struct inode *inode, struct file *file)
{
nonseekable_open(inode, file);
return 0;
}
static int rtc_release(struct inode *inode, struct file *file)
{
return 0;
}
/*
* The various file operations we support.
*/
static struct file_operations rtc_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.read = rtc_read,
.ioctl = rtc_ioctl,
.open = rtc_open,
.release = rtc_release,
};
static struct miscdevice rtc_dev = {
.minor = RTC_MINOR,
.name = "rtc",
.fops = &rtc_fops
};
static int __init rtc_init(void)
{
int retval;
retval = misc_register(&rtc_dev);
if(retval < 0)
return retval;
#ifdef CONFIG_PROC_FS
if (create_proc_read_entry("driver/rtc", 0, NULL, rtc_read_proc, NULL)
== NULL) {
misc_deregister(&rtc_dev);
return -ENOMEM;
}
#endif
printk(KERN_INFO "i/pSeries Real Time Clock Driver v" RTC_VERSION "\n");
return 0;
}
static void __exit rtc_exit (void)
{
remove_proc_entry ("driver/rtc", NULL);
misc_deregister(&rtc_dev);
}
module_init(rtc_init);
module_exit(rtc_exit);
/*
* Info exported via "/proc/driver/rtc".
*/
static int rtc_proc_output (char *buf)
{
char *p;
struct rtc_time tm;
p = buf;
ppc_md.get_rtc_time(&tm);
/*
* There is no way to tell if the user has the RTC set for local
* time or for Coordinated Universal Time (UTC). Probably local though.
*/
p += sprintf(p,
"rtc_time\t: %02d:%02d:%02d\n"
"rtc_date\t: %04d-%02d-%02d\n"
"rtc_epoch\t: %04lu\n",
tm.tm_hour, tm.tm_min, tm.tm_sec,
tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, epoch);
p += sprintf(p,
"DST_enable\t: no\n"
"BCD\t\t: yes\n"
"24hr\t\t: yes\n" );
return p - buf;
}
static int rtc_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
int len = rtc_proc_output(page);
/* Standard read_proc bookkeeping: flag EOF once the caller's window
* reaches the end of the generated text, then hand back the slice that
* starts at 'off' and is at most 'count' bytes long. */
if (len <= off + count)
*eof = 1;
*start = page + off;
len -= off;
if (len > count)
len = count;
if (len < 0)
len = 0;
return len;
}
#ifdef CONFIG_PPC_RTAS
#define MAX_RTC_WAIT 5000 /* 5 sec */
#define RTAS_CLOCK_BUSY (-2)
unsigned long rtas_get_boot_time(void)
{
int ret[8];
int error, wait_time;
unsigned long max_wait_tb;
max_wait_tb = __get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
do {
error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);
if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
wait_time = rtas_extended_busy_delay_time(error);
/* This is boot time so we spin. */
udelay(wait_time*1000);
error = RTAS_CLOCK_BUSY;
}
} while (error == RTAS_CLOCK_BUSY && (__get_tb() < max_wait_tb));
if (error != 0) {
/* Fail the call whether or not the warning is ratelimited. */
if (printk_ratelimit())
printk(KERN_WARNING "error: reading the clock failed (%d)\n",
error);
return 0;
}
return mktime(ret[0], ret[1], ret[2], ret[3], ret[4], ret[5]);
}
/* NOTE: get_rtc_time will get an error if executed in interrupt context
* and if a delay is needed to read the clock. In this case we just
* silently return without updating rtc_tm.
*/
void rtas_get_rtc_time(struct rtc_time *rtc_tm)
{
int ret[8];
int error, wait_time;
unsigned long max_wait_tb;
max_wait_tb = __get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
do {
error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);
if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
if (in_interrupt() && printk_ratelimit()) {
printk(KERN_WARNING "error: reading clock would delay interrupt\n");
return; /* delay not allowed */
}
wait_time = rtas_extended_busy_delay_time(error);
msleep_interruptible(wait_time);
error = RTAS_CLOCK_BUSY;
}
} while (error == RTAS_CLOCK_BUSY && (__get_tb() < max_wait_tb));
if (error != 0) {
/* Bail out whether or not the warning is ratelimited. */
if (printk_ratelimit())
printk(KERN_WARNING "error: reading the clock failed (%d)\n",
error);
return;
}
rtc_tm->tm_sec = ret[5];
rtc_tm->tm_min = ret[4];
rtc_tm->tm_hour = ret[3];
rtc_tm->tm_mday = ret[2];
rtc_tm->tm_mon = ret[1] - 1;
rtc_tm->tm_year = ret[0] - 1900;
}
int rtas_set_rtc_time(struct rtc_time *tm)
{
int error, wait_time;
unsigned long max_wait_tb;
max_wait_tb = __get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
do {
error = rtas_call(rtas_token("set-time-of-day"), 7, 1, NULL,
tm->tm_year + 1900, tm->tm_mon + 1,
tm->tm_mday, tm->tm_hour, tm->tm_min,
tm->tm_sec, 0);
if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
if (in_interrupt())
return 1; /* probably decrementer */
wait_time = rtas_extended_busy_delay_time(error);
msleep_interruptible(wait_time);
error = RTAS_CLOCK_BUSY;
}
} while (error == RTAS_CLOCK_BUSY && (__get_tb() < max_wait_tb));
if (error != 0 && printk_ratelimit())
printk(KERN_WARNING "error: setting the clock failed (%d)\n",
error);
return 0;
}
#endif
/*
* PowerPC-specific semaphore code.
*
* Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
* to eliminate the SMP races in the old version between the updates
* of `count' and `waking'. Now we use negative `count' values to
* indicate that some process(es) are waiting for the semaphore.
*/
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
#include <asm/errno.h>
/*
* Atomically update sem->count.
* This does the equivalent of the following:
*
* old_count = sem->count;
* tmp = MAX(old_count, 0) + incr;
* sem->count = tmp;
* return old_count;
*/
static inline int __sem_update_count(struct semaphore *sem, int incr)
{
int old_count, tmp;
__asm__ __volatile__("\n"
"1: lwarx %0,0,%3\n"
" srawi %1,%0,31\n"
" andc %1,%0,%1\n"
" add %1,%1,%4\n"
" stwcx. %1,0,%3\n"
" bne 1b"
: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
: "r" (&sem->count), "r" (incr), "m" (sem->count)
: "cc");
return old_count;
}
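For readers not fluent in lwarx/stwcx. reservation sequences, the loop above implements exactly the pseudo-code in the preceding comment. A rough C-level sketch of the same update, written against the generic atomic_cmpxchg() helper rather than the PowerPC primitives (illustrative only; the real code must use the reservation instructions to be atomic):
/* Illustrative sketch only -- not part of this file. */
static inline int sem_update_count_sketch(struct semaphore *sem, int incr)
{
	int old_count, new_count;

	do {
		old_count = atomic_read(&sem->count);
		/* tmp = MAX(old_count, 0) + incr, as in the comment above. */
		new_count = (old_count > 0 ? old_count : 0) + incr;
		/* Retry if another CPU changed count in the meantime. */
	} while (atomic_cmpxchg(&sem->count, old_count, new_count) != old_count);

	return old_count;
}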
void __up(struct semaphore *sem)
{
/*
* Note that we incremented count in up() before we came here,
* but that was ineffective since the result was <= 0, and
* any negative value of count is equivalent to 0.
* This ends up setting count to 1, unless count is now > 0
* (i.e. because some other cpu has called up() in the meantime),
* in which case we just increment count.
*/
__sem_update_count(sem, 1);
wake_up(&sem->wait);
}
EXPORT_SYMBOL(__up);
/*
* Note that when we come in to __down or __down_interruptible,
* we have already decremented count, but that decrement was
* ineffective since the result was < 0, and any negative value
* of count is equivalent to 0.
* Thus it is only when we decrement count from some value > 0
* that we have actually got the semaphore.
*/
void __sched __down(struct semaphore *sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
add_wait_queue_exclusive(&sem->wait, &wait);
/*
* Try to get the semaphore. If the count is > 0, then we've
* got the semaphore; we decrement count and exit the loop.
* If the count is 0 or negative, we set it to -1, indicating
* that we are asleep, and then sleep.
*/
while (__sem_update_count(sem, -1) <= 0) {
schedule();
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
}
remove_wait_queue(&sem->wait, &wait);
__set_task_state(tsk, TASK_RUNNING);
/*
* If there are any more sleepers, wake one of them up so
* that it can either get the semaphore, or set count to -1
* indicating that there are still processes sleeping.
*/
wake_up(&sem->wait);
}
EXPORT_SYMBOL(__down);
int __sched __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
__set_task_state(tsk, TASK_INTERRUPTIBLE);
add_wait_queue_exclusive(&sem->wait, &wait);
while (__sem_update_count(sem, -1) <= 0) {
if (signal_pending(current)) {
/*
* A signal is pending - give up trying.
* Set sem->count to 0 if it is negative,
* since we are no longer sleeping.
*/
__sem_update_count(sem, 0);
retval = -EINTR;
break;
}
schedule();
set_task_state(tsk, TASK_INTERRUPTIBLE);
}
remove_wait_queue(&sem->wait, &wait);
__set_task_state(tsk, TASK_RUNNING);
wake_up(&sem->wait);
return retval;
}
EXPORT_SYMBOL(__down_interruptible);
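__down(), __down_interruptible() and __up() are only the contended slow paths; normal code goes through the inline down()/up() wrappers. A hedged usage sketch, assuming the <asm/semaphore.h> interface of this era:
/* Illustrative sketch only -- not part of this file. */
#include <asm/semaphore.h>

static DECLARE_MUTEX(demo_sem);	/* binary semaphore, count initialised to 1 */

static void demo(void)
{
	if (down_interruptible(&demo_sem))
		return;		/* interrupted by a signal before acquiring */
	/* ... critical section ... */
	up(&demo_sem);
}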
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
OUTPUT_ARCH(powerpc:common64)
jiffies = jiffies_64;
SECTIONS
{
/* Sections to be discarded. */
/DISCARD/ : {
*(.exitcall.exit)
}
/* Read-only sections, merged into text segment: */
.text : {
*(.text .text.*)
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.fixup)
. = ALIGN(PAGE_SIZE);
_etext = .;
}
__ex_table : {
__start___ex_table = .;
*(__ex_table)
__stop___ex_table = .;
}
__bug_table : {
__start___bug_table = .;
*(__bug_table)
__stop___bug_table = .;
}
__ftr_fixup : {
__start___ftr_fixup = .;
*(__ftr_fixup)
__stop___ftr_fixup = .;
}
RODATA
/* will be freed after init */
. = ALIGN(PAGE_SIZE);
__init_begin = .;
.init.text : {
_sinittext = .;
*(.init.text)
_einittext = .;
}
.init.data : {
*(.init.data)
}
. = ALIGN(16);
.init.setup : {
__setup_start = .;
*(.init.setup)
__setup_end = .;
}
.initcall.init : {
__initcall_start = .;
*(.initcall1.init)
*(.initcall2.init)
*(.initcall3.init)
*(.initcall4.init)
*(.initcall5.init)
*(.initcall6.init)
*(.initcall7.init)
__initcall_end = .;
}
.con_initcall.init : {
__con_initcall_start = .;
*(.con_initcall.init)
__con_initcall_end = .;
}
SECURITY_INIT
. = ALIGN(PAGE_SIZE);
.init.ramfs : {
__initramfs_start = .;
*(.init.ramfs)
__initramfs_end = .;
}
.data.percpu : {
__per_cpu_start = .;
*(.data.percpu)
__per_cpu_end = .;
}
. = ALIGN(PAGE_SIZE);
. = ALIGN(16384);
__init_end = .;
/* freed after init ends here */
/* Read/write sections */
. = ALIGN(PAGE_SIZE);
. = ALIGN(16384);
_sdata = .;
/* The initial task and kernel stack */
.data.init_task : {
*(.data.init_task)
}
. = ALIGN(PAGE_SIZE);
.data.page_aligned : {
*(.data.page_aligned)
}
.data.cacheline_aligned : {
*(.data.cacheline_aligned)
}
.data : {
*(.data .data.rel* .toc1)
*(.branch_lt)
}
.opd : {
*(.opd)
}
.got : {
__toc_start = .;
*(.got)
*(.toc)
. = ALIGN(PAGE_SIZE);
_edata = .;
}
. = ALIGN(PAGE_SIZE);
.bss : {
__bss_start = .;
*(.bss)
__bss_stop = .;
}
. = ALIGN(PAGE_SIZE);
_end = . ;
}
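The paired start/end symbols defined above (for example __initcall_start and __initcall_end) exist so that C code can walk whatever the linker gathered into a section. A simplified sketch of the initcall consumer, modelled loosely on init/main.c (details differ across kernel versions):
/* Illustrative sketch only -- not part of this file. */
extern initcall_t __initcall_start[], __initcall_end[];

static void __init do_initcalls_sketch(void)
{
	initcall_t *call;

	/* The linker script placed every *(.initcallN.init) entry, in level
	 * order, between these two symbols, so boot simply iterates them. */
	for (call = __initcall_start; call < __initcall_end; call++)
		(*call)();
}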
......@@ -23,6 +23,14 @@ config GENERIC_CALIBRATE_DELAY
bool
default y
config GENERIC_HARDIRQS
bool
default y
config GENERIC_IRQ_PROBE
bool
default y
# Turn off some random 386 crap that can affect device config
config ISA
bool
......
......@@ -2,15 +2,6 @@ menu "Kernel hacking"
source "lib/Kconfig.debug"
# !SMP for now because the context switch early causes GPF in segment reloading
# and the GS base checking does the wrong thing then, causing a hang.
config CHECKING
bool "Additional run-time checks"
depends on DEBUG_KERNEL && !SMP
help
Enables some internal consistency checks for kernel debugging.
You should normally say N.
config INIT_DEBUG
bool "Debug __init statements"
depends on DEBUG_KERNEL
......
......@@ -11,6 +11,7 @@ obj-y := process.o signal.o entry.o traps.o irq.o \
obj-$(CONFIG_X86_MCE) += mce.o
obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o
obj-$(CONFIG_X86_MCE_AMD) += mce_amd.o
obj-$(CONFIG_MTRR) += ../../i386/kernel/cpu/mtrr/
obj-$(CONFIG_ACPI) += acpi/
obj-$(CONFIG_X86_MSR) += msr.o
......