Commit cf77e988 authored by Linus Torvalds

Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] Drop duplicated "config IOMMU_HELPER"
  [IA64] invoke oom-killer from page fault
  [IA64] use __ratelimit
  [IA64] Use set_cpus_allowed_ptr
  [IA64] Use set_cpus_allowed_ptr
  [IA64] arch/ia64/hp/common/sba_iommu.c: Rename dev_info to adi
  [IA64] removing redundant ifdef
parents 1d3c6ff4 2a2ae242
@@ -59,9 +59,6 @@ config NEED_DMA_MAP_STATE
 config SWIOTLB
 	bool
 
-config IOMMU_HELPER
-	bool
-
 config GENERIC_LOCKBREAK
 	def_bool n
...
@@ -2046,13 +2046,13 @@ acpi_sba_ioc_add(struct acpi_device *device)
 	struct ioc *ioc;
 	acpi_status status;
 	u64 hpa, length;
-	struct acpi_device_info *dev_info;
+	struct acpi_device_info *adi;
 
 	status = hp_acpi_csr_space(device->handle, &hpa, &length);
 	if (ACPI_FAILURE(status))
 		return 1;
 
-	status = acpi_get_object_info(device->handle, &dev_info);
+	status = acpi_get_object_info(device->handle, &adi);
 	if (ACPI_FAILURE(status))
 		return 1;
@@ -2060,13 +2060,13 @@ acpi_sba_ioc_add(struct acpi_device *device)
 	 * For HWP0001, only SBA appears in ACPI namespace.  It encloses the PCI
 	 * root bridges, and its CSR space includes the IOC function.
 	 */
-	if (strncmp("HWP0001", dev_info->hardware_id.string, 7) == 0) {
+	if (strncmp("HWP0001", adi->hardware_id.string, 7) == 0) {
 		hpa += ZX1_IOC_OFFSET;
 		/* zx1 based systems default to kernel page size iommu pages */
 		if (!iovp_shift)
 			iovp_shift = min(PAGE_SHIFT, 16);
 	}
-	kfree(dev_info);
+	kfree(adi);
 
 	/*
 	 * default anything not caught above or specified on cmdline to 4k
...
@@ -19,16 +19,12 @@
 static inline int pfn_to_nid(unsigned long pfn)
 {
-#ifdef CONFIG_NUMA
 	extern int paddr_to_nid(unsigned long);
 	int nid = paddr_to_nid(pfn << PAGE_SHIFT);
 
 	if (nid < 0)
 		return 0;
 	else
 		return nid;
-#else
-	return 0;
-#endif
 }
 
 #ifdef CONFIG_IA64_DIG /* DIG systems are small */
...
@@ -113,7 +113,7 @@ processor_get_freq (
 	dprintk("processor_get_freq\n");
 
 	saved_mask = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 	if (smp_processor_id() != cpu)
 		goto migrate_end;
@@ -121,7 +121,7 @@ processor_get_freq (
 	ret = processor_get_pstate(&value);
 
 	if (ret) {
-		set_cpus_allowed(current, saved_mask);
+		set_cpus_allowed_ptr(current, &saved_mask);
 		printk(KERN_WARNING "get performance failed with error %d\n",
 		       ret);
 		ret = 0;
@@ -131,7 +131,7 @@ processor_get_freq (
 	ret = (clock_freq*1000);
 
 migrate_end:
-	set_cpus_allowed(current, saved_mask);
+	set_cpus_allowed_ptr(current, &saved_mask);
 	return ret;
 }
@@ -151,7 +151,7 @@ processor_set_freq (
 	dprintk("processor_set_freq\n");
 
 	saved_mask = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 	if (smp_processor_id() != cpu) {
 		retval = -EAGAIN;
 		goto migrate_end;
@@ -208,7 +208,7 @@ processor_set_freq (
 	retval = 0;
 
 migrate_end:
-	set_cpus_allowed(current, saved_mask);
+	set_cpus_allowed_ptr(current, &saved_mask);
 	return (retval);
 }
...
@@ -29,6 +29,7 @@
 #include <linux/threads.h>
 #include <linux/bitops.h>
 #include <linux/irq.h>
+#include <linux/ratelimit.h>
 
 #include <asm/delay.h>
 #include <asm/intrinsics.h>
@@ -467,13 +468,9 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 	sp = ia64_getreg(_IA64_REG_SP);
 
 	if ((sp - bsp) < 1024) {
-		static unsigned char count;
-		static long last_time;
+		static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
 
-		if (time_after(jiffies, last_time + 5 * HZ))
-			count = 0;
-		if (++count < 5) {
-			last_time = jiffies;
+		if (__ratelimit(&ratelimit)) {
 			printk("ia64_handle_irq: DANGER: less than "
 			       "1KB of free stack space!!\n"
 			       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
...
@@ -404,10 +404,9 @@ static void
 call_on_cpu(int cpu, void (*fn)(void *), void *arg)
 {
 	cpumask_t save_cpus_allowed = current->cpus_allowed;
-	cpumask_t new_cpus_allowed = cpumask_of_cpu(cpu);
-	set_cpus_allowed(current, new_cpus_allowed);
+	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 	(*fn)(arg);
-	set_cpus_allowed(current, save_cpus_allowed);
+	set_cpus_allowed_ptr(current, &save_cpus_allowed);
 }
 
 static void
...
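call_on_cpu() above shows the pattern behind all of these conversions: save current->cpus_allowed, pin the task to one CPU with set_cpus_allowed_ptr(current, cpumask_of(cpu)), run the work there, then restore the saved mask by pointer. Below is a rough userspace analogue of that save/pin/run/restore pattern using sched_setaffinity(); it is only an illustrative sketch, and run_on_cpu() is a name invented here, not a kernel or libc API.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

/* Userspace analogue of the call_on_cpu() pattern above: remember the
 * current affinity mask, pin the task to a single CPU, run the callback
 * there, then restore the saved mask.  run_on_cpu() is a name made up
 * for this sketch, not a kernel API. */
static int run_on_cpu(int cpu, void (*fn)(void *), void *arg)
{
	cpu_set_t saved, target;

	if (sched_getaffinity(0, sizeof(saved), &saved))	/* save current mask */
		return -1;

	CPU_ZERO(&target);
	CPU_SET(cpu, &target);
	if (sched_setaffinity(0, sizeof(target), &target))	/* pin to one CPU */
		return -1;

	fn(arg);						/* runs on `cpu` */

	return sched_setaffinity(0, sizeof(saved), &saved);	/* restore mask */
}

static void report(void *arg)
{
	(void)arg;
	printf("running on CPU %d\n", sched_getcpu());
}

int main(void)
{
	return run_on_cpu(0, report, NULL) ? 1 : 0;
}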
@@ -361,12 +361,12 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 		return 0;
 
 	oldmask = current->cpus_allowed;
-	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	retval = set_cpus_allowed_ptr(current, cpumask_of(cpu));
 	if (unlikely(retval))
 		return retval;
 
 	retval = cpu_cache_sysfs_init(cpu);
-	set_cpus_allowed(current, oldmask);
+	set_cpus_allowed_ptr(current, &oldmask);
 	if (unlikely(retval < 0))
 		return retval;
...
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/tty.h>
+#include <linux/ratelimit.h>
 
 #include <asm/intrinsics.h>
 #include <asm/processor.h>
@@ -1283,24 +1284,9 @@ emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
 /*
  * Make sure we log the unaligned access, so that user/sysadmin can notice it and
  * eventually fix the program.  However, we don't want to do that for every access so we
- * pace it with jiffies.  This isn't really MP-safe, but it doesn't really have to be
- * either...
+ * pace it with jiffies.
  */
-static int
-within_logging_rate_limit (void)
-{
-	static unsigned long count, last_time;
-
-	if (time_after(jiffies, last_time + 5 * HZ))
-		count = 0;
-
-	if (count < 5) {
-		last_time = jiffies;
-		count++;
-		return 1;
-	}
-	return 0;
-}
+static DEFINE_RATELIMIT_STATE(logging_rate_limit, 5 * HZ, 5);
 
 void
 ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
@@ -1337,7 +1323,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
 	if (!no_unaligned_warning &&
 	    !(current->thread.flags & IA64_THREAD_UAC_NOPRINT) &&
-	    within_logging_rate_limit())
+	    __ratelimit(&logging_rate_limit))
 	{
 		char buf[200];	/* comm[] is at most 16 bytes... */
 		size_t len;
@@ -1370,7 +1356,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
 			}
 		}
 	} else {
-		if (within_logging_rate_limit()) {
+		if (__ratelimit(&logging_rate_limit)) {
 			printk(KERN_WARNING "kernel unaligned access to 0x%016lx, ip=0x%016lx\n",
 			       ifa, regs->cr_iip + ipsr->ri);
 			if (unaligned_dump_stack)
...
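The helper removed above (and the one in ia64_handle_irq) open-coded the pacing that the generic ratelimit API now provides: DEFINE_RATELIMIT_STATE(logging_rate_limit, 5 * HZ, 5) allows at most 5 messages per 5-second window, and __ratelimit() returns nonzero while that budget lasts. As a minimal userspace sketch of the same interval/burst logic, assuming time() in place of jiffies and a made-up function name, not the kernel API itself:

#include <stdio.h>
#include <time.h>

/* Illustrative userspace approximation of the pacing expressed in the
 * kernel by DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 5) + __ratelimit(&rs):
 * allow at most `burst` events per `interval` seconds.  The names and
 * the use of time() are assumptions for this sketch. */
static int within_rate_limit(void)
{
	static time_t window_start;
	static unsigned int count;
	const time_t interval = 5;	/* seconds, stands in for 5 * HZ */
	const unsigned int burst = 5;

	time_t now = time(NULL);

	if (now - window_start >= interval) {	/* new window: reset the budget */
		window_start = now;
		count = 0;
	}
	if (count < burst) {
		count++;
		return 1;	/* caller may log */
	}
	return 0;		/* suppressed */
}

int main(void)
{
	for (int i = 0; i < 8; i++)
		printf("event %d: %s\n", i, within_rate_limit() ? "logged" : "suppressed");
	return 0;
}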
@@ -148,7 +148,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	if ((vma->vm_flags & mask) != mask)
 		goto bad_area;
 
-  survive:
 	/*
 	 * If for any reason at all we couldn't handle the fault, make
 	 * sure we exit gracefully rather than endlessly redo the
@@ -276,13 +275,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
   out_of_memory:
 	up_read(&mm->mmap_sem);
-	if (is_global_init(current)) {
-		yield();
-		down_read(&mm->mmap_sem);
-		goto survive;
-	}
-	printk(KERN_CRIT "VM: killing process %s\n", current->comm);
-	if (user_mode(regs))
-		do_group_exit(SIGKILL);
-	goto no_context;
+	if (!user_mode(regs))
+		goto no_context;
+	pagefault_out_of_memory();
 }
@@ -629,9 +629,9 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
 		else {
 			/* migrate the task before calling SAL */
 			save_allowed = current->cpus_allowed;
-			set_cpus_allowed(current, cpumask_of_cpu(cpu));
+			set_cpus_allowed_ptr(current, cpumask_of(cpu));
 			sn_hwperf_call_sal(op_info);
-			set_cpus_allowed(current, save_allowed);
+			set_cpus_allowed_ptr(current, &save_allowed);
 		}
 	}
 	r = op_info->ret;
...