Commit 96edcf31 authored by Linus Torvalds

Merge branch 'fixes-3.9-late' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull late parisc fixes from Helge Deller:
 "I know it's *very* late in the 3.9 release cycle, but since there
  aren't that many people testing the parisc linux kernel, a few (for
  our port) critical issues just showed up a few days back for the first
  time.

  What's in it?
   - add missing __ucmpdi2 symbol, which is required for btrfs on 32bit
     kernel.
   - change kunmap() macro to static inline function.  This fixes a
     debian/gcc-4.4 build error.
   - add locking when doing PTE updates.  This fixes random userspace
     crashes.
   - disable (optional) -mlong-calls compiler option for modules, else
     modules can't be loaded at runtime.
   - a smart patch by Will Deacon which fixes 64bit put_user() warnings
     on 32bit kernel."

* 'fixes-3.9-late' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: use spin_lock_irqsave/spin_unlock_irqrestore for PTE updates
  parisc: disable -mlong-calls compiler option for kernel modules
  parisc: uaccess: fix compiler warnings caused by __put_user casting
  parisc: Change kunmap macro to static inline function
  parisc: Provide __ucmpdi2 to resolve undefined references in 32 bit builds.
parents f464246d bda079d3
@@ -65,8 +65,10 @@ ifndef CONFIG_FUNCTION_TRACER
 endif
 
 # Use long jumps instead of long branches (needed if your linker fails to
-# link a too big vmlinux executable)
-cflags-$(CONFIG_MLONGCALLS) += -mlong-calls
+# link a too big vmlinux executable). Not enabled for building modules.
+ifdef CONFIG_MLONGCALLS
+KBUILD_CFLAGS_KERNEL += -mlong-calls
+endif
 
 # select which processor to optimise for
 cflags-$(CONFIG_PA7100) += -march=1.1 -mschedule=7100

@@ -140,7 +140,10 @@ static inline void *kmap(struct page *page)
         return page_address(page);
 }
 
-#define kunmap(page) kunmap_parisc(page_address(page))
+static inline void kunmap(struct page *page)
+{
+        kunmap_parisc(page_address(page));
+}
 
 static inline void *kmap_atomic(struct page *page)
 {

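The merge message only says this hunk fixes a debian/gcc-4.4 build error. Independent of that specific failure, the change also buys the usual advantages of a static inline over a function-like macro: the argument is type-checked as a struct page * and behaves like an ordinary function argument instead of being pasted in textually. A minimal stand-alone sketch of that difference (kunmap_macro and kunmap_inline are illustrative names, and the kunmap_parisc()/page_address() prototypes are assumed from the kernel headers):

struct page;                            /* opaque forward declaration for this sketch */
void kunmap_parisc(void *addr);         /* assumed prototype, as used by the header above */
void *page_address(struct page *page);  /* assumed prototype */

/* Macro version: any expression is accepted; nothing checks that it
 * really is a struct page pointer. */
#define kunmap_macro(page) kunmap_parisc(page_address(page))

/* Inline version: the compiler checks the argument type and the call
 * site behaves like a normal function call. */
static inline void kunmap_inline(struct page *page)
{
        kunmap_parisc(page_address(page));
}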
@@ -16,6 +16,8 @@
 #include <asm/processor.h>
 #include <asm/cache.h>
 
+extern spinlock_t pa_dbit_lock;
+
 /*
  * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
  * memory. For the return value to be meaningful, ADDR must be >=
@@ -44,8 +46,11 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
 
 #define set_pte_at(mm, addr, ptep, pteval)                      \
         do {                                                    \
+                unsigned long flags;                            \
+                spin_lock_irqsave(&pa_dbit_lock, flags);        \
                 set_pte(ptep, pteval);                          \
                 purge_tlb_entries(mm, addr);                    \
+                spin_unlock_irqrestore(&pa_dbit_lock, flags);   \
         } while (0)
 
 #endif /* !__ASSEMBLY__ */
@@ -435,48 +440,46 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
 
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
-#ifdef CONFIG_SMP
+        pte_t pte;
+        unsigned long flags;
+
         if (!pte_young(*ptep))
                 return 0;
-        return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep));
-#else
-        pte_t pte = *ptep;
-        if (!pte_young(pte))
+
+        spin_lock_irqsave(&pa_dbit_lock, flags);
+        pte = *ptep;
+        if (!pte_young(pte)) {
+                spin_unlock_irqrestore(&pa_dbit_lock, flags);
                 return 0;
-        set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
+        }
+        set_pte(ptep, pte_mkold(pte));
+        purge_tlb_entries(vma->vm_mm, addr);
+        spin_unlock_irqrestore(&pa_dbit_lock, flags);
         return 1;
-#endif
 }
 
-extern spinlock_t pa_dbit_lock;
-
 struct mm_struct;
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
         pte_t old_pte;
+        unsigned long flags;
 
-        spin_lock(&pa_dbit_lock);
+        spin_lock_irqsave(&pa_dbit_lock, flags);
         old_pte = *ptep;
         pte_clear(mm,addr,ptep);
-        spin_unlock(&pa_dbit_lock);
         purge_tlb_entries(mm, addr);
+        spin_unlock_irqrestore(&pa_dbit_lock, flags);
 
         return old_pte;
 }
 
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-#ifdef CONFIG_SMP
-        unsigned long new, old;
-        do {
-                old = pte_val(*ptep);
-                new = pte_val(pte_wrprotect(__pte (old)));
-        } while (cmpxchg((unsigned long *) ptep, old, new) != old);
+        unsigned long flags;
+        spin_lock_irqsave(&pa_dbit_lock, flags);
+        set_pte(ptep, pte_wrprotect(*ptep));
         purge_tlb_entries(mm, addr);
-#else
-        pte_t old_pte = *ptep;
-        set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
-#endif
+        spin_unlock_irqrestore(&pa_dbit_lock, flags);
 }
 
 #define pte_same(A,B) (pte_val(A) == pte_val(B))

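All of the helpers above now funnel through the same pattern that set_pte_at() uses: take pa_dbit_lock, rewrite the PTE, purge the stale TLB entry, then release the lock. The switch from spin_lock() to the irqsave variants means the lock can be taken safely regardless of whether interrupts are enabled at the call site. A condensed sketch of that pattern, assuming the kernel context of the diff (set_pte_locked is an invented name, not a helper the patch adds):

/* Hypothetical condensation of the pattern applied above: the PTE rewrite
 * and its matching TLB purge happen as one unit under pa_dbit_lock, with
 * interrupts disabled while the lock is held. */
static inline void set_pte_locked(struct mm_struct *mm, unsigned long addr,
                                  pte_t *ptep, pte_t pteval)
{
        unsigned long flags;

        spin_lock_irqsave(&pa_dbit_lock, flags);
        set_pte(ptep, pteval);                  /* publish the new PTE       */
        purge_tlb_entries(mm, addr);            /* evict the stale TLB entry */
        spin_unlock_irqrestore(&pa_dbit_lock, flags);
}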
@@ -181,30 +181,24 @@ struct exception_data {
 #if !defined(CONFIG_64BIT)
 
 #define __put_kernel_asm64(__val,ptr) do {                          \
-        u64 __val64 = (u64)(__val);                                 \
-        u32 hi = (__val64) >> 32;                                   \
-        u32 lo = (__val64) & 0xffffffff;                            \
         __asm__ __volatile__ (                                      \
                 "\n1:\tstw %2,0(%1)"                                \
-                "\n2:\tstw %3,4(%1)\n\t"                            \
+                "\n2:\tstw %R2,4(%1)\n\t"                           \
                 ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
                 ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
                 : "=r"(__pu_err)                                    \
-                : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err)         \
+                : "r"(ptr), "r"(__val), "0"(__pu_err)               \
                 : "r1");                                            \
 } while (0)
 
 #define __put_user_asm64(__val,ptr) do {                            \
-        u64 __val64 = (u64)(__val);                                 \
-        u32 hi = (__val64) >> 32;                                   \
-        u32 lo = (__val64) & 0xffffffff;                            \
         __asm__ __volatile__ (                                      \
                 "\n1:\tstw %2,0(%%sr3,%1)"                          \
-                "\n2:\tstw %3,4(%%sr3,%1)\n\t"                      \
+                "\n2:\tstw %R2,4(%%sr3,%1)\n\t"                     \
                 ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
                 ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
                 : "=r"(__pu_err)                                    \
-                : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err)         \
+                : "r"(ptr), "r"(__val), "0"(__pu_err)               \
                 : "r1");                                            \
 } while (0)

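The old macros split the 64-bit value into hi/lo halves in C, forcing every value through a (u64) cast, which appears to be what produced the warnings the merge message mentions. The new macros hand the operand to the asm directly and let the %R2 operand modifier name the second register of the pair holding it. The stores themselves are unchanged; a user-space sketch of the resulting layout on a big-endian 32-bit machine such as parisc (store_u64_as_two_words is an illustrative name, not kernel code):

#include <stdint.h>

/* Illustrative only: the memory layout produced by the two word stores in
 * the macros above. */
void store_u64_as_two_words(uint32_t *dst, uint64_t val)
{
        dst[0] = (uint32_t)(val >> 32); /* "stw %2,0(...)"  - most significant word  */
        dst[1] = (uint32_t)val;         /* "stw %R2,4(...)" - least significant word */
}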
@@ -421,14 +421,11 @@ void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
         /* Note: purge_tlb_entries can be called at startup with
            no context. */
 
-        /* Disable preemption while we play with %sr1. */
-        preempt_disable();
-        mtsp(mm->context, 1);
         purge_tlb_start(flags);
+        mtsp(mm->context, 1);
         pdtlb(addr);
         pitlb(addr);
         purge_tlb_end(flags);
-        preempt_enable();
 }
 EXPORT_SYMBOL(purge_tlb_entries);

@@ -120,11 +120,13 @@ extern void __ashrdi3(void);
 extern void __ashldi3(void);
 extern void __lshrdi3(void);
 extern void __muldi3(void);
+extern void __ucmpdi2(void);
 
 EXPORT_SYMBOL(__ashrdi3);
 EXPORT_SYMBOL(__ashldi3);
 EXPORT_SYMBOL(__lshrdi3);
 EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__ucmpdi2);
 
 asmlinkage void * __canonicalize_funcptr_for_compare(void *);
 EXPORT_SYMBOL(__canonicalize_funcptr_for_compare);

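Why a kernel has to export this symbol at all: on 32-bit targets, gcc lowers some unsigned 64-bit comparisons, the classic case being a switch over a 64-bit value, into calls to the libgcc helper __ucmpdi2. If such code sits in a module (the merge message names btrfs), the module carries an undefined reference to __ucmpdi2 and will not load unless the kernel image provides and exports the symbol. A hypothetical fragment of the triggering kind (classify_objectid is an invented name; whether gcc actually emits the libcall depends on the target and optimization level):

/* Hypothetical example: a switch over an unsigned 64-bit value, the kind
 * of construct that 32-bit gcc may lower to calls to __ucmpdi2. */
int classify_objectid(unsigned long long objectid)
{
        switch (objectid) {
        case 1 ... 255:         /* GNU case-range extension, as used in kernel code */
                return 0;
        default:
                return 1;
        }
}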
@@ -2,6 +2,7 @@
 # Makefile for parisc-specific library files
 #
 
-lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o
+lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \
+         ucmpdi2.o
 
 obj-y := iomap.o

#include <linux/module.h>

union ull_union {
        unsigned long long ull;
        struct {
                unsigned int high;
                unsigned int low;
        } ui;
};

int __ucmpdi2(unsigned long long a, unsigned long long b)
{
        union ull_union au = {.ull = a};
        union ull_union bu = {.ull = b};

        if (au.ui.high < bu.ui.high)
                return 0;
        else if (au.ui.high > bu.ui.high)
                return 2;
        if (au.ui.low < bu.ui.low)
                return 0;
        else if (au.ui.low > bu.ui.low)
                return 2;
        return 1;
}
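The new helper follows the libgcc convention for __ucmpdi2: return 0 when a < b, 1 when a == b, and 2 when a > b, comparing the high words first and only falling through to the low words on a tie. Note that placing high before low in the union only matches the in-memory layout of a 64-bit integer on a big-endian machine, which parisc is. A small user-space check of the convention, assuming it is compiled together with a copy of the function above (with the <linux/module.h> include dropped) or against a 32-bit libgcc that provides __ucmpdi2:

#include <assert.h>

extern int __ucmpdi2(unsigned long long a, unsigned long long b);

int main(void)
{
        assert(__ucmpdi2(1ULL, 2ULL) == 0);                     /* a < b  */
        assert(__ucmpdi2(5ULL, 5ULL) == 1);                     /* a == b */
        assert(__ucmpdi2(1ULL << 40, 1ULL << 32) == 2);         /* a > b  */
        return 0;
}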