Commit de06dbfa authored by Linus Torvalds

Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:
 "Another mixture of changes this time around:

   - Split XIP linker file from main linker file to make it more
     maintainable, and various XIP fixes, and clean up a resulting
     macro.

   - Decompressor cleanups from Masahiro Yamada

   - Avoid printing an error for a missing L2 cache

   - Remove some duplicated symbols in System.map, and move
     vectors/stubs back into kernel VMA

   - Various low priority fixes from Arnd

   - Updates to allow bus match functions to return negative errno
     values, touching some drivers and the driver core.  Greg has acked
     these changes.

   - Virtualisation platform updates from Jean-Philippe Brucker.

   - Security enhancements from Kees Cook

   - Rework some Kconfig dependencies and move PSCI idle management code
     out of arch/arm into drivers/firmware/psci.c

   - ARM DMA mapping updates, touching media, acked by Mauro.

   - Fix places in ARM code which should be using virt_to_idmap() so
     that Keystone2 can work.

   - Fix Marvell Tauros2 to work again with non-DT boots.

   - Provide a delay timer for ARM Orion platforms"

* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm: (45 commits)
  ARM: 8546/1: dma-mapping: refactor to fix coherent+cma+gfp=0
  ARM: 8547/1: dma-mapping: store buffer information
  ARM: 8543/1: decompressor: rename suffix_y to compress-y
  ARM: 8542/1: decompressor: merge piggy.*.S and simplify Makefile
  ARM: 8541/1: decompressor: drop redundant FORCE in Makefile
  ARM: 8540/1: decompressor: use clean-files instead of extra-y to clean files
  ARM: 8539/1: decompressor: drop more unneeded assignments to "targets"
  ARM: 8538/1: decompressor: drop unneeded assignments to "targets"
  ARM: 8532/1: uncompress: mark putc as inline
  ARM: 8531/1: turn init_new_context into an inline function
  ARM: 8530/1: remove VIRT_TO_BUS
  ARM: 8537/1: drop unused DEBUG_RODATA from XIP_KERNEL
  ARM: 8536/1: mm: hide __start_rodata_section_aligned for non-debug builds
  ARM: 8535/1: mm: DEBUG_RODATA makes no sense with XIP_KERNEL
  ARM: 8534/1: virt: fix hyp-stub build for pre-ARMv7 CPUs
  ARM: make the physical-relative calculation more obvious
  ARM: 8512/1: proc-v7.S: Adjust stack address when XIP_KERNEL
  ARM: 8411/1: Add default SPARSEMEM settings
  ARM: 8503/1: clk_register_clkdev: remove format string interface
  ARM: 8529/1: remove 'i' and 'zi' targets
  ...
parents b31a3bc3 1b3bf847
@@ -100,3 +100,29 @@ allocated by dma_alloc_attrs() function from individual pages if it can
be mapped as contiguous chunk into device dma address space. By
specifying this attribute the allocated buffer is forced to be contiguous
also in physical memory.

DMA_ATTR_ALLOC_SINGLE_PAGES
---------------------------

This is a hint to the DMA-mapping subsystem that it's probably not worth
the time to try to allocate memory in a way that gives better TLB
efficiency (AKA it's not worth trying to build the mapping out of larger
pages). You might want to specify this if:
- You know that the accesses to this memory won't thrash the TLB.
  You might know that the accesses are likely to be sequential or
  that they aren't sequential but it's unlikely you'll ping-pong
  between many addresses that are likely to be in different physical
  pages.
- You know that the penalty of TLB misses while accessing the
  memory will be small enough to be inconsequential. If you are
  doing a heavy operation like decryption or decompression this
  might be the case.
- You know that the DMA mapping is fairly transitory. If you expect
  the mapping to have a short lifetime then it may be worth it to
  optimize allocation (avoid coming up with large pages) instead of
  getting the slight performance win of larger pages.

Setting this hint doesn't guarantee that you won't get huge pages, but it
means that we won't try quite as hard to get them.

NOTE: At the moment DMA_ATTR_ALLOC_SINGLE_PAGES is only implemented on ARM,
though ARM64 patches will likely be posted soon.
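
[Editor's sketch, not part of the patch: a driver on this kernel generation
would pass the hint through the struct dma_attrs API; the device pointer and
buffer size here are placeholders.]

#include <linux/device.h>
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

/* Hypothetical helper: allocate a short-lived streaming buffer while
 * hinting that building the mapping out of huge pages isn't worth it. */
static void *alloc_transitory_buffer(struct device *dev, size_t size,
				     dma_addr_t *dma)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, &attrs);

	/* The same attrs must be passed to dma_free_attrs() later. */
	return dma_alloc_attrs(dev, size, dma, GFP_KERNEL, &attrs);
}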
@@ -340,8 +340,10 @@ comparison:

	int (*match)(struct device * dev, struct device_driver * drv);

match should return a positive value if the driver supports the device,
and zero otherwise. It may also return an error code (for example
-EPROBE_DEFER) when it cannot yet be determined whether the given
driver supports the device.

When a device is registered, the bus's list of drivers is iterated
over. bus->match() is called for each one until a match is found.
......
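[Editor's sketch, for illustration only: a bus match callback under the new
convention might look like this; foo_bus, to_foo_device() and the readiness
flag are hypothetical.]

static int foo_bus_match(struct device *dev, struct device_driver *drv)
{
	struct foo_device *fdev = to_foo_device(dev);	/* hypothetical bus */

	/* Cannot decide yet, e.g. an ID table still loading from firmware. */
	if (!fdev->ids_valid)
		return -EPROBE_DEFER;

	/* Positive on match, zero otherwise. */
	return strcmp(dev_name(dev), drv->name) == 0;
}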
@@ -572,7 +572,6 @@ config ARCH_RPC
	select NEED_MACH_IO_H
	select NEED_MACH_MEMORY_H
	select NO_IOPORT_MAP
	help
	  On the Acorn Risc-PC, Linux can support the internal IDE disk and
	  CD-ROM interface, serial and parallel port, and the floppy drive.
@@ -1336,7 +1335,6 @@ config BIG_LITTLE
config BL_SWITCHER
	bool "big.LITTLE switcher support"
	depends on BIG_LITTLE && MCPM && HOTPLUG_CPU && ARM_GIC
	select CPU_PM
	help
	  The big.LITTLE "switcher" provides the core functionality to
@@ -2110,7 +2108,8 @@ config ARCH_SUSPEND_POSSIBLE
	def_bool y

config ARM_CPU_SUSPEND
	def_bool PM_SLEEP || BL_SWITCHER || ARM_PSCI_FW
	depends on ARCH_SUSPEND_POSSIBLE

config ARCH_HIBERNATION_POSSIBLE
	bool
......
@@ -352,7 +352,6 @@ archclean:
# My testing targets (bypasses dependencies)
bp:;	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/bootpImage

define archhelp
......
@@ -88,7 +88,7 @@ $(obj)/bootpImage: $(obj)/bootp/bootp FORCE
	$(call if_changed,objcopy)
	@$(kecho) '  Kernel: $@ is ready'

PHONY += initrd
initrd:
	@test "$(INITRD_PHYS)" != "" || \
	(echo This machine does not support INITRD; exit -1)
@@ -107,12 +107,4 @@ uinstall:
	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
	$(obj)/uImage System.map "$(INSTALL_PATH)"

subdir-	    := bootp compressed dts
@@ -3,11 +3,7 @@ bswapsdi2.S
font.c
lib1funcs.S
hyp-stub.S
piggy_data
vmlinux
vmlinux.lds
......
@@ -66,11 +66,11 @@ endif
CPPFLAGS_vmlinux.lds := -DTEXT_START="$(ZTEXTADDR)" -DBSS_START="$(ZBSSADDR)"

compress-$(CONFIG_KERNEL_GZIP) = gzip
compress-$(CONFIG_KERNEL_LZO)  = lzo
compress-$(CONFIG_KERNEL_LZMA) = lzma
compress-$(CONFIG_KERNEL_XZ)   = xzkern
compress-$(CONFIG_KERNEL_LZ4)  = lz4

# Borrowed libfdt files for the ATAG compatibility mode
@@ -89,15 +89,12 @@ ifeq ($(CONFIG_ARM_ATAG_DTB_COMPAT),y)
OBJS	+= $(libfdt_objs) atags_to_fdt.o
endif

targets       := vmlinux vmlinux.lds piggy_data piggy.o \
		 lib1funcs.o ashldi3.o bswapsdi2.o \
		 head.o $(OBJS)

clean-files += piggy_data lib1funcs.S ashldi3.S bswapsdi2.S \
	       $(libfdt) $(libfdt_hdrs) hyp-stub.S

KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
@@ -178,17 +175,17 @@ fi
efi-obj-$(CONFIG_EFI_STUB) := $(objtree)/drivers/firmware/efi/libstub/lib.a

$(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.o \
		$(addprefix $(obj)/, $(OBJS)) $(lib1funcs) $(ashldi3) \
		$(bswapsdi2) $(efi-obj-y) FORCE
	@$(check_for_multiple_zreladdr)
	$(call if_changed,ld)
	@$(check_for_bad_syms)

$(obj)/piggy_data: $(obj)/../Image FORCE
	$(call if_changed,$(compress-y))

$(obj)/piggy.o: $(obj)/piggy_data

CFLAGS_font.o := -Dstatic=
......
	.section .piggydata,#alloc
	.globl	input_data
input_data:
	.incbin	"arch/arm/boot/compressed/piggy_data"
	.globl	input_data_end
input_data_end:
.section .piggydata,#alloc
.globl input_data
input_data:
.incbin "arch/arm/boot/compressed/piggy.lz4"
.globl input_data_end
input_data_end:
.section .piggydata,#alloc
.globl input_data
input_data:
.incbin "arch/arm/boot/compressed/piggy.lzma"
.globl input_data_end
input_data_end:
.section .piggydata,#alloc
.globl input_data
input_data:
.incbin "arch/arm/boot/compressed/piggy.lzo"
.globl input_data_end
input_data_end:
.section .piggydata,#alloc
.globl input_data
input_data:
.incbin "arch/arm/boot/compressed/piggy.xzkern"
.globl input_data_end
input_data_end:
@@ -1290,7 +1290,7 @@ static int sa1111_match(struct device *_dev, struct device_driver *_drv)
	struct sa1111_dev *dev = SA1111_DEV(_dev);
	struct sa1111_driver *drv = SA1111_DRV(_drv);

	return !!(dev->devid & drv->devid);
}

static int sa1111_bus_suspend(struct device *dev, pm_message_t state)
......
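[Editor's note: a quick standalone illustration, mine rather than the patch's,
of why the result needs normalizing now that match() may return negative error
codes: if the shared devid bit were a high bit, the raw AND would read as a
negative value, i.e. as an error. sa1111's real IDs are small; the values here
are hypothetical.]

#include <stdio.h>

int main(void)
{
	unsigned int devid = 1u << 31, mask = 1u << 31;

	printf("raw:        %d\n", (int)(devid & mask));	/* negative */
	printf("normalized: %d\n", !!(devid & mask));		/* 1 */
	return 0;
}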
@@ -23,7 +23,6 @@ generic-y += preempt.h
generic-y += resource.h
generic-y += rwsem.h
generic-y += seccomp.h
generic-y += segment.h
generic-y += sembuf.h
generic-y += serial.h
......
@@ -74,7 +74,7 @@ static inline uint32_t __div64_32(uint64_t *n, uint32_t base)
static inline uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias)
{
	unsigned long long res;
	register unsigned int tmp asm("ip") = 0;

	if (!bias) {
		asm (	"umull	%Q0, %R0, %Q1, %Q2\n\t"
@@ -90,12 +90,12 @@ static inline uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias)
		: "r" (m), "r" (n)
		: "cc");
	} else {
		asm (	"umull	%Q0, %R0, %Q2, %Q3\n\t"
			"cmn	%Q0, %Q2\n\t"
			"adcs	%R0, %R0, %R2\n\t"
			"adc	%Q0, %1, #0"
		: "=&r" (res), "+&r" (tmp)
		: "r" (m), "r" (n)
		: "cc");
	}
......
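[Editor's sketch: a portable C model of what this helper computes, per the
semantics documented for the generic div64 helpers; this assumes a compiler
with 128-bit integer support and is not kernel code.]

#include <stdbool.h>
#include <stdint.h>

/* retval = ((bias ? m : 0) + m * n) >> 64, kept entirely in C. */
static inline uint64_t xprod_64_model(uint64_t m, uint64_t n, bool bias)
{
	unsigned __int128 p = (unsigned __int128)m * n;

	if (bias)
		p += m;
	return (uint64_t)(p >> 64);
}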
@@ -134,6 +134,21 @@
 */
#define PLAT_PHYS_OFFSET	UL(CONFIG_PHYS_OFFSET)

#ifdef CONFIG_XIP_KERNEL
/*
* When referencing data in RAM from the XIP region in a relative manner
* with the MMU off, we need the relative offset between the two physical
* addresses. The macro below achieves this, which is:
* __pa(v_data) - __xip_pa(v_text)
*/
#define PHYS_RELATIVE(v_data, v_text) \
(((v_data) - PAGE_OFFSET + PLAT_PHYS_OFFSET) - \
((v_text) - XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) + \
CONFIG_XIP_PHYS_ADDR))
#else
#define PHYS_RELATIVE(v_data, v_text) ((v_data) - (v_text))
#endif
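
[Editor's worked example, with layout values invented for illustration:
PHYS_RELATIVE() is just the difference between where the data lives in
physical RAM and where the text lives in physical ROM.]

#include <stdio.h>

#define PAGE_OFFSET		0xC0000000UL	/* hypothetical */
#define PLAT_PHYS_OFFSET	0x80000000UL	/* RAM base, hypothetical */
#define XIP_VIRT_BASE		0xFF000000UL	/* XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) */
#define XIP_PHYS_ADDR		0x08000000UL	/* ROM base, hypothetical */

#define PHYS_RELATIVE(v_data, v_text) \
	(((v_data) - PAGE_OFFSET + PLAT_PHYS_OFFSET) - \
	 ((v_text) - XIP_VIRT_BASE + XIP_PHYS_ADDR))

int main(void)
{
	/* __pa(v_data) = 0x80100000, __xip_pa(v_text) = 0x08000040 */
	printf("%#lx\n", PHYS_RELATIVE(0xC0100000UL, 0xFF000040UL)); /* 0x780fffc0 */
	return 0;
}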
#ifndef __ASSEMBLY__

/*
@@ -273,14 +288,14 @@ static inline void *phys_to_virt(phys_addr_t x)
#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn)	__va((phys_addr_t)(pfn) << PAGE_SHIFT)

extern unsigned long (*arch_virt_to_idmap)(unsigned long x);

/*
 * These are for systems that have a hardware interconnect supported alias of
 * physical memory for idmap purposes. Most cases should leave these
 * untouched. Note: this can only return addresses less than 4GiB.
 */
static inline unsigned long __virt_to_idmap(unsigned long x)
{
	if (IS_ENABLED(CONFIG_MMU) && arch_virt_to_idmap)
		return arch_virt_to_idmap(x);
@@ -303,20 +318,6 @@ static inline phys_addr_t __virt_to_idmap(unsigned long x)
#define __bus_to_pfn(x)	__phys_to_pfn(x)
#endif
#ifdef CONFIG_VIRT_TO_BUS
#define virt_to_bus virt_to_bus
static inline __deprecated unsigned long virt_to_bus(void *x)
{
return __virt_to_bus((unsigned long)x);
}
#define bus_to_virt bus_to_virt
static inline __deprecated void *bus_to_virt(unsigned long x)
{
return (void *)__bus_to_virt(x);
}
#endif
/*
 * Conversion between a struct page and a physical address.
 *
......
@@ -26,7 +26,12 @@ void __check_vmalloc_seq(struct mm_struct *mm);
#ifdef CONFIG_CPU_HAS_ASID

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);

static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	atomic64_set(&mm->context.id, 0);
	return 0;
}

#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
@@ -85,7 +90,12 @@ static inline void finish_arch_post_lock_switch(void)

#endif	/* CONFIG_MMU */

static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	return 0;
}

#endif	/* CONFIG_CPU_HAS_ASID */
......
#ifndef _ASM_ARM_SECTIONS_H
#define _ASM_ARM_SECTIONS_H
#include <asm-generic/sections.h>
extern char _exiprom[];
#endif /* _ASM_ARM_SECTIONS_H */
@@ -15,10 +15,11 @@
 * Eg, if you have 2 banks of up to 64MB at 0x80000000, 0x84000000,
 * then MAX_PHYSMEM_BITS is 32, SECTION_SIZE_BITS is 26.
 *
 * These can be overridden in your mach/memory.h.
 */
#if !defined(MAX_PHYSMEM_BITS) || !defined(SECTION_SIZE_BITS)
#define MAX_PHYSMEM_BITS	36
#define SECTION_SIZE_BITS	28
#endif

#endif
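
[Editor's note: to see what the new defaults cost, a throwaway calculation —
2^28-byte sections over a 2^36-byte physical space give a section table of
only 256 entries.]

#include <stdio.h>

#define MAX_PHYSMEM_BITS	36
#define SECTION_SIZE_BITS	28

int main(void)
{
	printf("section size: %lu MiB\n",
	       (1UL << SECTION_SIZE_BITS) >> 20);		/* 256 MiB */
	printf("mem_section entries: %lu\n",
	       1UL << (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS));	/* 256 */
	return 0;
}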
@@ -1064,7 +1064,6 @@ ENDPROC(vector_\name)
	.endm

	.section .stubs, "ax", %progbits
	@ This must be the first word
	.word	vector_swi
@@ -1202,14 +1201,13 @@ vector_addrexcptn:
	.long	__fiq_svc			@  e
	.long	__fiq_svc			@  f

	.globl	vector_fiq

	.section .vectors, "ax", %progbits
.L__vectors_start:
	W(b)	vector_rst
	W(b)	vector_und
	W(ldr)	pc, .L__vectors_start + 0x1000
	W(b)	vector_pabt
	W(b)	vector_dabt
	W(b)	vector_addrexcptn
......
@@ -62,7 +62,7 @@ static int notrace arch_save_image(unsigned long unused)
	ret = swsusp_save();
	if (ret == 0)
		_soft_restart(virt_to_idmap(cpu_resume), false);
	return ret;
}
@@ -87,7 +87,7 @@ static void notrace arch_restore_image(void *unused)
	for (pbe = restore_pblist; pbe; pbe = pbe->next)
		copy_page(pbe->orig_address, pbe->address);

	_soft_restart(virt_to_idmap(cpu_resume), false);
}

static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata;
......
@@ -17,6 +17,7 @@
 */

#include <linux/init.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/virt.h>
@@ -159,6 +160,29 @@ ARM_BE8(orr	r7, r7, #(1 << 25))	@ HSCTLR.EE
	bic	r7, #1			@ Clear ENABLE
	mcr	p15, 0, r7, c14, c3, 1	@ CNTV_CTL
1:
#endif
#ifdef CONFIG_ARM_GIC_V3
@ Check whether GICv3 system registers are available
mrc p15, 0, r7, c0, c1, 1 @ ID_PFR1
ubfx r7, r7, #28, #4
cmp r7, #1
bne 2f
@ Enable system register accesses
mrc p15, 4, r7, c12, c9, 5 @ ICC_HSRE
orr r7, r7, #(ICC_SRE_EL2_ENABLE | ICC_SRE_EL2_SRE)
mcr p15, 4, r7, c12, c9, 5 @ ICC_HSRE
isb
@ SRE bit could be forced to 0 by firmware.
@ Check whether it sticks before accessing any other sysreg
mrc p15, 4, r7, c12, c9, 5 @ ICC_HSRE
tst r7, #ICC_SRE_EL2_SRE
beq 2f
mov r7, #0
mcr p15, 4, r7, c12, c11, 0 @ ICH_HCR
2:
#endif

	bx	lr			@ The boot CPU mode is left in r4.
......
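[Editor's sketch: in C pseudo-form, the sequence above does roughly the
following; the read_/write_ accessors are hypothetical stand-ins for the
mrc/mcr p15 instructions shown in the assembly.]

#include <linux/irqchip/arm-gic-v3.h>

/* Sketch only: check ID_PFR1 for GIC system registers, enable them at
 * HYP, and verify the SRE bit sticks (firmware may force it to zero). */
static bool hyp_enable_gicv3_sre(void)
{
	u32 sre;

	if (((read_id_pfr1() >> 28) & 0xf) != 1)	/* hypothetical accessor */
		return false;

	sre = read_icc_hsre();				/* hypothetical accessor */
	write_icc_hsre(sre | ICC_SRE_EL2_ENABLE | ICC_SRE_EL2_SRE);
	isb();

	if (!(read_icc_hsre() & ICC_SRE_EL2_SRE))
		return false;				/* firmware said no */

	write_ich_hcr(0);				/* hypothetical accessor */
	return true;
}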
@@ -95,7 +95,7 @@ void __init init_IRQ(void)
		outer_cache.write_sec = machine_desc->l2c_write_sec;
		ret = l2x0_of_init(machine_desc->l2c_aux_val,
				   machine_desc->l2c_aux_mask);
		if (ret && ret != -ENODEV)
			pr_err("L2C: failed to init: %d\n", ret);
	}
......
@@ -143,10 +143,8 @@ void (*kexec_reinit)(void);

void machine_kexec(struct kimage *image)
{
	unsigned long page_list, reboot_entry_phys;
	void (*reboot_entry)(void);
	void *reboot_code_buffer;

	/*
@@ -159,9 +157,6 @@ void machine_kexec(struct kimage *image)
	page_list = image->head & PAGE_MASK;

	reboot_code_buffer = page_address(image->control_code_page);

	/* Prepare parameters for reboot_code_buffer*/
@@ -174,10 +169,11 @@ void machine_kexec(struct kimage *image)

	/* copy our kernel relocation code to the control code page */
	reboot_entry = fncpy(reboot_code_buffer,
			     &relocate_new_kernel,
			     relocate_new_kernel_size);

	/* get the identity mapping physical address for the reboot code */
	reboot_entry_phys = virt_to_idmap(reboot_entry);

	pr_info("Bye!\n");
......
@@ -34,7 +34,7 @@
 * recompiling the whole kernel when CONFIG_XIP_KERNEL is turned on/off.
 */
#undef MODULES_VADDR
#define MODULES_VADDR	(((unsigned long)_exiprom + ~PMD_MASK) & PMD_MASK)
#endif

#ifdef CONFIG_MMU
......
@@ -50,7 +50,7 @@ static void __soft_restart(void *addr)
	flush_cache_all();

	/* Switch to the identity mapping. */
	phys_reset = (phys_reset_t)virt_to_idmap(cpu_reset);
	phys_reset((unsigned long)addr);

	/* Should never get here. */
......
@@ -40,7 +40,7 @@
 * to run the rebalance_domains for all idle cores and the cpu_capacity can be
 * updated during this sequence.
 */
static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;

unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
@@ -306,8 +306,6 @@ void __init init_cpu_topology(void)
		cpu_topo->socket_id = -1;
		cpumask_clear(&cpu_topo->core_sibling);
		cpumask_clear(&cpu_topo->thread_sibling);
	}
	smp_wmb();
......
/* ld script to make ARM Linux kernel
* taken from the i386 version by Russell King
* Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
*/
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
#define PROC_INFO \
. = ALIGN(4); \
VMLINUX_SYMBOL(__proc_info_begin) = .; \
*(.proc.info.init) \
VMLINUX_SYMBOL(__proc_info_end) = .;
#define IDMAP_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__idmap_text_start) = .; \
*(.idmap.text) \
VMLINUX_SYMBOL(__idmap_text_end) = .; \
. = ALIGN(PAGE_SIZE); \
VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
*(.hyp.idmap.text) \
VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
#ifdef CONFIG_HOTPLUG_CPU
#define ARM_CPU_DISCARD(x)
#define ARM_CPU_KEEP(x) x
#else
#define ARM_CPU_DISCARD(x) x
#define ARM_CPU_KEEP(x)
#endif
#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
defined(CONFIG_GENERIC_BUG)
#define ARM_EXIT_KEEP(x) x
#define ARM_EXIT_DISCARD(x)
#else
#define ARM_EXIT_KEEP(x)
#define ARM_EXIT_DISCARD(x) x
#endif
OUTPUT_ARCH(arm)
ENTRY(stext)
#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif
SECTIONS
{
/*
* XXX: The linker does not define how output sections are
* assigned to input sections when there are multiple statements
* matching the same input section name. There is no documented
* order of matching.
*
* unwind exit sections must be discarded before the rest of the
* unwind sections get included.
*/
/DISCARD/ : {
*(.ARM.exidx.exit.text)
*(.ARM.extab.exit.text)
ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
ARM_EXIT_DISCARD(EXIT_TEXT)
ARM_EXIT_DISCARD(EXIT_DATA)
EXIT_CALL
#ifndef CONFIG_MMU
*(.text.fixup)
*(__ex_table)
#endif
#ifndef CONFIG_SMP_ON_UP
*(.alt.smp.init)
#endif
*(.discard)
*(.discard.*)
}
. = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
_xiprom = .; /* XIP ROM area to be mapped */
.head.text : {
_text = .;
HEAD_TEXT
}
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
IDMAP_TEXT
__exception_text_start = .;
*(.exception.text)
__exception_text_end = .;
IRQENTRY_TEXT
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.gnu.warning)
*(.glue_7)
*(.glue_7t)
. = ALIGN(4);
*(.got) /* Global offset table */
ARM_CPU_KEEP(PROC_INFO)
}
RO_DATA(PAGE_SIZE)
. = ALIGN(4);
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
#ifdef CONFIG_MMU
*(__ex_table)
#endif
__stop___ex_table = .;
}
#ifdef CONFIG_ARM_UNWIND
/*
* Stack unwinding tables
*/
. = ALIGN(8);
.ARM.unwind_idx : {
__start_unwind_idx = .;
*(.ARM.exidx*)
__stop_unwind_idx = .;
}
.ARM.unwind_tab : {
__start_unwind_tab = .;
*(.ARM.extab*)
__stop_unwind_tab = .;
}
#endif
NOTES
_etext = .; /* End of text and rodata section */
/*
* The vectors and stubs are relocatable code, and the
* only thing that matters is their relative offsets
*/
__vectors_start = .;
.vectors 0xffff0000 : AT(__vectors_start) {
*(.vectors)
}
. = __vectors_start + SIZEOF(.vectors);
__vectors_end = .;
__stubs_start = .;
.stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {
*(.stubs)
}
. = __stubs_start + SIZEOF(.stubs);
__stubs_end = .;
PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
INIT_TEXT_SECTION(8)
.exit.text : {
ARM_EXIT_KEEP(EXIT_TEXT)
}
.init.proc.info : {
ARM_CPU_DISCARD(PROC_INFO)
}
.init.arch.info : {
__arch_info_begin = .;
*(.arch.info.init)
__arch_info_end = .;
}
.init.tagtable : {
__tagtable_begin = .;
*(.taglist.init)
__tagtable_end = .;
}
#ifdef CONFIG_SMP_ON_UP
.init.smpalt : {
__smpalt_begin = .;
*(.alt.smp.init)
__smpalt_end = .;
}
#endif
.init.pv_table : {
__pv_table_begin = .;
*(.pv_table)
__pv_table_end = .;
}
.init.data : {
INIT_SETUP(16)
INIT_CALLS
CON_INITCALL
SECURITY_INITCALL
INIT_RAM_FS
}
#ifdef CONFIG_SMP
PERCPU_SECTION(L1_CACHE_BYTES)
#endif
_exiprom = .; /* End of XIP ROM area */
__data_loc = ALIGN(4); /* location in binary */
. = PAGE_OFFSET + TEXT_OFFSET;
.data : AT(__data_loc) {
_data = .; /* address in memory */
_sdata = .;
/*
* first, the init task union, aligned
* to an 8192 byte boundary.
*/
INIT_TASK_DATA(THREAD_SIZE)
. = ALIGN(PAGE_SIZE);
__init_begin = .;
INIT_DATA
ARM_EXIT_KEEP(EXIT_DATA)
. = ALIGN(PAGE_SIZE);
__init_end = .;
NOSAVE_DATA
CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
READ_MOSTLY_DATA(L1_CACHE_BYTES)
/*
* and the usual data section
*/
DATA_DATA
CONSTRUCTORS
_edata = .;
}
_edata_loc = __data_loc + SIZEOF(.data);
#ifdef CONFIG_HAVE_TCM
/*
* We align everything to a page boundary so we can
* free it after init has commenced and TCM contents have
* been copied to its destination.
*/
.tcm_start : {
. = ALIGN(PAGE_SIZE);
__tcm_start = .;
__itcm_start = .;
}
/*
* Link these to the ITCM RAM
* Put VMA to the TCM address and LMA to the common RAM
* and we'll upload the contents from RAM to TCM and free
* the used RAM after that.
*/
.text_itcm ITCM_OFFSET : AT(__itcm_start)
{
__sitcm_text = .;
*(.tcm.text)
*(.tcm.rodata)
. = ALIGN(4);
__eitcm_text = .;
}
/*
* Reset the dot pointer, this is needed to create the
* relative __dtcm_start below (to be used as extern in code).
*/
. = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);
.dtcm_start : {
__dtcm_start = .;
}
/* TODO: add remainder of ITCM as well, that can be used for data! */
.data_dtcm DTCM_OFFSET : AT(__dtcm_start)
{
. = ALIGN(4);
__sdtcm_data = .;
*(.tcm.data)
. = ALIGN(4);
__edtcm_data = .;
}
/* Reset the dot pointer or the linker gets confused */
. = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);
/* End marker for freeing TCM copy in linked object */
.tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)){
. = ALIGN(PAGE_SIZE);
__tcm_end = .;
}
#endif
BSS_SECTION(0, 0, 0)
_end = .;
STABS_DEBUG
}
/*
* These must never be empty
* If you have to comment these two assert statements out, your
* binutils is too old (for other reasons as well)
*/
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
/*
* The HYP init code can't be more than a page long,
* and should not cross a page boundary.
* The above comment applies as well.
*/
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
"HYP init code too big or misaligned")
@@ -3,14 +3,16 @@
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#ifdef CONFIG_XIP_KERNEL
#include "vmlinux-xip.lds.S"
#else

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#define PROC_INFO							\
	. = ALIGN(4);							\
@@ -89,17 +91,13 @@ SECTIONS
		*(.discard.*)
	}

	. = PAGE_OFFSET + TEXT_OFFSET;

	.head.text : {
		_text = .;
		HEAD_TEXT
	}

#ifdef CONFIG_DEBUG_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#endif
@@ -123,7 +121,7 @@ SECTIONS
		ARM_CPU_KEEP(PROC_INFO)
	}

#ifdef CONFIG_DEBUG_ALIGN_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#endif
	RO_DATA(PAGE_SIZE)
@@ -158,32 +156,33 @@ SECTIONS
	_etext = .;			/* End of text and rodata section */

#ifdef CONFIG_DEBUG_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#else
	. = ALIGN(PAGE_SIZE);
#endif
	__init_begin = .;

	/*
	 * The vectors and stubs are relocatable code, and the
	 * only thing that matters is their relative offsets
	 */
	__vectors_start = .;
	.vectors 0xffff0000 : AT(__vectors_start) {
		*(.vectors)
	}
	. = __vectors_start + SIZEOF(.vectors);
	__vectors_end = .;

	__stubs_start = .;
	.stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {
		*(.stubs)
	}
	. = __stubs_start + SIZEOF(.stubs);
	__stubs_end = .;

	PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));

	INIT_TEXT_SECTION(8)
	.exit.text : {
		ARM_EXIT_KEEP(EXIT_TEXT)
@@ -214,37 +213,28 @@ SECTIONS
		__pv_table_end = .;
	}

	.init.data : {
		INIT_DATA
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		SECURITY_INITCALL
		INIT_RAM_FS
	}

	.exit.data : {
		ARM_EXIT_KEEP(EXIT_DATA)
	}

#ifdef CONFIG_SMP
	PERCPU_SECTION(L1_CACHE_BYTES)
#endif

#ifdef CONFIG_DEBUG_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#else
	. = ALIGN(THREAD_SIZE);
#endif
	__init_end = .;
	__data_loc = .;

	.data : AT(__data_loc) {
		_data = .;		/* address in memory */
@@ -256,15 +246,6 @@ SECTIONS
		 */
		INIT_TASK_DATA(THREAD_SIZE)

		NOSAVE_DATA
		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
		READ_MOSTLY_DATA(L1_CACHE_BYTES)
@@ -342,6 +323,15 @@ SECTIONS
	STABS_DEBUG
}
#ifdef CONFIG_DEBUG_RODATA
/*
* Without CONFIG_DEBUG_ALIGN_RODATA, __start_rodata_section_aligned will
* be the first section-aligned location after __start_rodata. Otherwise,
* it will be equal to __start_rodata.
*/
__start_rodata_section_aligned = ALIGN(__start_rodata, 1 << SECTION_SHIFT);
#endif
/*
 * These must never be empty
 * If you have to comment these two assert statements out, your
@@ -357,3 +347,5 @@ ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
	"HYP init code too big or misaligned")

#endif /* CONFIG_XIP_KERNEL */
@@ -30,7 +30,7 @@
u32 *uart;

/* PORT_16C550A, in polled non-fifo mode */
static inline void putc(char c)
{
	if (!uart)
		return;
......
@@ -68,7 +68,6 @@ config ARCH_NETWINDER
	select ISA
	select ISA_DMA
	select PCI
	help
	  Say Y here if you intend to run this kernel on the Rebel.COM
	  NetWinder.  Information about this machine can be found at:
......
@@ -63,7 +63,7 @@ static void __init keystone_init(void)
	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}

static unsigned long keystone_virt_to_idmap(unsigned long x)
{
	return (phys_addr_t)(x) - CONFIG_PAGE_OFFSET + KEYSTONE_LOW_PHYS_START;
}
......
@@ -17,7 +17,7 @@
#include <linux/io.h>
#include <mach/regs-uart.h>

static inline void putc(char c)
{
	while (!(__raw_readl((void __iomem*)KS8695_UART_PA + KS8695_URLS) & URLS_URTHRE))
		barrier();
......
@@ -40,7 +40,7 @@
#define FR_BUSY	(1<<3)
#define FR_TXFF	(1<<5)

static inline void putc(char c)
{
	unsigned long base;
......
@@ -45,7 +45,7 @@ static void set_omap_uart_info(unsigned char port)
	*uart_info = port;
}

static inline void putc(int c)
{
	if (!uart_base)
		return;
......
@@ -76,7 +76,7 @@ int white;
/*
 * This does not append a newline
 */
static inline void putc(int c)
{
	extern void ll_write_char(char *, char c, char white);
	int x,y;
......
@@ -19,7 +19,7 @@
#define UART(x)		(*(volatile unsigned long *)(serial_port + (x)))

static inline void putc(int c)
{
	unsigned long serial_port;
......
@@ -27,7 +27,7 @@
#define TX_DONE	(UART_LSR_TEMT | UART_LSR_THRE)

static volatile u32 * const uart_base = (u32 *)UART0_PA;

static inline void putc(int ch)
{
	/* Check THRE and TEMT bits before we transmit the character.
	 */
......
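[Editor's note: the string of putc hunks above all make the same change;
plausibly the motivation is 'defined but not used' warnings — a static
function a given configuration never calls warns under -Wunused-function,
while a static inline does not. A two-line demonstration:]

/* cc -c -Wunused-function putc_demo.c */
static void putc_plain(char c) { (void)c; }		/* warns if never called */
static inline void putc_inline(char c) { (void)c; }	/* never warns */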
@@ -1037,24 +1037,26 @@ config ARCH_SUPPORTS_BIG_ENDIAN
	  This option specifies the architecture can support big endian
	  operation.
config ARM_KERNMEM_PERMS
bool "Restrict kernel memory permissions"
depends on MMU
help
If this is set, kernel memory other than kernel text (and rodata)
will be made non-executable. The tradeoff is that each region is
padded to section-size (1MiB) boundaries (because their permissions
are different and splitting the 1M pages into 4K ones causes TLB
performance problems), wasting memory.
config DEBUG_RODATA
	bool "Make kernel text and rodata read-only"
	depends on MMU && !XIP_KERNEL
	default y if CPU_V7
	help
	  If this is set, kernel text and rodata memory will be made
	  read-only, and non-text kernel memory will be made non-executable.
	  The tradeoff is that each region is padded to section-size (1MiB)
	  boundaries (because their permissions are different and splitting
	  the 1M pages into 4K ones causes TLB performance problems), which
	  can waste memory.

config DEBUG_ALIGN_RODATA
	bool "Make rodata strictly non-executable"
	depends on DEBUG_RODATA
	default y
	help
	  If this is set, rodata will be made explicitly non-executable. This
	  provides protection on the rare chance that attackers might find and
	  use ROP gadgets that exist in the rodata section. This adds an
	  additional section-aligned split of rodata from kernel text so it
	  can be made explicitly non-executable. This padding may waste memory
	  space to gain the additional protection.
@@ -22,6 +22,11 @@
#include <asm/cputype.h>
#include <asm/hardware/cache-tauros2.h>

/* CP15 PJ4 Control configuration register */
#define CCR_L2C_PREFETCH_DISABLE	BIT(24)
#define CCR_L2C_ECC_ENABLE		BIT(23)
#define CCR_L2C_WAY7_4_DISABLE		BIT(21)
#define CCR_L2C_BURST8_ENABLE		BIT(20)

/*
 * When Tauros2 is used on a CPU that supports the v7 hierarchical
@@ -182,18 +187,18 @@ static void enable_extra_feature(unsigned int features)
	u = read_extra_features();

	if (features & CACHE_TAUROS2_PREFETCH_ON)
		u &= ~CCR_L2C_PREFETCH_DISABLE;
	else
		u |= CCR_L2C_PREFETCH_DISABLE;
	pr_info("Tauros2: %s L2 prefetch.\n",
			(features & CACHE_TAUROS2_PREFETCH_ON)
			? "Enabling" : "Disabling");

	if (features & CACHE_TAUROS2_LINEFILL_BURST8)
		u |= CCR_L2C_BURST8_ENABLE;
	else
		u &= ~CCR_L2C_BURST8_ENABLE;
	pr_info("Tauros2: %s burst8 line fill.\n",
			(features & CACHE_TAUROS2_LINEFILL_BURST8)
			? "Enabling" : "Disabling");
@@ -287,9 +292,7 @@ void __init tauros2_init(unsigned int features)
	node = of_find_matching_node(NULL, tauros2_ids);
	if (!node) {
		pr_info("Not found marvell,tauros2-cache, disable it\n");
	} else {
		ret = of_property_read_u32(node, "marvell,tauros2-cache-features", &f);
		if (ret) {
			pr_info("Not found marvell,tauros-cache-features property, "
@@ -297,6 +300,7 @@ void __init tauros2_init(unsigned int features)
			features = 0;
		} else
			features = f;
	}
#endif
	tauros2_internal_init(features);
}
@@ -42,6 +42,55 @@
#include "dma.h"
#include "mm.h"
struct arm_dma_alloc_args {
struct device *dev;
size_t size;
gfp_t gfp;
pgprot_t prot;
const void *caller;
bool want_vaddr;
};
struct arm_dma_free_args {
struct device *dev;
size_t size;
void *cpu_addr;
struct page *page;
bool want_vaddr;
};
struct arm_dma_allocator {
void *(*alloc)(struct arm_dma_alloc_args *args,
struct page **ret_page);
void (*free)(struct arm_dma_free_args *args);
};
struct arm_dma_buffer {
struct list_head list;
void *virt;
struct arm_dma_allocator *allocator;
};
static LIST_HEAD(arm_dma_bufs);
static DEFINE_SPINLOCK(arm_dma_bufs_lock);
static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
{
struct arm_dma_buffer *buf, *found = NULL;
unsigned long flags;
spin_lock_irqsave(&arm_dma_bufs_lock, flags);
list_for_each_entry(buf, &arm_dma_bufs, list) {
if (buf->virt == virt) {
list_del(&buf->list);
found = buf;
break;
}
}
spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
return found;
}
/*
 * The DMA API is built upon the notion of "buffer ownership". A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
@@ -592,7 +641,7 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv)	NULL
#define __alloc_from_pool(size, ret_page)			NULL
#define __alloc_from_contiguous(dev, size, prot, ret, c, wv)	NULL
#define __free_from_pool(cpu_addr, size)			do { } while (0)
#define __free_from_contiguous(dev, page, cpu_addr, size, wv)	do { } while (0)
#define __dma_free_remap(cpu_addr, size)			do { } while (0)
@@ -610,7 +659,78 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
	return page_address(page);
}
static void *simple_allocator_alloc(struct arm_dma_alloc_args *args,
struct page **ret_page)
{
return __alloc_simple_buffer(args->dev, args->size, args->gfp,
ret_page);
}
static void simple_allocator_free(struct arm_dma_free_args *args)
{
__dma_free_buffer(args->page, args->size);
}
static struct arm_dma_allocator simple_allocator = {
.alloc = simple_allocator_alloc,
.free = simple_allocator_free,
};
static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
struct page **ret_page)
{
return __alloc_from_contiguous(args->dev, args->size, args->prot,
ret_page, args->caller,
args->want_vaddr);
}
static void cma_allocator_free(struct arm_dma_free_args *args)
{
__free_from_contiguous(args->dev, args->page, args->cpu_addr,
args->size, args->want_vaddr);
}
static struct arm_dma_allocator cma_allocator = {
.alloc = cma_allocator_alloc,
.free = cma_allocator_free,
};
static void *pool_allocator_alloc(struct arm_dma_alloc_args *args,
struct page **ret_page)
{
return __alloc_from_pool(args->size, ret_page);
}
static void pool_allocator_free(struct arm_dma_free_args *args)
{
__free_from_pool(args->cpu_addr, args->size);
}
static struct arm_dma_allocator pool_allocator = {
.alloc = pool_allocator_alloc,
.free = pool_allocator_free,
};
static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
struct page **ret_page)
{
return __alloc_remap_buffer(args->dev, args->size, args->gfp,
args->prot, ret_page, args->caller,
args->want_vaddr);
}
static void remap_allocator_free(struct arm_dma_free_args *args)
{
if (args->want_vaddr)
__dma_free_remap(args->cpu_addr, args->size);
__dma_free_buffer(args->page, args->size);
}
static struct arm_dma_allocator remap_allocator = {
.alloc = remap_allocator_alloc,
.free = remap_allocator_free,
};
static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
@@ -619,7 +739,16 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page = NULL;
	void *addr;
	bool allowblock, cma;
	struct arm_dma_buffer *buf;
	struct arm_dma_alloc_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.gfp = gfp,
		.prot = prot,
		.caller = caller,
		.want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs),
	};

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
@@ -633,6 +762,10 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
	if (!mask)
		return NULL;

	buf = kzalloc(sizeof(*buf), gfp);
	if (!buf)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;
@@ -644,28 +777,37 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);
	args.gfp = gfp;

	*handle = DMA_ERROR_CODE;
	allowblock = gfpflags_allow_blocking(gfp);
	cma = allowblock ? dev_get_cma_area(dev) : false;

	if (cma)
		buf->allocator = &cma_allocator;
	else if (nommu() || is_coherent)
		buf->allocator = &simple_allocator;
	else if (allowblock)
		buf->allocator = &remap_allocator;
	else
		buf->allocator = &pool_allocator;

	addr = buf->allocator->alloc(&args, &page);

	if (page) {
		unsigned long flags;

		*handle = pfn_to_dma(dev, page_to_pfn(page));
		buf->virt = args.want_vaddr ? addr : page;

		spin_lock_irqsave(&arm_dma_bufs_lock, flags);
		list_add(&buf->list, &arm_dma_bufs);
		spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	} else {
		kfree(buf);
	}

	return args.want_vaddr ? addr : page;
}

/*
@@ -741,25 +883,21 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	struct arm_dma_buffer *buf;
	struct arm_dma_free_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.cpu_addr = cpu_addr,
		.page = page,
		.want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs),
	};

	buf = arm_dma_buffer_find(cpu_addr);
	if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
		return;

	buf->allocator->free(&args);
	kfree(buf);
}

void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
@@ -1122,6 +1260,9 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
	spin_unlock_irqrestore(&mapping->lock, flags);
}

/* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! */
static const int iommu_order_array[] = { 9, 8, 4, 0 };

static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
					  gfp_t gfp, struct dma_attrs *attrs)
{
@@ -1129,6 +1270,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i = 0;
	int order_idx = 0;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
@@ -1154,6 +1296,10 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
		return pages;
	}

	/* Go straight to 4K chunks if caller says it's OK. */
	if (dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs))
		order_idx = ARRAY_SIZE(iommu_order_array) - 1;

	/*
	 * IOMMU can map any pages, so himem can also be used here
	 */
@@ -1162,22 +1308,24 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
	while (count) {
		int j, order;

		order = iommu_order_array[order_idx];

		/* Drop down when we get small */
		if (__fls(count) < order) {
			order_idx++;
			continue;
		}

		if (order) {
			/* See if it's easy to allocate a high-order chunk */
			pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);

			/* Go down a notch at first sign of pressure */
			if (!pages[i]) {
				order_idx++;
				continue;
			}
		} else {
			pages[i] = alloc_pages(gfp, 0);
			if (!pages[i])
				goto error;
		}
......
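[Editor's note: the effect of the order array, modeled standalone — the
allocator walks a fixed ladder of chunk sizes, and DMA_ATTR_ALLOC_SINGLE_PAGES
simply starts the walk at the last rung. A toy printout of the rungs:]

#include <stdio.h>

static const int iommu_order_array[] = { 9, 8, 4, 0 };

int main(void)
{
	/* With 4 KiB pages: order 9 = 2 MiB, 8 = 1 MiB, 4 = 64 KiB, 0 = 4 KiB.
	 * DMA_ATTR_ALLOC_SINGLE_PAGES starts at the final (4 KiB) entry. */
	for (unsigned int i = 0; i < 4; i++)
		printf("order %d -> %lu KiB\n", iommu_order_array[i],
		       4UL << iommu_order_array[i]);
	return 0;
}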
@@ -15,7 +15,7 @@
 * page tables.
 */
pgd_t *idmap_pgd;
unsigned long (*arch_virt_to_idmap)(unsigned long x);

#ifdef CONFIG_ARM_LPAE
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
......
@@ -572,8 +572,9 @@ void __init mem_init(void)
 	}
 }
 
-#ifdef CONFIG_ARM_KERNMEM_PERMS
+#ifdef CONFIG_DEBUG_RODATA
 struct section_perm {
+	const char *name;
 	unsigned long start;
 	unsigned long end;
 	pmdval_t mask;
@@ -581,9 +582,13 @@ struct section_perm {
 	pmdval_t clear;
 };
 
+/* First section-aligned location at or after __start_rodata. */
+extern char __start_rodata_section_aligned[];
+
 static struct section_perm nx_perms[] = {
 	/* Make pages tables, etc before _stext RW (set NX). */
 	{
+		.name	= "pre-text NX",
 		.start	= PAGE_OFFSET,
 		.end	= (unsigned long)_stext,
 		.mask	= ~PMD_SECT_XN,
@@ -591,26 +596,26 @@ static struct section_perm nx_perms[] = {
 	},
 	/* Make init RW (set NX). */
 	{
+		.name	= "init NX",
 		.start	= (unsigned long)__init_begin,
 		.end	= (unsigned long)_sdata,
 		.mask	= ~PMD_SECT_XN,
 		.prot	= PMD_SECT_XN,
 	},
-#ifdef CONFIG_DEBUG_RODATA
 	/* Make rodata NX (set RO in ro_perms below). */
 	{
-		.start	= (unsigned long)__start_rodata,
+		.name	= "rodata NX",
+		.start	= (unsigned long)__start_rodata_section_aligned,
 		.end	= (unsigned long)__init_begin,
 		.mask	= ~PMD_SECT_XN,
 		.prot	= PMD_SECT_XN,
 	},
-#endif
 };
 
-#ifdef CONFIG_DEBUG_RODATA
 static struct section_perm ro_perms[] = {
 	/* Make kernel code and rodata RX (set RO). */
 	{
+		.name	= "text/rodata RO",
 		.start	= (unsigned long)_stext,
 		.end	= (unsigned long)__init_begin,
 #ifdef CONFIG_ARM_LPAE
@@ -623,7 +628,6 @@ static struct section_perm ro_perms[] = {
 #endif
 	},
 };
-#endif
 
 /*
  * Updates section permissions only for the current mm (sections are
@@ -670,8 +674,8 @@ void set_section_perms(struct section_perm *perms, int n, bool set,
 	for (i = 0; i < n; i++) {
 		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
 		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
-			pr_err("BUG: section %lx-%lx not aligned to %lx\n",
-				perms[i].start, perms[i].end,
+			pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
+				perms[i].name, perms[i].start, perms[i].end,
 				SECTION_SIZE);
 			continue;
 		}
@@ -712,7 +716,6 @@ void fix_kernmem_perms(void)
 	stop_machine(__fix_kernmem_perms, NULL, NULL);
 }
 
-#ifdef CONFIG_DEBUG_RODATA
 int __mark_rodata_ro(void *unused)
 {
 	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
@@ -735,11 +738,10 @@ void set_kernel_text_ro(void)
 	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
 				current->active_mm);
 }
-#endif /* CONFIG_DEBUG_RODATA */
 
 #else
 static inline void fix_kernmem_perms(void) { }
-#endif /* CONFIG_ARM_KERNMEM_PERMS */
+#endif /* CONFIG_DEBUG_RODATA */
 
 void free_tcmmem(void)
 {
......
@@ -1253,7 +1253,7 @@ static inline void prepare_page_table(void)
 
 #ifdef CONFIG_XIP_KERNEL
 	/* The XIP kernel is mapped in the module area -- skip over it */
-	addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
+	addr = ((unsigned long)_exiprom + PMD_SIZE - 1) & PMD_MASK;
 #endif
 	for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
 		pmd_clear(pmd_off_k(addr));
@@ -1335,7 +1335,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
 #ifdef CONFIG_XIP_KERNEL
 	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
 	map.virtual = MODULES_VADDR;
-	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
+	map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
 	map.type = MT_ROM;
 	create_mapping(&map);
 #endif
@@ -1426,7 +1426,11 @@ static void __init kmap_init(void)
 static void __init map_lowmem(void)
 {
 	struct memblock_region *reg;
+#ifdef CONFIG_XIP_KERNEL
+	phys_addr_t kernel_x_start = round_down(__pa(_sdata), SECTION_SIZE);
+#else
 	phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
+#endif
 	phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
 
 	/* Map all the lowmem memory banks. */
......
@@ -487,7 +487,7 @@ __errata_finish:
 
 	.align	2
 __v7_setup_stack_ptr:
-	.word	__v7_setup_stack - .
+	.word	PHYS_RELATIVE(__v7_setup_stack, .)
 ENDPROC(__v7_setup)
 
 	.bss
......
@@ -18,6 +18,7 @@
 #include <linux/irq.h>
 #include <linux/sched_clock.h>
 #include <plat/time.h>
+#include <asm/delay.h>
 
 /*
  * MBus bridge block registers.
@@ -188,6 +189,15 @@ orion_time_set_base(void __iomem *_timer_base)
 	timer_base = _timer_base;
 }
 
+static unsigned long orion_delay_timer_read(void)
+{
+	return ~readl(timer_base + TIMER0_VAL_OFF);
+}
+
+static struct delay_timer orion_delay_timer = {
+	.read_current_timer = orion_delay_timer_read,
+};
+
 void __init
 orion_time_init(void __iomem *_bridge_base, u32 _bridge_timer1_clr_mask,
 		unsigned int irq, unsigned int tclk)
@@ -202,6 +212,9 @@ orion_time_init(void __iomem *_bridge_base, u32 _bridge_timer1_clr_mask,
 
 	ticks_per_jiffy = (tclk + HZ/2) / HZ;
 
+	orion_delay_timer.freq = tclk;
+	register_current_timer_delay(&orion_delay_timer);
+
 	/*
 	 * Set scale and timer for sched_clock.
 	 */
......
@@ -20,7 +20,6 @@
 #include <linux/smp.h>
 #include <linux/delay.h>
 #include <linux/psci.h>
-#include <linux/slab.h>
 
 #include <uapi/linux/psci.h>
 
@@ -28,73 +27,6 @@
 #include <asm/cpu_ops.h>
 #include <asm/errno.h>
 #include <asm/smp_plat.h>
-#include <asm/suspend.h>
-
-static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
-
-static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu)
-{
-	int i, ret, count = 0;
-	u32 *psci_states;
-	struct device_node *state_node, *cpu_node;
-
-	cpu_node = of_get_cpu_node(cpu, NULL);
-	if (!cpu_node)
-		return -ENODEV;
-
-	/*
-	 * If the PSCI cpu_suspend function hook has not been initialized
-	 * idle states must not be enabled, so bail out
-	 */
-	if (!psci_ops.cpu_suspend)
-		return -EOPNOTSUPP;
-
-	/* Count idle states */
-	while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
-					      count))) {
-		count++;
-		of_node_put(state_node);
-	}
-
-	if (!count)
-		return -ENODEV;
-
-	psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
-	if (!psci_states)
-		return -ENOMEM;
-
-	for (i = 0; i < count; i++) {
-		u32 state;
-
-		state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
-
-		ret = of_property_read_u32(state_node,
-					   "arm,psci-suspend-param",
-					   &state);
-		if (ret) {
-			pr_warn(" * %s missing arm,psci-suspend-param property\n",
-				state_node->full_name);
-			of_node_put(state_node);
-			goto free_mem;
-		}
-
-		of_node_put(state_node);
-		pr_debug("psci-power-state %#x index %d\n", state, i);
-		if (!psci_power_state_is_valid(state)) {
-			pr_warn("Invalid PSCI power state %#x\n", state);
-			ret = -EINVAL;
-			goto free_mem;
-		}
-		psci_states[i] = state;
-	}
-
-	/* Idle states parsed correctly, initialize per-cpu pointer */
-	per_cpu(psci_power_state, cpu) = psci_states;
-	return 0;
-
-free_mem:
-	kfree(psci_states);
-	return ret;
-}
-
 static int __init cpu_psci_cpu_init(unsigned int cpu)
 {
@@ -178,38 +110,11 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
 }
 #endif
 
-static int psci_suspend_finisher(unsigned long index)
-{
-	u32 *state = __this_cpu_read(psci_power_state);
-
-	return psci_ops.cpu_suspend(state[index - 1],
-				    virt_to_phys(cpu_resume));
-}
-
-static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
-{
-	int ret;
-	u32 *state = __this_cpu_read(psci_power_state);
-	/*
-	 * idle state index 0 corresponds to wfi, should never be called
-	 * from the cpu_suspend operations
-	 */
-	if (WARN_ON_ONCE(!index))
-		return -EINVAL;
-
-	if (!psci_power_state_loses_context(state[index - 1]))
-		ret = psci_ops.cpu_suspend(state[index - 1], 0);
-	else
-		ret = cpu_suspend(index, psci_suspend_finisher);
-
-	return ret;
-}
-
 const struct cpu_operations cpu_psci_ops = {
 	.name		= "psci",
 #ifdef CONFIG_CPU_IDLE
-	.cpu_init_idle	= cpu_psci_cpu_init_idle,
-	.cpu_suspend	= cpu_psci_cpu_suspend,
+	.cpu_init_idle	= psci_cpu_init_idle,
+	.cpu_suspend	= psci_cpu_suspend_enter,
 #endif
 	.cpu_init	= cpu_psci_cpu_init,
 	.cpu_prepare	= cpu_psci_cpu_prepare,
......
@@ -560,6 +560,7 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
 	struct device_attach_data *data = _data;
 	struct device *dev = data->dev;
 	bool async_allowed;
+	int ret;
 
 	/*
 	 * Check if device has already been claimed. This may
@@ -570,8 +571,17 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
 	if (dev->driver)
 		return -EBUSY;
 
-	if (!driver_match_device(drv, dev))
+	ret = driver_match_device(drv, dev);
+	if (ret == 0) {
+		/* no match */
 		return 0;
+	} else if (ret == -EPROBE_DEFER) {
+		dev_dbg(dev, "Device match requests probe deferral\n");
+		driver_deferred_probe_add(dev);
+	} else if (ret < 0) {
+		dev_dbg(dev, "Bus failed to match device: %d", ret);
+		return ret;
+	} /* ret > 0 means positive match */
 
 	async_allowed = driver_allows_async_probing(drv);
 
@@ -691,6 +701,7 @@ void device_initial_probe(struct device *dev)
 static int __driver_attach(struct device *dev, void *data)
 {
 	struct device_driver *drv = data;
+	int ret;
 
 	/*
 	 * Lock device and try to bind to it. We drop the error
@@ -702,8 +713,17 @@ static int __driver_attach(struct device *dev, void *data)
 	 * is an error.
 	 */
 
-	if (!driver_match_device(drv, dev))
+	ret = driver_match_device(drv, dev);
+	if (ret == 0) {
+		/* no match */
 		return 0;
+	} else if (ret == -EPROBE_DEFER) {
+		dev_dbg(dev, "Device match requests probe deferral\n");
+		driver_deferred_probe_add(dev);
+	} else if (ret < 0) {
+		dev_dbg(dev, "Bus failed to match device: %d", ret);
+		return ret;
+	} /* ret > 0 means positive match */
 
 	if (dev->parent)	/* Needed for USB */
 		device_lock(dev->parent);
......
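The deferral path mirrors what drivers already do from probe(). For comparison, a hedged sketch of the probe-time equivalent (the platform device and its clock are hypothetical):

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/*
	 * devm_clk_get() may return -EPROBE_DEFER; propagating it puts the
	 * device on the deferred-probe list, which is exactly what the new
	 * match handling above does one step earlier.
	 */
	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return 0;
}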
@@ -353,11 +353,25 @@ void clkdev_drop(struct clk_lookup *cl)
 }
 EXPORT_SYMBOL(clkdev_drop);
 
+static struct clk_lookup *__clk_register_clkdev(struct clk_hw *hw,
+						const char *con_id,
+						const char *dev_id, ...)
+{
+	struct clk_lookup *cl;
+	va_list ap;
+
+	va_start(ap, dev_id);
+	cl = vclkdev_create(hw, con_id, dev_id, ap);
+	va_end(ap);
+
+	return cl;
+}
+
 /**
  * clk_register_clkdev - register one clock lookup for a struct clk
  * @clk: struct clk to associate with all clk_lookups
  * @con_id: connection ID string on device
- * @dev_id: format string describing device name
+ * @dev_id: string describing device name
  *
  * con_id or dev_id may be NULL as a wildcard, just as in the rest of
  * clkdev.
@@ -368,17 +382,22 @@ EXPORT_SYMBOL(clkdev_drop);
  * after clk_register().
  */
 int clk_register_clkdev(struct clk *clk, const char *con_id,
-	const char *dev_fmt, ...)
+	const char *dev_id)
 {
 	struct clk_lookup *cl;
-	va_list ap;
 
 	if (IS_ERR(clk))
 		return PTR_ERR(clk);
 
-	va_start(ap, dev_fmt);
-	cl = vclkdev_create(__clk_get_hw(clk), con_id, dev_fmt, ap);
-	va_end(ap);
+	/*
+	 * Since dev_id can be NULL, and NULL is handled specially, we must
+	 * pass it as either a NULL format string, or with "%s".
+	 */
+	if (dev_id)
+		cl = __clk_register_clkdev(__clk_get_hw(clk), con_id, "%s",
+					   dev_id);
+	else
+		cl = __clk_register_clkdev(__clk_get_hw(clk), con_id, NULL);
 
 	return cl ? 0 : -ENOMEM;
 }
......
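Callers that used to format the device name through the varargs interface now pass it as a plain string, roughly like this (the clock and device IDs are hypothetical):

	/* before: clk_register_clkdev(clk, "uartclk", "serial8250.%d", 0); */
	ret = clk_register_clkdev(clk, "uartclk", "serial8250.0");
	if (ret)
		return ret;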
@@ -14,6 +14,7 @@
 #define pr_fmt(fmt) "psci: " fmt
 
 #include <linux/arm-smccc.h>
+#include <linux/cpuidle.h>
 #include <linux/errno.h>
 #include <linux/linkage.h>
 #include <linux/of.h>
@@ -21,10 +22,12 @@
 #include <linux/printk.h>
 #include <linux/psci.h>
 #include <linux/reboot.h>
+#include <linux/slab.h>
 #include <linux/suspend.h>
 
 #include <uapi/linux/psci.h>
 
+#include <asm/cpuidle.h>
 #include <asm/cputype.h>
 #include <asm/system_misc.h>
 #include <asm/smp_plat.h>
@@ -244,6 +247,123 @@ static int __init psci_features(u32 psci_func_id)
 			      psci_func_id, 0, 0);
 }
 
+#ifdef CONFIG_CPU_IDLE
+static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
+
+static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
+{
+	int i, ret, count = 0;
+	u32 *psci_states;
+	struct device_node *state_node;
+
+	/*
+	 * If the PSCI cpu_suspend function hook has not been initialized
+	 * idle states must not be enabled, so bail out
+	 */
+	if (!psci_ops.cpu_suspend)
+		return -EOPNOTSUPP;
+
+	/* Count idle states */
+	while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
+					      count))) {
+		count++;
+		of_node_put(state_node);
+	}
+
+	if (!count)
+		return -ENODEV;
+
+	psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
+	if (!psci_states)
+		return -ENOMEM;
+
+	for (i = 0; i < count; i++) {
+		u32 state;
+
+		state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+
+		ret = of_property_read_u32(state_node,
+					   "arm,psci-suspend-param",
+					   &state);
+		if (ret) {
+			pr_warn(" * %s missing arm,psci-suspend-param property\n",
+				state_node->full_name);
+			of_node_put(state_node);
+			goto free_mem;
+		}
+
+		of_node_put(state_node);
+		pr_debug("psci-power-state %#x index %d\n", state, i);
+		if (!psci_power_state_is_valid(state)) {
+			pr_warn("Invalid PSCI power state %#x\n", state);
+			ret = -EINVAL;
+			goto free_mem;
+		}
+		psci_states[i] = state;
+	}
+
+	/* Idle states parsed correctly, initialize per-cpu pointer */
+	per_cpu(psci_power_state, cpu) = psci_states;
+	return 0;
+
+free_mem:
+	kfree(psci_states);
+	return ret;
+}
+
+int psci_cpu_init_idle(unsigned int cpu)
+{
+	struct device_node *cpu_node;
+	int ret;
+
+	cpu_node = of_get_cpu_node(cpu, NULL);
+	if (!cpu_node)
+		return -ENODEV;
+
+	ret = psci_dt_cpu_init_idle(cpu_node, cpu);
+
+	of_node_put(cpu_node);
+
+	return ret;
+}
+
+static int psci_suspend_finisher(unsigned long index)
+{
+	u32 *state = __this_cpu_read(psci_power_state);
+
+	return psci_ops.cpu_suspend(state[index - 1],
+				    virt_to_phys(cpu_resume));
+}
+
+int psci_cpu_suspend_enter(unsigned long index)
+{
+	int ret;
+	u32 *state = __this_cpu_read(psci_power_state);
+	/*
+	 * idle state index 0 corresponds to wfi, should never be called
+	 * from the cpu_suspend operations
+	 */
+	if (WARN_ON_ONCE(!index))
+		return -EINVAL;
+
+	if (!psci_power_state_loses_context(state[index - 1]))
+		ret = psci_ops.cpu_suspend(state[index - 1], 0);
+	else
+		ret = cpu_suspend(index, psci_suspend_finisher);
+
+	return ret;
+}
+
+/* ARM specific CPU idle operations */
+#ifdef CONFIG_ARM
+static struct cpuidle_ops psci_cpuidle_ops __initdata = {
+	.suspend = psci_cpu_suspend_enter,
+	.init = psci_dt_cpu_init_idle,
+};
+CPUIDLE_METHOD_OF_DECLARE(psci, "arm,psci", &psci_cpuidle_ops);
+#endif
+#endif
+
 static int psci_system_suspend(unsigned long unused)
 {
 	return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
......
@@ -23,13 +23,16 @@
 
 struct vb2_dc_conf {
 	struct device		*dev;
+	struct dma_attrs	attrs;
 };
 
 struct vb2_dc_buf {
 	struct device			*dev;
 	void				*vaddr;
 	unsigned long			size;
+	void				*cookie;
 	dma_addr_t			dma_addr;
+	struct dma_attrs		attrs;
 	enum dma_data_direction		dma_dir;
 	struct sg_table			*dma_sgt;
 	struct frame_vector		*vec;
@@ -131,7 +134,8 @@ static void vb2_dc_put(void *buf_priv)
 		sg_free_table(buf->sgt_base);
 		kfree(buf->sgt_base);
 	}
-	dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
+	dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
+			&buf->attrs);
 	put_device(buf->dev);
 	kfree(buf);
 }
@@ -147,14 +151,18 @@ static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size,
 	if (!buf)
 		return ERR_PTR(-ENOMEM);
 
-	buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr,
-					GFP_KERNEL | gfp_flags);
-	if (!buf->vaddr) {
+	buf->attrs = conf->attrs;
+	buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
+					GFP_KERNEL | gfp_flags, &buf->attrs);
+	if (!buf->cookie) {
 		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
 		kfree(buf);
 		return ERR_PTR(-ENOMEM);
 	}
 
+	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->attrs))
+		buf->vaddr = buf->cookie;
+
 	/* Prevent the device from being released while the buffer is used */
 	buf->dev = get_device(dev);
 	buf->size = size;
@@ -185,8 +193,8 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
 	 */
 	vma->vm_pgoff = 0;
 
-	ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
-		buf->dma_addr, buf->size);
+	ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
+		buf->dma_addr, buf->size, &buf->attrs);
 
 	if (ret) {
 		pr_err("Remapping memory failed, error: %d\n", ret);
@@ -329,7 +337,7 @@ static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
 {
 	struct vb2_dc_buf *buf = dbuf->priv;
 
-	return buf->vaddr + pgnum * PAGE_SIZE;
+	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
 }
 
 static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
@@ -368,8 +376,8 @@ static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
 		return NULL;
 	}
 
-	ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
-		buf->size);
+	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
+		buf->size, &buf->attrs);
 	if (ret < 0) {
 		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
 		kfree(sgt);
@@ -721,7 +729,8 @@ const struct vb2_mem_ops vb2_dma_contig_memops = {
 };
 EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
 
-void *vb2_dma_contig_init_ctx(struct device *dev)
+void *vb2_dma_contig_init_ctx_attrs(struct device *dev,
+				    struct dma_attrs *attrs)
 {
 	struct vb2_dc_conf *conf;
 
@@ -730,10 +739,12 @@ void *vb2_dma_contig_init_ctx(struct device *dev)
 		return ERR_PTR(-ENOMEM);
 
 	conf->dev = dev;
+	if (attrs)
+		conf->attrs = *attrs;
 
 	return conf;
 }
-EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);
+EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx_attrs);
 
 void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
 {
......
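A hedged usage sketch of the new context initializer: a capture driver whose hardware never needs a CPU view of its buffers can request DMA_ATTR_NO_KERNEL_MAPPING up front (the surrounding driver is hypothetical; the DMA-attrs calls are the existing API):

	struct dma_attrs attrs;
	void *alloc_ctx;

	init_dma_attrs(&attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	/* buffers allocated from this context get a cookie but no vaddr */
	alloc_ctx = vb2_dma_contig_init_ctx_attrs(&pdev->dev, &attrs);
	if (IS_ERR(alloc_ctx))
		return PTR_ERR(alloc_ctx);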
@@ -62,7 +62,7 @@ static int nvdimm_bus_match(struct device *dev, struct device_driver *drv)
 {
 	struct nd_device_driver *nd_drv = to_nd_device_driver(drv);
 
-	return test_bit(to_nd_device_type(dev), &nd_drv->type);
+	return !!test_bit(to_nd_device_type(dev), &nd_drv->type);
 }
 
 static struct module *to_bus_provider(struct device *dev)
......
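The !! matters now that match results carry errno semantics: test_bit() only guarantees a zero/nonzero int, so the value is normalized to exactly 0 or 1 and can never be mistaken for a negative error code. A minimal illustration (sketch only; the bit position is arbitrary):

	unsigned long type_mask = 1UL << 31;
	int raw = test_bit(31, &type_mask);	/* nonzero; exact value unspecified */
	int match = !!raw;			/* exactly 1: an unambiguous positive match */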
@@ -44,8 +44,7 @@ struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id,
 void clkdev_add_table(struct clk_lookup *, size_t);
 int clk_add_alias(const char *, const char *, const char *, struct device *);
 
-int clk_register_clkdev(struct clk *, const char *, const char *, ...)
-	__printf(3, 4);
+int clk_register_clkdev(struct clk *, const char *, const char *);
 int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t);
 
 #ifdef CONFIG_COMMON_CLK
......
@@ -70,8 +70,11 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
  * @dev_groups:	Default attributes of the devices on the bus.
  * @drv_groups:	Default attributes of the device drivers on the bus.
  * @match:	Called, perhaps multiple times, whenever a new device or driver
- *		is added for this bus. It should return a nonzero value if the
- *		given device can be handled by the given driver.
+ *		is added for this bus. It should return a positive value if the
+ *		given device can be handled by the given driver and zero
+ *		otherwise. It may also return an error code if determining
+ *		whether the driver supports the device is not possible. In
+ *		case of -EPROBE_DEFER it will queue the device for deferred probing.
  * @uevent:	Called when a device is added, removed, or a few other things
  *		that generate uevents to add the environment variables.
  * @probe:	Called when a new device or driver add to this bus, and callback
......
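Concretely, a bus can now defer the match decision itself. A minimal sketch, assuming a hypothetical example_bus whose device IDs arrive asynchronously (everything except the driver-core types is made up):

static int example_bus_match(struct device *dev, struct device_driver *drv)
{
	struct example_device *edev = to_example_device(dev);

	if (!edev->id_valid)
		return -EPROBE_DEFER;	/* can't decide yet; the core will retry */

	/* positive value = match, zero = no match */
	return strcmp(edev->id, drv->name) == 0;
}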
@@ -18,6 +18,7 @@ enum dma_attr {
 	DMA_ATTR_NO_KERNEL_MAPPING,
 	DMA_ATTR_SKIP_CPU_SYNC,
 	DMA_ATTR_FORCE_CONTIGUOUS,
+	DMA_ATTR_ALLOC_SINGLE_PAGES,
 	DMA_ATTR_MAX,
 };
......
@@ -24,6 +24,9 @@ bool psci_tos_resident_on(int cpu);
 bool psci_power_state_loses_context(u32 state);
 bool psci_power_state_is_valid(u32 state);
 
+int psci_cpu_init_idle(unsigned int cpu);
+int psci_cpu_suspend_enter(unsigned long index);
+
 struct psci_operations {
 	int (*cpu_suspend)(u32 state, unsigned long entry_point);
 	int (*cpu_off)(u32 state);
......
@@ -16,6 +16,8 @@
 #include <media/videobuf2-v4l2.h>
 #include <linux/dma-mapping.h>
 
+struct dma_attrs;
+
 static inline dma_addr_t
 vb2_dma_contig_plane_dma_addr(struct vb2_buffer *vb, unsigned int plane_no)
 {
@@ -24,7 +26,14 @@ vb2_dma_contig_plane_dma_addr(struct vb2_buffer *vb, unsigned int plane_no)
 	return *addr;
 }
 
-void *vb2_dma_contig_init_ctx(struct device *dev);
+void *vb2_dma_contig_init_ctx_attrs(struct device *dev,
+				    struct dma_attrs *attrs);
+
+static inline void *vb2_dma_contig_init_ctx(struct device *dev)
+{
+	return vb2_dma_contig_init_ctx_attrs(dev, NULL);
+}
+
 void vb2_dma_contig_cleanup_ctx(void *alloc_ctx);
 
 extern const struct vb2_mem_ops vb2_dma_contig_memops;
......