Commit 0cbaed1d authored by Linus Torvalds

Merge branch 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull ARM fixes from Russell King:
 "Three small fixes from over the Christmas period, and wiring up the
  new execveat syscall for ARM"

* 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
  ARM: 8275/1: mm: fix PMD_SECT_RDONLY undeclared compile error
  ARM: 8253/1: mm: use phys_addr_t type in map_lowmem() for kernel mem region
  ARM: 8249/1: mm: dump: don't skip regions
  ARM: wire up execveat syscall
parents 505569d2 1e347922
@@ -413,6 +413,7 @@
 #define __NR_getrandom			(__NR_SYSCALL_BASE+384)
 #define __NR_memfd_create		(__NR_SYSCALL_BASE+385)
 #define __NR_bpf			(__NR_SYSCALL_BASE+386)
+#define __NR_execveat			(__NR_SYSCALL_BASE+387)
 
 /*
  * The following SWIs are ARM private.
@@ -396,6 +396,7 @@
 		CALL(sys_getrandom)
 /* 385 */	CALL(sys_memfd_create)
 		CALL(sys_bpf)
+		CALL(sys_execveat)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
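
The two hunks above add the kernel-side plumbing for execveat. As a rough sanity check, userspace could invoke the new syscall directly through syscall(2) — a minimal sketch, assuming no libc wrapper exists yet and that the EABI syscall base of 0 applies (the fallback number 387 and the use of /bin/true are illustrative assumptions):

/*
 * Illustrative userspace sketch (not part of this commit): exercising the
 * newly wired execveat() via raw syscall(2). AT_EMPTY_PATH executes the
 * O_PATH file descriptor itself rather than a pathname.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_execveat
#define __NR_execveat 387	/* __NR_SYSCALL_BASE + 387 on ARM EABI (assumed) */
#endif

int main(void)
{
	char *const argv[] = { "true", NULL };
	char *const envp[] = { NULL };
	int fd = open("/bin/true", O_PATH | O_CLOEXEC);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Replaces the current process image on success; returns only on error. */
	syscall(__NR_execveat, fd, "", argv, envp, AT_EMPTY_PATH);
	perror("execveat");
	return 1;
}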
@@ -220,9 +220,6 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, u64 val)
 	static const char units[] = "KMGTPE";
 	u64 prot = val & pg_level[level].mask;
 
-	if (addr < USER_PGTABLES_CEILING)
-		return;
-
 	if (!st->level) {
 		st->level = level;
 		st->current_prot = prot;
@@ -308,15 +305,13 @@ static void walk_pgd(struct seq_file *m)
 	pgd_t *pgd = swapper_pg_dir;
 	struct pg_state st;
 	unsigned long addr;
-	unsigned i, pgdoff = USER_PGTABLES_CEILING / PGDIR_SIZE;
+	unsigned i;
 
 	memset(&st, 0, sizeof(st));
 	st.seq = m;
 	st.marker = address_markers;
 
-	pgd += pgdoff;
-
-	for (i = pgdoff; i < PTRS_PER_PGD; i++, pgd++) {
+	for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
 		addr = i * PGDIR_SIZE;
 		if (!pgd_none(*pgd)) {
 			walk_pud(&st, pgd, addr);
@@ -658,8 +658,8 @@ static struct section_perm ro_perms[] = {
 		.start	= (unsigned long)_stext,
 		.end	= (unsigned long)__init_begin,
 #ifdef CONFIG_ARM_LPAE
-		.mask	= ~PMD_SECT_RDONLY,
-		.prot	= PMD_SECT_RDONLY,
+		.mask	= ~L_PMD_SECT_RDONLY,
+		.prot	= L_PMD_SECT_RDONLY,
 #else
 		.mask	= ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
 		.prot	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
@@ -1329,8 +1329,8 @@ static void __init kmap_init(void)
 static void __init map_lowmem(void)
 {
 	struct memblock_region *reg;
-	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
-	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+	phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
+	phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
 
 	/* Map all the lowmem memory banks. */
 	for_each_memblock(memory, reg) {
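
The hunk above matters on LPAE, where phys_addr_t is 64-bit while unsigned long is 32-bit, so a kernel region placed above 4 GiB physical would be truncated before the rounding is applied. A standalone, hypothetical demonstration of that truncation (the 2 MiB SECTION_SIZE, the simplified round_down(), and the example address are assumptions for illustration, not kernel code):

/*
 * Hypothetical illustration of why map_lowmem() needs phys_addr_t: storing a
 * >32-bit physical address in unsigned long drops the upper bits before the
 * round_down()/round_up() calls ever run.
 */
#include <stdint.h>
#include <stdio.h>

#define SECTION_SIZE	(1ULL << 21)	/* 2 MiB sections with LPAE (assumed) */
#define round_down(x, y) ((x) & ~((__typeof__(x))((y) - 1)))

int main(void)
{
	uint64_t phys = 0x100008000ULL;		/* kernel base above 4 GiB physical */
	uint32_t narrowed = (uint32_t)phys;	/* what a 32-bit unsigned long keeps */

	printf("phys_addr_t  : %#llx -> %#llx\n",
	       (unsigned long long)phys,
	       (unsigned long long)round_down(phys, SECTION_SIZE));
	printf("unsigned long: %#x -> %#x (truncated)\n",
	       narrowed, (uint32_t)round_down(narrowed, SECTION_SIZE));
	return 0;
}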