Commit 9710cb66 authored by Linus Torvalds

Merge tag 'pm-4.8-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management fixes from Rafael Wysocki:
 "Two hibernation fixes allowing it to work with the recently added
  randomization of the kernel identity mapping base on x86-64 and one
  cpufreq driver regression fix.

  Specifics:

   - Fix the x86 identity mapping creation helpers to avoid the
     assumption that the base address of the mapping will always be
     aligned at the PGD level, as it may be aligned at the PUD level if
     address space randomization is enabled (Rafael Wysocki).

   - Fix the hibernation core to avoid executing tracing functions
     before restoring the processor state completely during resume
     (Thomas Garnier).

   - Fix a recently introduced regression in the powernv cpufreq driver
     that causes it to crash due to an out-of-bounds array access
     (Akshay Adiga)"

* tag 'pm-4.8-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  PM / hibernate: Restore processor state before using per-CPU variables
  x86/power/64: Always create temporary identity mapping correctly
  cpufreq: powernv: Fix crash in gpstate_timer_handler()
parents 01ea4439 0aeeb3e7
@@ -5,10 +5,10 @@ struct x86_mapping_info {
 	void *(*alloc_pgt_page)(void *);	/* allocate buf for page table */
 	void *context;				/* context for alloc_pgt_page */
 	unsigned long pmd_flag;			/* page flag for PMD entry */
-	bool kernel_mapping;			/* kernel mapping or ident mapping */
+	unsigned long offset;			/* ident mapping offset */
 };

 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
-				unsigned long addr, unsigned long end);
+				unsigned long pstart, unsigned long pend);

 #endif /* _ASM_X86_INIT_H */
@@ -3,15 +3,17 @@
  * included by both the compressed kernel and the regular kernel.
  */

-static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
+static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
 			   unsigned long addr, unsigned long end)
 {
 	addr &= PMD_MASK;
 	for (; addr < end; addr += PMD_SIZE) {
 		pmd_t *pmd = pmd_page + pmd_index(addr);
-		if (!pmd_present(*pmd))
-			set_pmd(pmd, __pmd(addr | pmd_flag));
+
+		if (pmd_present(*pmd))
+			continue;
+
+		set_pmd(pmd, __pmd((addr - info->offset) | info->pmd_flag));
 	}
 }
@@ -30,13 +32,13 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 		if (pud_present(*pud)) {
 			pmd = pmd_offset(pud, 0);
-			ident_pmd_init(info->pmd_flag, pmd, addr, next);
+			ident_pmd_init(info, pmd, addr, next);
 			continue;
 		}
 		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
 		if (!pmd)
 			return -ENOMEM;
-		ident_pmd_init(info->pmd_flag, pmd, addr, next);
+		ident_pmd_init(info, pmd, addr, next);
 		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
 	}
@@ -44,14 +46,15 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 }

 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
-			      unsigned long addr, unsigned long end)
+			      unsigned long pstart, unsigned long pend)
 {
+	unsigned long addr = pstart + info->offset;
+	unsigned long end = pend + info->offset;
 	unsigned long next;
 	int result;
-	int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;

 	for (; addr < end; addr = next) {
-		pgd_t *pgd = pgd_page + pgd_index(addr) + off;
+		pgd_t *pgd = pgd_page + pgd_index(addr);
 		pud_t *pud;

 		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
@@ -87,7 +87,7 @@ static int set_up_temporary_mappings(void)
 	struct x86_mapping_info info = {
 		.alloc_pgt_page	= alloc_pgt_page,
 		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
-		.kernel_mapping = true,
+		.offset		= __PAGE_OFFSET,
 	};
 	unsigned long mstart, mend;
 	pgd_t *pgd;
@@ -145,11 +145,30 @@ static struct powernv_pstate_info {
 /* Use following macros for conversions between pstate_id and index */
 static inline int idx_to_pstate(unsigned int i)
 {
+	if (unlikely(i >= powernv_pstate_info.nr_pstates)) {
+		pr_warn_once("index %u is out of bound\n", i);
+		return powernv_freqs[powernv_pstate_info.nominal].driver_data;
+	}
+
 	return powernv_freqs[i].driver_data;
 }

 static inline unsigned int pstate_to_idx(int pstate)
 {
+	int min = powernv_freqs[powernv_pstate_info.min].driver_data;
+	int max = powernv_freqs[powernv_pstate_info.max].driver_data;
+
+	if (min > 0) {
+		if (unlikely((pstate < max) || (pstate > min))) {
+			pr_warn_once("pstate %d is out of bound\n", pstate);
+			return powernv_pstate_info.nominal;
+		}
+	} else {
+		if (unlikely((pstate > max) || (pstate < min))) {
+			pr_warn_once("pstate %d is out of bound\n", pstate);
+			return powernv_pstate_info.nominal;
+		}
+	}
+
 	/*
 	 * abs() is deliberately used so that is works with
 	 * both monotonically increasing and decreasing
@@ -593,7 +612,7 @@ void gpstate_timer_handler(unsigned long data)
 	} else {
 		gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
 						 gpstates->highest_lpstate_idx,
-						 freq_data.pstate_id);
+						 gpstates->last_lpstate_idx);
 	}

 	/*
@@ -300,12 +300,12 @@ static int create_image(int platform_mode)
 	save_processor_state();
 	trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, true);
 	error = swsusp_arch_suspend();
+	/* Restore control flow magically appears here */
+	restore_processor_state();
 	trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false);
 	if (error)
 		printk(KERN_ERR "PM: Error %d creating hibernation image\n",
 		       error);
-	/* Restore control flow magically appears here */
-	restore_processor_state();

 	if (!in_suspend)
 		events_check_enabled = false;
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment