Commit 77f63b4d authored by Linus Torvalds

Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

Pull powerpc fixes from Ben Herrenschmidt:
 "This includes small series from Michael Neuling to fix a couple of
  nasty remaining problems with the new Power8 support, also targeted at
  stable 3.10, without which some new userspace accessible registers
  aren't properly context switched, and in some case, can be clobbered
  by the user of transactional memory.

  Along with that, there are a few more minor things, such as a missing
  Kconfig option to enable handling of denorm exceptions when not
  running under a hypervisor (or userspace will randomly crash when
  hitting denorms with the vector unit), some nasty bugs in the new
  pstore oops code, and other simple bug fixes worth having in now.

  Note: I picked up the two powerpc KVM fixes as Alex Graf asked me to
  handle KVM bits while he is on vacation.  However I'll let him decide
  whether they should go to -stable or not when he is back"

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  powerpc/tm: Fix context switching TAR, PPR and DSCR SPRs
  powerpc: Save the TAR register earlier
  powerpc: Fix context switch DSCR on POWER8
  powerpc: Rework setting up H/FSCR bit definitions
  powerpc: Fix hypervisor facility unavaliable vector number
  powerpc/kvm/book3s_pr: Return appropriate error when allocation fails
  powerpc/kvm: Add signed type cast for comparation
  powerpc/eeh: Add missing procfs entry for PowerNV
  powerpc/pseries: Add backward compatibilty to read old kernel oops-log
  powerpc/pseries: Fix buffer overflow when reading from pstore
  powerpc: On POWERNV enable PPC_DENORMALISATION by default
parents 30b229bd 28e61cc4
@@ -566,7 +566,7 @@ config SCHED_SMT
 config PPC_DENORMALISATION
 	bool "PowerPC denormalisation exception handling"
 	depends on PPC_BOOK3S_64
-	default "n"
+	default "y" if PPC_POWERNV
 	---help---
 	  Add support for handling denormalisation of single precision
 	  values.  Useful for bare metal only.  If unsure say Y here.
......
@@ -247,6 +247,10 @@ struct thread_struct {
 	unsigned long	tm_orig_msr;	/* Thread's MSR on ctx switch */
 	struct pt_regs	ckpt_regs;	/* Checkpointed registers */
+	unsigned long	tm_tar;
+	unsigned long	tm_ppr;
+	unsigned long	tm_dscr;
 	/*
 	 * Transactional FP and VSX 0-31 register set.
 	 * NOTE: the sense of these is the opposite of the integer ckpt_regs!
......
@@ -254,19 +254,28 @@
 #define SPRN_HRMOR	0x139	/* Real mode offset register */
 #define SPRN_HSRR0	0x13A	/* Hypervisor Save/Restore 0 */
 #define SPRN_HSRR1	0x13B	/* Hypervisor Save/Restore 1 */
+/* HFSCR and FSCR bit numbers are the same */
+#define FSCR_TAR_LG	8	/* Enable Target Address Register */
+#define FSCR_EBB_LG	7	/* Enable Event Based Branching */
+#define FSCR_TM_LG	5	/* Enable Transactional Memory */
+#define FSCR_PM_LG	4	/* Enable prob/priv access to PMU SPRs */
+#define FSCR_BHRB_LG	3	/* Enable Branch History Rolling Buffer*/
+#define FSCR_DSCR_LG	2	/* Enable Data Stream Control Register */
+#define FSCR_VECVSX_LG	1	/* Enable VMX/VSX */
+#define FSCR_FP_LG	0	/* Enable Floating Point */
 #define SPRN_FSCR	0x099	/* Facility Status & Control Register */
-#define FSCR_TAR	(1 << (63-55))	/* Enable Target Address Register */
-#define FSCR_EBB	(1 << (63-56))	/* Enable Event Based Branching */
-#define FSCR_DSCR	(1 << (63-61))	/* Enable Data Stream Control Register */
+#define FSCR_TAR	__MASK(FSCR_TAR_LG)
+#define FSCR_EBB	__MASK(FSCR_EBB_LG)
+#define FSCR_DSCR	__MASK(FSCR_DSCR_LG)
 #define SPRN_HFSCR	0xbe	/* HV=1 Facility Status & Control Register */
-#define HFSCR_TAR	(1 << (63-55))	/* Enable Target Address Register */
-#define HFSCR_EBB	(1 << (63-56))	/* Enable Event Based Branching */
-#define HFSCR_TM	(1 << (63-58))	/* Enable Transactional Memory */
-#define HFSCR_PM	(1 << (63-60))	/* Enable prob/priv access to PMU SPRs */
-#define HFSCR_BHRB	(1 << (63-59))	/* Enable Branch History Rolling Buffer*/
-#define HFSCR_DSCR	(1 << (63-61))	/* Enable Data Stream Control Register */
-#define HFSCR_VECVSX	(1 << (63-62))	/* Enable VMX/VSX */
-#define HFSCR_FP	(1 << (63-63))	/* Enable Floating Point */
+#define HFSCR_TAR	__MASK(FSCR_TAR_LG)
+#define HFSCR_EBB	__MASK(FSCR_EBB_LG)
+#define HFSCR_TM	__MASK(FSCR_TM_LG)
+#define HFSCR_PM	__MASK(FSCR_PM_LG)
+#define HFSCR_BHRB	__MASK(FSCR_BHRB_LG)
+#define HFSCR_DSCR	__MASK(FSCR_DSCR_LG)
+#define HFSCR_VECVSX	__MASK(FSCR_VECVSX_LG)
+#define HFSCR_FP	__MASK(FSCR_FP_LG)
 #define SPRN_TAR	0x32f	/* Target Address Register */
 #define SPRN_LPCR	0x13E	/* LPAR Control Register */
 #define LPCR_VPM0	(1ul << (63-0))
......
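For reference, __MASK(n) in reg.h is (1UL << (n)), so the new bit-number names give the same masks as the old open-coded IBM-bit expressions for, e.g., TAR and DSCR (bit 8 is IBM bit 55, bit 2 is IBM bit 61). A standalone C sketch, with the macros re-declared here purely for illustration:

#include <assert.h>

#define __MASK(X)	(1UL << (X))	/* as defined in asm/reg.h */
#define FSCR_TAR_LG	8
#define FSCR_DSCR_LG	2

static_assert(__MASK(FSCR_TAR_LG)  == (1UL << (63 - 55)), "TAR mask unchanged");
static_assert(__MASK(FSCR_DSCR_LG) == (1UL << (63 - 61)), "DSCR mask unchanged");

int main(void) { return 0; }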
@@ -15,6 +15,15 @@ extern struct task_struct *__switch_to(struct task_struct *,
 struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline void save_tar(struct thread_struct *prev)
+{
+	if (cpu_has_feature(CPU_FTR_ARCH_207S))
+		prev->tar = mfspr(SPRN_TAR);
+}
+#else
+static inline void save_tar(struct thread_struct *prev) {}
+#endif
 extern void giveup_fpu(struct task_struct *);
 extern void load_up_fpu(void);
......
@@ -138,6 +138,9 @@ int main(void)
 	DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar));
 	DEFINE(THREAD_TM_TEXASR, offsetof(struct thread_struct, tm_texasr));
 	DEFINE(THREAD_TM_TFIAR, offsetof(struct thread_struct, tm_tfiar));
+	DEFINE(THREAD_TM_TAR, offsetof(struct thread_struct, tm_tar));
+	DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr));
+	DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr));
 	DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs));
 	DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct,
 					 transact_vr[0]));
......
@@ -1061,7 +1061,7 @@ static const struct file_operations proc_eeh_operations = {
 static int __init eeh_init_proc(void)
 {
-	if (machine_is(pseries))
+	if (machine_is(pseries) || machine_is(powernv))
 		proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations);
 	return 0;
 }
......
@@ -449,15 +449,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
 #ifdef CONFIG_PPC_BOOK3S_64
 BEGIN_FTR_SECTION
-	/*
-	 * Back up the TAR across context switches.  Note that the TAR is not
-	 * available for use in the kernel.  (To provide this, the TAR should
-	 * be backed up/restored on exception entry/exit instead, and be in
-	 * pt_regs.  FIXME, this should be in pt_regs anyway (for debug).)
-	 */
-	mfspr	r0,SPRN_TAR
-	std	r0,THREAD_TAR(r3)
 	/* Event based branch registers */
 	mfspr	r0, SPRN_BESCR
 	std	r0, THREAD_BESCR(r3)
@@ -584,9 +575,34 @@ BEGIN_FTR_SECTION
 	ld	r7,DSCR_DEFAULT@toc(2)
 	ld	r0,THREAD_DSCR(r4)
 	cmpwi	r6,0
+	li	r8, FSCR_DSCR
 	bne	1f
 	ld	r0,0(r7)
-1:	cmpd	r0,r25
+	b	3f
+1:
+  BEGIN_FTR_SECTION_NESTED(70)
+	mfspr	r6, SPRN_FSCR
+	or	r6, r6, r8
+	mtspr	SPRN_FSCR, r6
+  BEGIN_FTR_SECTION_NESTED(69)
+	mfspr	r6, SPRN_HFSCR
+	or	r6, r6, r8
+	mtspr	SPRN_HFSCR, r6
+  END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
+	b	4f
+  END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
+3:
+  BEGIN_FTR_SECTION_NESTED(70)
+	mfspr	r6, SPRN_FSCR
+	andc	r6, r6, r8
+	mtspr	SPRN_FSCR, r6
+  BEGIN_FTR_SECTION_NESTED(69)
+	mfspr	r6, SPRN_HFSCR
+	andc	r6, r6, r8
+	mtspr	SPRN_HFSCR, r6
+  END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
+  END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
+4:	cmpd	r0,r25
 	beq	2f
 	mtspr	SPRN_DSCR,r0
 2:
......
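In rough C terms, and purely as an illustrative sketch (assuming r6 was loaded with the thread's dscr_inherit flag just above this hunk and r25 holds the DSCR currently installed in the SPR), the new ARCH_207S path amounts to:

#define FSCR_DSCR_BIT	(1UL << 2)	/* FSCR_DSCR / HFSCR_DSCR */

struct thread_sketch { unsigned long dscr_inherit, dscr; };

/* Pick the DSCR for the incoming thread and adjust the facility bit that
 * lets userspace touch SPRN_DSCR directly. */
static unsigned long next_dscr(const struct thread_sketch *next,
			       unsigned long *fscr,	/* FSCR, and HFSCR when in HV mode */
			       unsigned long dscr_default)
{
	if (next->dscr_inherit) {	/* cmpwi r6,0 ; bne 1f */
		*fscr |= FSCR_DSCR_BIT;	/* or   r6,r6,r8       */
		return next->dscr;	/* thread's own DSCR   */
	}
	*fscr &= ~FSCR_DSCR_BIT;	/* andc r6,r6,r8       */
	return dscr_default;		/* ld   r0,0(r7)       */
}
/* The caller then writes SPRN_DSCR only when the value actually changes,
 * which is the "4: cmpd r0,r25 ; beq 2f" tail of the hunk. */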
@@ -848,7 +848,7 @@ hv_facility_unavailable_relon_trampoline:
 	. = 0x4f80
 	SET_SCRATCH0(r13)
 	EXCEPTION_PROLOG_0(PACA_EXGEN)
-	b	facility_unavailable_relon_hv
+	b	hv_facility_unavailable_relon_hv
 	STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
 #ifdef CONFIG_PPC_DENORMALISATION
@@ -1175,6 +1175,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 	b	.ret_from_except
 	STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception)
+	STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception)
 	.align	7
 	.globl	__end_handlers
@@ -1188,7 +1189,7 @@ __end_handlers:
 	STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
 	STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
 	STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
-	STD_RELON_EXCEPTION_HV_OOL(0xf80, facility_unavailable)
+	STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
 /*
......
@@ -600,6 +600,16 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	struct ppc64_tlb_batch *batch;
 #endif
+	/* Back up the TAR across context switches.
+	 * Note that the TAR is not available for use in the kernel.  (To
+	 * provide this, the TAR should be backed up/restored on exception
+	 * entry/exit instead, and be in pt_regs.  FIXME, this should be in
+	 * pt_regs anyway (for debug).)
+	 * Save the TAR here before we do treclaim/trecheckpoint as these
+	 * will change the TAR.
+	 */
+	save_tar(&prev->thread);
 	__switch_to_tm(prev);
 #ifdef CONFIG_SMP
......
@@ -233,6 +233,16 @@ dont_backup_fp:
 	std	r5, _CCR(r7)
 	std	r6, _XER(r7)
+	/* ******************** TAR, PPR, DSCR ********** */
+	mfspr	r3, SPRN_TAR
+	mfspr	r4, SPRN_PPR
+	mfspr	r5, SPRN_DSCR
+	std	r3, THREAD_TM_TAR(r12)
+	std	r4, THREAD_TM_PPR(r12)
+	std	r5, THREAD_TM_DSCR(r12)
 	/* MSR and flags:  We don't change CRs, and we don't need to alter
 	 * MSR.
 	 */
@@ -347,6 +357,16 @@ dont_restore_fp:
 	mtmsr	r6			/* FP/Vec off again! */
 restore_gprs:
+	/* ******************** TAR, PPR, DSCR ********** */
+	ld	r4, THREAD_TM_TAR(r3)
+	ld	r5, THREAD_TM_PPR(r3)
+	ld	r6, THREAD_TM_DSCR(r3)
+	mtspr	SPRN_TAR, r4
+	mtspr	SPRN_PPR, r5
+	mtspr	SPRN_DSCR, r6
 	/* ******************** CR,LR,CCR,MSR ********** */
 	ld	r3, _CTR(r7)
 	ld	r4, _LINK(r7)
......
@@ -44,9 +44,7 @@
 #include <asm/machdep.h>
 #include <asm/rtas.h>
 #include <asm/pmc.h>
-#ifdef CONFIG_PPC32
 #include <asm/reg.h>
-#endif
 #ifdef CONFIG_PMAC_BACKLIGHT
 #include <asm/backlight.h>
 #endif
@@ -1296,43 +1294,54 @@ void vsx_unavailable_exception(struct pt_regs *regs)
 	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
 }
+#ifdef CONFIG_PPC64
 void facility_unavailable_exception(struct pt_regs *regs)
 {
 	static char *facility_strings[] = {
-		"FPU",
-		"VMX/VSX",
-		"DSCR",
-		"PMU SPRs",
-		"BHRB",
-		"TM",
-		"AT",
-		"EBB",
-		"TAR",
+		[FSCR_FP_LG] = "FPU",
+		[FSCR_VECVSX_LG] = "VMX/VSX",
+		[FSCR_DSCR_LG] = "DSCR",
+		[FSCR_PM_LG] = "PMU SPRs",
+		[FSCR_BHRB_LG] = "BHRB",
+		[FSCR_TM_LG] = "TM",
+		[FSCR_EBB_LG] = "EBB",
+		[FSCR_TAR_LG] = "TAR",
 	};
-	char *facility, *prefix;
+	char *facility = "unknown";
 	u64 value;
+	u8 status;
+	bool hv;
-	if (regs->trap == 0xf60) {
-		value = mfspr(SPRN_FSCR);
-		prefix = "";
-	} else {
+	hv = (regs->trap == 0xf80);
+	if (hv)
 		value = mfspr(SPRN_HFSCR);
-		prefix = "Hypervisor ";
+	else
+		value = mfspr(SPRN_FSCR);
+	status = value >> 56;
+	if (status == FSCR_DSCR_LG) {
+		/* User is acessing the DSCR.  Set the inherit bit and allow
+		 * the user to set it directly in future by setting via the
+		 * H/FSCR DSCR bit.
+		 */
+		current->thread.dscr_inherit = 1;
+		if (hv)
+			mtspr(SPRN_HFSCR, value | HFSCR_DSCR);
+		else
+			mtspr(SPRN_FSCR, value | FSCR_DSCR);
+		return;
 	}
-	value = value >> 56;
+	if ((status < ARRAY_SIZE(facility_strings)) &&
+	    facility_strings[status])
+		facility = facility_strings[status];
 	/* We restore the interrupt state now */
 	if (!arch_irq_disabled_regs(regs))
 		local_irq_enable();
-	if (value < ARRAY_SIZE(facility_strings))
-		facility = facility_strings[value];
-	else
-		facility = "unknown";
 	pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
-		prefix, facility, regs->nip, regs->msr);
+		hv ? "Hypervisor " : "", facility, regs->nip, regs->msr);
 	if (user_mode(regs)) {
 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
@@ -1341,6 +1350,7 @@ void facility_unavailable_exception(struct pt_regs *regs)
 	die("Unexpected facility unavailable exception", regs, SIGABRT);
 }
+#endif
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
......
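The rewritten facility_strings[] table uses designated initializers, so any status value without an entry (for example index 6, the AT facility, which the new table no longer names) is a NULL pointer rather than a mis-ordered string; that is why the code now checks facility_strings[status] before using it. A standalone C sketch of that behaviour, not kernel code:

#include <stdio.h>

int main(void)
{
	static const char *facility_strings[] = {
		[0] = "FPU",
		[2] = "DSCR",
		[8] = "TAR",
	};
	unsigned int status = 6;	/* a bit with no table entry */
	const char *facility = "unknown";

	if (status < sizeof(facility_strings) / sizeof(facility_strings[0]) &&
	    facility_strings[status])	/* gaps are zero-initialised */
		facility = facility_strings[status];

	printf("Facility '%s' unavailable\n", facility);	/* prints "unknown" */
	return 0;
}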
@@ -1809,7 +1809,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 	rma_size <<= PAGE_SHIFT;
 	rmls = lpcr_rmls(rma_size);
 	err = -EINVAL;
-	if (rmls < 0) {
+	if ((long)rmls < 0) {
 		pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
 		goto out_srcu;
 	}
@@ -1874,7 +1874,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
 	/* Allocate the guest's logical partition ID */
 	lpid = kvmppc_alloc_lpid();
-	if (lpid < 0)
+	if ((long)lpid < 0)
 		return -ENOMEM;
 	kvm->arch.lpid = lpid;
......
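The (long) casts matter because rmls and lpid are unsigned types here, so a plain "< 0" test can never be true and a failure return from the helpers went undetected. A tiny standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned long lpid = (unsigned long)-1;	/* helper signalled failure */

	/* "lpid < 0" is always false for an unsigned type, so the original
	 * error check was dead code; viewing the value as signed restores it. */
	printf("as unsigned: %lu\n", lpid);
	printf("failure detected: %s\n", ((long)lpid < 0) ? "yes" : "no");
	return 0;
}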
@@ -1047,11 +1047,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 	if (err)
 		goto free_shadow_vcpu;
+	err = -ENOMEM;
 	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
-	/* the real shared page fills the last 4k of our page */
-	vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096);
 	if (!p)
 		goto uninit_vcpu;
+	/* the real shared page fills the last 4k of our page */
+	vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
 #ifdef CONFIG_PPC_BOOK3S_64
 	/* default to book3s_64 (970fx) */
......
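This hunk fixes two things at once: the error path previously fell through with err left at 0 when __get_free_page() failed, and vcpu->arch.shared was computed from p before the NULL check. A standalone sketch of the corrected pattern, with illustrative names rather than the real KVM API:

#include <errno.h>
#include <stdlib.h>

#define PAGE_SIZE	4096UL

struct vcpu_sketch { void *shared; };

static int alloc_shared_page(struct vcpu_sketch *vcpu)
{
	int err = -ENOMEM;			/* set before the allocation */
	unsigned long p = (unsigned long)calloc(1, PAGE_SIZE);

	if (!p)
		return err;			/* report -ENOMEM, not 0 */

	/* only derive the pointer once the allocation is known to be good */
	vcpu->shared = (void *)(p + PAGE_SIZE - 4096);
	return 0;
}

int main(void)
{
	struct vcpu_sketch v;
	return alloc_shared_page(&v) ? 1 : 0;
}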
@@ -569,35 +569,6 @@ int nvram_decompress(void *in, void *out, size_t inlen, size_t outlen)
 	return ret;
 }
-static int unzip_oops(char *oops_buf, char *big_buf)
-{
-	struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
-	u64 timestamp = oops_hdr->timestamp;
-	char *big_oops_data = NULL;
-	char *oops_data_buf = NULL;
-	size_t big_oops_data_sz;
-	int unzipped_len;
-	big_oops_data = big_buf + sizeof(struct oops_log_info);
-	big_oops_data_sz = big_oops_buf_sz - sizeof(struct oops_log_info);
-	oops_data_buf = oops_buf + sizeof(struct oops_log_info);
-	unzipped_len = nvram_decompress(oops_data_buf, big_oops_data,
-				oops_hdr->report_length,
-				big_oops_data_sz);
-	if (unzipped_len < 0) {
-		pr_err("nvram: decompression failed; returned %d\n",
-				unzipped_len);
-		return -1;
-	}
-	oops_hdr = (struct oops_log_info *)big_buf;
-	oops_hdr->version = OOPS_HDR_VERSION;
-	oops_hdr->report_length = (u16) unzipped_len;
-	oops_hdr->timestamp = timestamp;
-	return 0;
-}
 static int nvram_pstore_open(struct pstore_info *psi)
 {
 	/* Reset the iterator to start reading partitions again */
@@ -685,10 +656,9 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
 	unsigned int err_type, id_no, size = 0;
 	struct nvram_os_partition *part = NULL;
 	char *buff = NULL, *big_buff = NULL;
-	int rc, sig = 0;
+	int sig = 0;
 	loff_t p;
-read_partition:
 	read_type++;
 	switch (nvram_type_ids[read_type]) {
@@ -749,30 +719,46 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
 	*id = id_no;
 	if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) {
+		int length, unzipped_len;
+		size_t hdr_size;
 		oops_hdr = (struct oops_log_info *)buff;
-		*buf = buff + sizeof(*oops_hdr);
+		if (oops_hdr->version < OOPS_HDR_VERSION) {
+			/* Old format oops header had 2-byte record size */
+			hdr_size = sizeof(u16);
+			length = oops_hdr->version;
+			time->tv_sec = 0;
+			time->tv_nsec = 0;
+		} else {
+			hdr_size = sizeof(*oops_hdr);
+			length = oops_hdr->report_length;
+			time->tv_sec = oops_hdr->timestamp;
+			time->tv_nsec = 0;
+		}
+		*buf = kmalloc(length, GFP_KERNEL);
+		if (*buf == NULL)
+			return -ENOMEM;
+		memcpy(*buf, buff + hdr_size, length);
+		kfree(buff);
 		if (err_type == ERR_TYPE_KERNEL_PANIC_GZ) {
 			big_buff = kmalloc(big_oops_buf_sz, GFP_KERNEL);
 			if (!big_buff)
 				return -ENOMEM;
-			rc = unzip_oops(buff, big_buff);
+			unzipped_len = nvram_decompress(*buf, big_buff,
+						length, big_oops_buf_sz);
-			if (rc != 0) {
-				kfree(buff);
+			if (unzipped_len < 0) {
+				pr_err("nvram: decompression failed, returned "
+					"rc %d\n", unzipped_len);
 				kfree(big_buff);
-				goto read_partition;
+			} else {
+				*buf = big_buff;
+				length = unzipped_len;
 			}
-			oops_hdr = (struct oops_log_info *)big_buff;
-			*buf = big_buff + sizeof(*oops_hdr);
-			kfree(buff);
 		}
+		return length;
-		time->tv_sec = oops_hdr->timestamp;
-		time->tv_nsec = 0;
-		return oops_hdr->report_length;
 	}
 	*buf = buff;
@@ -816,6 +802,7 @@ static int nvram_pstore_init(void)
 static void __init nvram_init_oops_partition(int rtas_partition_exists)
 {
 	int rc;
+	size_t size;
 	rc = pseries_nvram_init_os_partition(&oops_log_partition);
 	if (rc != 0) {
@@ -844,8 +831,9 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists)
 	big_oops_buf_sz = (oops_data_sz * 100) / 45;
 	big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
 	if (big_oops_buf) {
-		stream.workspace = kmalloc(zlib_deflate_workspacesize(
-				WINDOW_BITS, MEM_LEVEL), GFP_KERNEL);
+		size = max(zlib_deflate_workspacesize(WINDOW_BITS, MEM_LEVEL),
+			zlib_inflate_workspacesize());
+		stream.workspace = kmalloc(size, GFP_KERNEL);
 		if (!stream.workspace) {
 			pr_err("nvram: No memory for compression workspace; "
 				"skipping compression of %s partition data\n",
......
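For context, the read path now distinguishes the old oops header (whose first 16-bit field held the record length) from the current oops_log_info layout, and always hands pstore a freshly allocated copy of exactly length bytes instead of a pointer into the partition buffer. A simplified standalone sketch of that branch; the struct layout and version value are assumed from the diff, not quoted from the kernel:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct oops_log_info {		/* assumed layout mirroring the fields read above */
	uint16_t version;
	uint16_t report_length;
	uint64_t timestamp;
} __attribute__((packed));

/* Returns a malloc'd copy of the oops text; *length and *timestamp are
 * filled from whichever header format the record carries. */
static char *copy_oops_record(const char *buff, uint16_t current_version,
			      size_t *length, uint64_t *timestamp)
{
	const struct oops_log_info *hdr = (const struct oops_log_info *)buff;
	size_t hdr_size;
	char *out;

	if (hdr->version < current_version) {
		hdr_size = sizeof(uint16_t);	/* old 2-byte header */
		*length = hdr->version;		/* that field was the length */
		*timestamp = 0;
	} else {
		hdr_size = sizeof(*hdr);
		*length = hdr->report_length;
		*timestamp = hdr->timestamp;
	}

	out = malloc(*length);
	if (out)
		memcpy(out, buff + hdr_size, *length);
	return out;
}

int main(void)
{
	struct {
		struct oops_log_info hdr;
		char text[6];
	} rec = { { 5, 5, 42 }, "oops!" };	/* 5 stands in for the current version */
	size_t len;
	uint64_t ts;
	char *text = copy_oops_record((const char *)&rec, 5, &len, &ts);

	free(text);
	return text ? 0 : 1;
}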