Commit c667aeac authored by Heiko Carstens, committed by Martin Schwidefsky

s390: rename struct _lowcore to struct lowcore

Finally get rid of the leading underscore. I tried this already two or
three years ago, but Michael Holzheu objected back then since it would
have broken the crash utility (again).

However, Michael integrated support for the new name into the crash
utility back then, so it won't break if the name is changed now.
So finally get rid of the ever-confusing leading underscore.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 423d5b36
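
A minimal usage sketch (illustrative, not part of the patch): after the rename, code that looks at another CPU's lowcore simply spells the type as "struct lowcore". The field names, the lowcore_ptr[] array and the S390_lowcore macro all come from the patch below and are unchanged by this commit; the helper function and its name here are made up for illustration only.

#include <asm/lowcore.h>

/* Hypothetical helper: return the panic stack address of the given CPU. */
static unsigned long lowcore_panic_stack_of(int cpu)
{
	struct lowcore *lc = lowcore_ptr[cpu];	/* was: struct _lowcore *lc */

	return lc->panic_stack;
}
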
......@@ -16,7 +16,7 @@
#define LC_ORDER 1
#define LC_PAGES 2
struct _lowcore {
struct lowcore {
__u8 pad_0x0000[0x0014-0x0000]; /* 0x0000 */
__u32 ipl_parmblock_ptr; /* 0x0014 */
__u8 pad_0x0018[0x0080-0x0018]; /* 0x0018 */
......@@ -183,9 +183,9 @@ struct _lowcore {
__u8 vector_save_area[1024]; /* 0x1c00 */
} __packed;
#define S390_lowcore (*((struct _lowcore *) 0))
#define S390_lowcore (*((struct lowcore *) 0))
extern struct _lowcore *lowcore_ptr[];
extern struct lowcore *lowcore_ptr[];
static inline void set_prefix(__u32 address)
{
......
......@@ -60,7 +60,7 @@ static inline int test_cpu_flag(int flag)
*/
static inline int test_cpu_flag_of(int flag, int cpu)
{
struct _lowcore *lc = lowcore_ptr[cpu];
struct lowcore *lc = lowcore_ptr[cpu];
return !!(lc->cpu_flags & (1UL << flag));
}
......
......@@ -42,8 +42,8 @@ struct vdso_per_cpu_data {
extern struct vdso_data *vdso_data;
int vdso_alloc_per_cpu(struct _lowcore *lowcore);
void vdso_free_per_cpu(struct _lowcore *lowcore);
int vdso_alloc_per_cpu(struct lowcore *lowcore);
void vdso_free_per_cpu(struct lowcore *lowcore);
#endif /* __ASSEMBLY__ */
#endif /* __S390_VDSO_H__ */
......@@ -97,96 +97,96 @@ int main(void)
OFFSET(__TIMER_IDLE_EXIT, s390_idle_data, timer_idle_exit);
BLANK();
/* hardware defined lowcore locations 0x000 - 0x1ff */
OFFSET(__LC_EXT_PARAMS, _lowcore, ext_params);
OFFSET(__LC_EXT_CPU_ADDR, _lowcore, ext_cpu_addr);
OFFSET(__LC_EXT_INT_CODE, _lowcore, ext_int_code);
OFFSET(__LC_SVC_ILC, _lowcore, svc_ilc);
OFFSET(__LC_SVC_INT_CODE, _lowcore, svc_code);
OFFSET(__LC_PGM_ILC, _lowcore, pgm_ilc);
OFFSET(__LC_PGM_INT_CODE, _lowcore, pgm_code);
OFFSET(__LC_DATA_EXC_CODE, _lowcore, data_exc_code);
OFFSET(__LC_MON_CLASS_NR, _lowcore, mon_class_num);
OFFSET(__LC_PER_CODE, _lowcore, per_code);
OFFSET(__LC_PER_ATMID, _lowcore, per_atmid);
OFFSET(__LC_PER_ADDRESS, _lowcore, per_address);
OFFSET(__LC_EXC_ACCESS_ID, _lowcore, exc_access_id);
OFFSET(__LC_PER_ACCESS_ID, _lowcore, per_access_id);
OFFSET(__LC_OP_ACCESS_ID, _lowcore, op_access_id);
OFFSET(__LC_AR_MODE_ID, _lowcore, ar_mode_id);
OFFSET(__LC_TRANS_EXC_CODE, _lowcore, trans_exc_code);
OFFSET(__LC_MON_CODE, _lowcore, monitor_code);
OFFSET(__LC_SUBCHANNEL_ID, _lowcore, subchannel_id);
OFFSET(__LC_SUBCHANNEL_NR, _lowcore, subchannel_nr);
OFFSET(__LC_IO_INT_PARM, _lowcore, io_int_parm);
OFFSET(__LC_IO_INT_WORD, _lowcore, io_int_word);
OFFSET(__LC_STFL_FAC_LIST, _lowcore, stfl_fac_list);
OFFSET(__LC_STFLE_FAC_LIST, _lowcore, stfle_fac_list);
OFFSET(__LC_MCCK_CODE, _lowcore, mcck_interruption_code);
OFFSET(__LC_MCCK_FAIL_STOR_ADDR, _lowcore, failing_storage_address);
OFFSET(__LC_LAST_BREAK, _lowcore, breaking_event_addr);
OFFSET(__LC_RST_OLD_PSW, _lowcore, restart_old_psw);
OFFSET(__LC_EXT_OLD_PSW, _lowcore, external_old_psw);
OFFSET(__LC_SVC_OLD_PSW, _lowcore, svc_old_psw);
OFFSET(__LC_PGM_OLD_PSW, _lowcore, program_old_psw);
OFFSET(__LC_MCK_OLD_PSW, _lowcore, mcck_old_psw);
OFFSET(__LC_IO_OLD_PSW, _lowcore, io_old_psw);
OFFSET(__LC_RST_NEW_PSW, _lowcore, restart_psw);
OFFSET(__LC_EXT_NEW_PSW, _lowcore, external_new_psw);
OFFSET(__LC_SVC_NEW_PSW, _lowcore, svc_new_psw);
OFFSET(__LC_PGM_NEW_PSW, _lowcore, program_new_psw);
OFFSET(__LC_MCK_NEW_PSW, _lowcore, mcck_new_psw);
OFFSET(__LC_IO_NEW_PSW, _lowcore, io_new_psw);
OFFSET(__LC_EXT_PARAMS, lowcore, ext_params);
OFFSET(__LC_EXT_CPU_ADDR, lowcore, ext_cpu_addr);
OFFSET(__LC_EXT_INT_CODE, lowcore, ext_int_code);
OFFSET(__LC_SVC_ILC, lowcore, svc_ilc);
OFFSET(__LC_SVC_INT_CODE, lowcore, svc_code);
OFFSET(__LC_PGM_ILC, lowcore, pgm_ilc);
OFFSET(__LC_PGM_INT_CODE, lowcore, pgm_code);
OFFSET(__LC_DATA_EXC_CODE, lowcore, data_exc_code);
OFFSET(__LC_MON_CLASS_NR, lowcore, mon_class_num);
OFFSET(__LC_PER_CODE, lowcore, per_code);
OFFSET(__LC_PER_ATMID, lowcore, per_atmid);
OFFSET(__LC_PER_ADDRESS, lowcore, per_address);
OFFSET(__LC_EXC_ACCESS_ID, lowcore, exc_access_id);
OFFSET(__LC_PER_ACCESS_ID, lowcore, per_access_id);
OFFSET(__LC_OP_ACCESS_ID, lowcore, op_access_id);
OFFSET(__LC_AR_MODE_ID, lowcore, ar_mode_id);
OFFSET(__LC_TRANS_EXC_CODE, lowcore, trans_exc_code);
OFFSET(__LC_MON_CODE, lowcore, monitor_code);
OFFSET(__LC_SUBCHANNEL_ID, lowcore, subchannel_id);
OFFSET(__LC_SUBCHANNEL_NR, lowcore, subchannel_nr);
OFFSET(__LC_IO_INT_PARM, lowcore, io_int_parm);
OFFSET(__LC_IO_INT_WORD, lowcore, io_int_word);
OFFSET(__LC_STFL_FAC_LIST, lowcore, stfl_fac_list);
OFFSET(__LC_STFLE_FAC_LIST, lowcore, stfle_fac_list);
OFFSET(__LC_MCCK_CODE, lowcore, mcck_interruption_code);
OFFSET(__LC_MCCK_FAIL_STOR_ADDR, lowcore, failing_storage_address);
OFFSET(__LC_LAST_BREAK, lowcore, breaking_event_addr);
OFFSET(__LC_RST_OLD_PSW, lowcore, restart_old_psw);
OFFSET(__LC_EXT_OLD_PSW, lowcore, external_old_psw);
OFFSET(__LC_SVC_OLD_PSW, lowcore, svc_old_psw);
OFFSET(__LC_PGM_OLD_PSW, lowcore, program_old_psw);
OFFSET(__LC_MCK_OLD_PSW, lowcore, mcck_old_psw);
OFFSET(__LC_IO_OLD_PSW, lowcore, io_old_psw);
OFFSET(__LC_RST_NEW_PSW, lowcore, restart_psw);
OFFSET(__LC_EXT_NEW_PSW, lowcore, external_new_psw);
OFFSET(__LC_SVC_NEW_PSW, lowcore, svc_new_psw);
OFFSET(__LC_PGM_NEW_PSW, lowcore, program_new_psw);
OFFSET(__LC_MCK_NEW_PSW, lowcore, mcck_new_psw);
OFFSET(__LC_IO_NEW_PSW, lowcore, io_new_psw);
/* software defined lowcore locations 0x200 - 0xdff*/
OFFSET(__LC_SAVE_AREA_SYNC, _lowcore, save_area_sync);
OFFSET(__LC_SAVE_AREA_ASYNC, _lowcore, save_area_async);
OFFSET(__LC_SAVE_AREA_RESTART, _lowcore, save_area_restart);
OFFSET(__LC_CPU_FLAGS, _lowcore, cpu_flags);
OFFSET(__LC_RETURN_PSW, _lowcore, return_psw);
OFFSET(__LC_RETURN_MCCK_PSW, _lowcore, return_mcck_psw);
OFFSET(__LC_SYNC_ENTER_TIMER, _lowcore, sync_enter_timer);
OFFSET(__LC_ASYNC_ENTER_TIMER, _lowcore, async_enter_timer);
OFFSET(__LC_MCCK_ENTER_TIMER, _lowcore, mcck_enter_timer);
OFFSET(__LC_EXIT_TIMER, _lowcore, exit_timer);
OFFSET(__LC_USER_TIMER, _lowcore, user_timer);
OFFSET(__LC_SYSTEM_TIMER, _lowcore, system_timer);
OFFSET(__LC_STEAL_TIMER, _lowcore, steal_timer);
OFFSET(__LC_LAST_UPDATE_TIMER, _lowcore, last_update_timer);
OFFSET(__LC_LAST_UPDATE_CLOCK, _lowcore, last_update_clock);
OFFSET(__LC_INT_CLOCK, _lowcore, int_clock);
OFFSET(__LC_MCCK_CLOCK, _lowcore, mcck_clock);
OFFSET(__LC_CURRENT, _lowcore, current_task);
OFFSET(__LC_THREAD_INFO, _lowcore, thread_info);
OFFSET(__LC_KERNEL_STACK, _lowcore, kernel_stack);
OFFSET(__LC_ASYNC_STACK, _lowcore, async_stack);
OFFSET(__LC_PANIC_STACK, _lowcore, panic_stack);
OFFSET(__LC_RESTART_STACK, _lowcore, restart_stack);
OFFSET(__LC_RESTART_FN, _lowcore, restart_fn);
OFFSET(__LC_RESTART_DATA, _lowcore, restart_data);
OFFSET(__LC_RESTART_SOURCE, _lowcore, restart_source);
OFFSET(__LC_USER_ASCE, _lowcore, user_asce);
OFFSET(__LC_LPP, _lowcore, lpp);
OFFSET(__LC_CURRENT_PID, _lowcore, current_pid);
OFFSET(__LC_PERCPU_OFFSET, _lowcore, percpu_offset);
OFFSET(__LC_VDSO_PER_CPU, _lowcore, vdso_per_cpu_data);
OFFSET(__LC_MACHINE_FLAGS, _lowcore, machine_flags);
OFFSET(__LC_GMAP, _lowcore, gmap);
OFFSET(__LC_PASTE, _lowcore, paste);
OFFSET(__LC_SAVE_AREA_SYNC, lowcore, save_area_sync);
OFFSET(__LC_SAVE_AREA_ASYNC, lowcore, save_area_async);
OFFSET(__LC_SAVE_AREA_RESTART, lowcore, save_area_restart);
OFFSET(__LC_CPU_FLAGS, lowcore, cpu_flags);
OFFSET(__LC_RETURN_PSW, lowcore, return_psw);
OFFSET(__LC_RETURN_MCCK_PSW, lowcore, return_mcck_psw);
OFFSET(__LC_SYNC_ENTER_TIMER, lowcore, sync_enter_timer);
OFFSET(__LC_ASYNC_ENTER_TIMER, lowcore, async_enter_timer);
OFFSET(__LC_MCCK_ENTER_TIMER, lowcore, mcck_enter_timer);
OFFSET(__LC_EXIT_TIMER, lowcore, exit_timer);
OFFSET(__LC_USER_TIMER, lowcore, user_timer);
OFFSET(__LC_SYSTEM_TIMER, lowcore, system_timer);
OFFSET(__LC_STEAL_TIMER, lowcore, steal_timer);
OFFSET(__LC_LAST_UPDATE_TIMER, lowcore, last_update_timer);
OFFSET(__LC_LAST_UPDATE_CLOCK, lowcore, last_update_clock);
OFFSET(__LC_INT_CLOCK, lowcore, int_clock);
OFFSET(__LC_MCCK_CLOCK, lowcore, mcck_clock);
OFFSET(__LC_CURRENT, lowcore, current_task);
OFFSET(__LC_THREAD_INFO, lowcore, thread_info);
OFFSET(__LC_KERNEL_STACK, lowcore, kernel_stack);
OFFSET(__LC_ASYNC_STACK, lowcore, async_stack);
OFFSET(__LC_PANIC_STACK, lowcore, panic_stack);
OFFSET(__LC_RESTART_STACK, lowcore, restart_stack);
OFFSET(__LC_RESTART_FN, lowcore, restart_fn);
OFFSET(__LC_RESTART_DATA, lowcore, restart_data);
OFFSET(__LC_RESTART_SOURCE, lowcore, restart_source);
OFFSET(__LC_USER_ASCE, lowcore, user_asce);
OFFSET(__LC_LPP, lowcore, lpp);
OFFSET(__LC_CURRENT_PID, lowcore, current_pid);
OFFSET(__LC_PERCPU_OFFSET, lowcore, percpu_offset);
OFFSET(__LC_VDSO_PER_CPU, lowcore, vdso_per_cpu_data);
OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags);
OFFSET(__LC_GMAP, lowcore, gmap);
OFFSET(__LC_PASTE, lowcore, paste);
/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
OFFSET(__LC_DUMP_REIPL, _lowcore, ipib);
OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
/* hardware defined lowcore locations 0x1000 - 0x18ff */
OFFSET(__LC_VX_SAVE_AREA_ADDR, _lowcore, vector_save_area_addr);
OFFSET(__LC_EXT_PARAMS2, _lowcore, ext_params2);
OFFSET(__LC_FPREGS_SAVE_AREA, _lowcore, floating_pt_save_area);
OFFSET(__LC_GPREGS_SAVE_AREA, _lowcore, gpregs_save_area);
OFFSET(__LC_PSW_SAVE_AREA, _lowcore, psw_save_area);
OFFSET(__LC_PREFIX_SAVE_AREA, _lowcore, prefixreg_save_area);
OFFSET(__LC_FP_CREG_SAVE_AREA, _lowcore, fpt_creg_save_area);
OFFSET(__LC_TOD_PROGREG_SAVE_AREA, _lowcore, tod_progreg_save_area);
OFFSET(__LC_CPU_TIMER_SAVE_AREA, _lowcore, cpu_timer_save_area);
OFFSET(__LC_CLOCK_COMP_SAVE_AREA, _lowcore, clock_comp_save_area);
OFFSET(__LC_AREGS_SAVE_AREA, _lowcore, access_regs_save_area);
OFFSET(__LC_CREGS_SAVE_AREA, _lowcore, cregs_save_area);
OFFSET(__LC_PGM_TDB, _lowcore, pgm_tdb);
OFFSET(__LC_VX_SAVE_AREA_ADDR, lowcore, vector_save_area_addr);
OFFSET(__LC_EXT_PARAMS2, lowcore, ext_params2);
OFFSET(__LC_FPREGS_SAVE_AREA, lowcore, floating_pt_save_area);
OFFSET(__LC_GPREGS_SAVE_AREA, lowcore, gpregs_save_area);
OFFSET(__LC_PSW_SAVE_AREA, lowcore, psw_save_area);
OFFSET(__LC_PREFIX_SAVE_AREA, lowcore, prefixreg_save_area);
OFFSET(__LC_FP_CREG_SAVE_AREA, lowcore, fpt_creg_save_area);
OFFSET(__LC_TOD_PROGREG_SAVE_AREA, lowcore, tod_progreg_save_area);
OFFSET(__LC_CPU_TIMER_SAVE_AREA, lowcore, cpu_timer_save_area);
OFFSET(__LC_CLOCK_COMP_SAVE_AREA, lowcore, clock_comp_save_area);
OFFSET(__LC_AREGS_SAVE_AREA, lowcore, access_regs_save_area);
OFFSET(__LC_CREGS_SAVE_AREA, lowcore, cregs_save_area);
OFFSET(__LC_PGM_TDB, lowcore, pgm_tdb);
BLANK();
/* gmap/sie offsets */
OFFSET(__GMAP_ASCE, gmap, asce);
......
......@@ -83,9 +83,9 @@ struct save_area * __init save_area_boot_cpu(void)
*/
void __init save_area_add_regs(struct save_area *sa, void *regs)
{
struct _lowcore *lc;
struct lowcore *lc;
lc = (struct _lowcore *)(regs - __LC_FPREGS_SAVE_AREA);
lc = (struct lowcore *)(regs - __LC_FPREGS_SAVE_AREA);
memcpy(&sa->psw, &lc->psw_save_area, sizeof(sa->psw));
memcpy(&sa->ctrs, &lc->cregs_save_area, sizeof(sa->ctrs));
memcpy(&sa->gprs, &lc->gpregs_save_area, sizeof(sa->gprs));
......
......@@ -2041,9 +2041,9 @@ static void do_reset_calls(void)
void s390_reset_system(void)
{
struct _lowcore *lc;
struct lowcore *lc;
lc = (struct _lowcore *)(unsigned long) store_prefix();
lc = (struct lowcore *)(unsigned long) store_prefix();
/* Stack for interrupt/machine check handler */
lc->panic_stack = S390_lowcore.panic_stack;
......
......@@ -97,7 +97,7 @@ unsigned long MODULES_VADDR;
unsigned long MODULES_END;
/* An array with a pointer to the lowcore of every CPU. */
struct _lowcore *lowcore_ptr[NR_CPUS];
struct lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);
/*
......@@ -291,12 +291,12 @@ void *restart_stack __attribute__((__section__(".data")));
static void __init setup_lowcore(void)
{
struct _lowcore *lc;
struct lowcore *lc;
/*
* Setup lowcore for boot cpu
*/
BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096);
BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * 4096);
lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
lc->restart_psw.mask = PSW_KERNEL_BITS;
lc->restart_psw.addr =
......
......@@ -64,7 +64,7 @@ enum {
static DEFINE_PER_CPU(struct cpu *, cpu_device);
struct pcpu {
struct _lowcore *lowcore; /* lowcore page(s) for the cpu */
struct lowcore *lowcore; /* lowcore page(s) for the cpu */
unsigned long ec_mask; /* bit mask for ec_xxx functions */
unsigned long ec_clk; /* sigp timestamp for ec_xxx */
signed char state; /* physical cpu state */
......@@ -185,10 +185,10 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
unsigned long async_stack, panic_stack;
struct _lowcore *lc;
struct lowcore *lc;
if (pcpu != &pcpu_devices[0]) {
pcpu->lowcore = (struct _lowcore *)
pcpu->lowcore = (struct lowcore *)
__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
panic_stack = __get_free_page(GFP_KERNEL);
......@@ -240,7 +240,7 @@ static void pcpu_free_lowcore(struct pcpu *pcpu)
static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
struct _lowcore *lc = pcpu->lowcore;
struct lowcore *lc = pcpu->lowcore;
if (MACHINE_HAS_TLB_LC)
cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
......@@ -260,7 +260,7 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
struct _lowcore *lc = pcpu->lowcore;
struct lowcore *lc = pcpu->lowcore;
struct thread_info *ti = task_thread_info(tsk);
lc->kernel_stack = (unsigned long) task_stack_page(tsk)
......@@ -276,7 +276,7 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
struct _lowcore *lc = pcpu->lowcore;
struct lowcore *lc = pcpu->lowcore;
lc->restart_stack = lc->kernel_stack;
lc->restart_fn = (unsigned long) func;
......@@ -291,7 +291,7 @@ static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
void *data, unsigned long stack)
{
struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
unsigned long source_cpu = stap();
__load_psw_mask(PSW_KERNEL_BITS);
......@@ -923,7 +923,7 @@ void __init smp_prepare_boot_cpu(void)
pcpu->state = CPU_STATE_CONFIGURED;
pcpu->address = stap();
pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
S390_lowcore.percpu_offset = __per_cpu_offset[0];
smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
set_cpu_present(0, true);
......
......@@ -90,7 +90,7 @@ static void vdso_init_data(struct vdso_data *vd)
*/
#define SEGMENT_ORDER 2
int vdso_alloc_per_cpu(struct _lowcore *lowcore)
int vdso_alloc_per_cpu(struct lowcore *lowcore)
{
unsigned long segment_table, page_table, page_frame;
u32 *psal, *aste;
......@@ -138,7 +138,7 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
return -ENOMEM;
}
void vdso_free_per_cpu(struct _lowcore *lowcore)
void vdso_free_per_cpu(struct lowcore *lowcore)
{
unsigned long segment_table, page_table, page_frame;
u32 *psal, *aste;
......@@ -163,7 +163,7 @@ static void vdso_init_cr5(void)
if (!vdso_enabled)
return;
cr5 = offsetof(struct _lowcore, paste);
cr5 = offsetof(struct lowcore, paste);
__ctl_load(cr5, 5, 5);
}
......
......@@ -399,9 +399,9 @@ static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
rc = write_guest_lc(vcpu,
offsetof(struct _lowcore, restart_old_psw),
offsetof(struct lowcore, restart_old_psw),
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
return rc ? -EFAULT : 0;
......
......@@ -355,7 +355,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
* into a u32 memory representation. They will remain bits 0-31.
*/
fac = *vcpu->kvm->arch.model.fac->list >> 32;
rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
&fac, sizeof(fac));
if (rc)
return rc;
......
......@@ -163,11 +163,11 @@ static int is_swapped(unsigned long addr)
unsigned long lc;
int cpu;
if (addr < sizeof(struct _lowcore))
if (addr < sizeof(struct lowcore))
return 1;
for_each_online_cpu(cpu) {
lc = (unsigned long) lowcore_ptr[cpu];
if (addr > lc + sizeof(struct _lowcore) - 1 || addr < lc)
if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
continue;
return 1;
}
......