Commit 612f02d6 authored by Stephen Rothwell, committed by Paul Mackerras

[POWERPC] Clean up it_lp_queue.h

No more StudlyCaps.
Remove the include from a couple of places where it is no longer needed.
Use C style comments.
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent d0b79c54
@@ -16,7 +16,6 @@
 #include <asm/ptrace.h>
 #include <asm/page.h>
 #include <asm/lppaca.h>
-#include <asm/iseries/it_lp_queue.h>
 #include <asm/iseries/it_lp_reg_save.h>
 #include <asm/paca.h>
...
@@ -51,20 +51,21 @@ static unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];
 static struct HvLpEvent * get_next_hvlpevent(void)
 {
 	struct HvLpEvent * event;
-	event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
+	event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;
 
 	if (hvlpevent_is_valid(event)) {
 		/* rmb() needed only for weakly consistent machines (regatta) */
 		rmb();
 		/* Set pointer to next potential event */
-		hvlpevent_queue.xSlicCurEventPtr += ((event->xSizeMinus1 +
-				LpEventAlign) / LpEventAlign) * LpEventAlign;
+		hvlpevent_queue.hq_current_event += ((event->xSizeMinus1 +
+				IT_LP_EVENT_ALIGN) / IT_LP_EVENT_ALIGN) *
+				IT_LP_EVENT_ALIGN;
 
 		/* Wrap to beginning if no room at end */
-		if (hvlpevent_queue.xSlicCurEventPtr >
-				hvlpevent_queue.xSlicLastValidEventPtr) {
-			hvlpevent_queue.xSlicCurEventPtr =
-				hvlpevent_queue.xSlicEventStackPtr;
+		if (hvlpevent_queue.hq_current_event >
+				hvlpevent_queue.hq_last_event) {
+			hvlpevent_queue.hq_current_event =
+				hvlpevent_queue.hq_event_stack;
 		}
 	} else {
 		event = NULL;
@@ -82,10 +83,10 @@ int hvlpevent_is_pending(void)
 	if (smp_processor_id() >= spread_lpevents)
 		return 0;
 
-	next_event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
+	next_event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;
 
 	return hvlpevent_is_valid(next_event) ||
-		hvlpevent_queue.xPlicOverflowIntPending;
+		hvlpevent_queue.hq_overflow_pending;
 }
 
 static void hvlpevent_clear_valid(struct HvLpEvent * event)
@@ -95,18 +96,18 @@ static void hvlpevent_clear_valid(struct HvLpEvent * event)
 	 * ie. on 64-byte boundaries.
 	 */
 	struct HvLpEvent *tmp;
-	unsigned extra = ((event->xSizeMinus1 + LpEventAlign) /
-			LpEventAlign) - 1;
+	unsigned extra = ((event->xSizeMinus1 + IT_LP_EVENT_ALIGN) /
+			IT_LP_EVENT_ALIGN) - 1;
 
 	switch (extra) {
 	case 3:
-		tmp = (struct HvLpEvent*)((char*)event + 3 * LpEventAlign);
+		tmp = (struct HvLpEvent*)((char*)event + 3 * IT_LP_EVENT_ALIGN);
 		hvlpevent_invalidate(tmp);
 	case 2:
-		tmp = (struct HvLpEvent*)((char*)event + 2 * LpEventAlign);
+		tmp = (struct HvLpEvent*)((char*)event + 2 * IT_LP_EVENT_ALIGN);
 		hvlpevent_invalidate(tmp);
 	case 1:
-		tmp = (struct HvLpEvent*)((char*)event + 1 * LpEventAlign);
+		tmp = (struct HvLpEvent*)((char*)event + 1 * IT_LP_EVENT_ALIGN);
 		hvlpevent_invalidate(tmp);
 	}
@@ -120,7 +121,7 @@ void process_hvlpevents(struct pt_regs *regs)
 	struct HvLpEvent * event;
 
 	/* If we have recursed, just return */
-	if (!spin_trylock(&hvlpevent_queue.lock))
+	if (!spin_trylock(&hvlpevent_queue.hq_lock))
 		return;
 
 	for (;;) {
@@ -148,17 +149,17 @@ void process_hvlpevents(struct pt_regs *regs)
 				printk(KERN_INFO "Unexpected Lp Event type=%d\n", event->xType );
 
 			hvlpevent_clear_valid(event);
-		} else if (hvlpevent_queue.xPlicOverflowIntPending)
+		} else if (hvlpevent_queue.hq_overflow_pending)
 			/*
 			 * No more valid events. If overflow events are
 			 * pending process them
 			 */
-			HvCallEvent_getOverflowLpEvents(hvlpevent_queue.xIndex);
+			HvCallEvent_getOverflowLpEvents(hvlpevent_queue.hq_index);
 		else
 			break;
 	}
 
-	spin_unlock(&hvlpevent_queue.lock);
+	spin_unlock(&hvlpevent_queue.hq_lock);
 }
 
 static int set_spread_lpevents(char *str)
@@ -184,20 +185,20 @@ void setup_hvlpevent_queue(void)
 {
 	void *eventStack;
 
-	spin_lock_init(&hvlpevent_queue.lock);
+	spin_lock_init(&hvlpevent_queue.hq_lock);
 
 	/* Allocate a page for the Event Stack. */
-	eventStack = alloc_bootmem_pages(LpEventStackSize);
-	memset(eventStack, 0, LpEventStackSize);
+	eventStack = alloc_bootmem_pages(IT_LP_EVENT_STACK_SIZE);
+	memset(eventStack, 0, IT_LP_EVENT_STACK_SIZE);
 
 	/* Invoke the hypervisor to initialize the event stack */
-	HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);
+	HvCallEvent_setLpEventStack(0, eventStack, IT_LP_EVENT_STACK_SIZE);
 
-	hvlpevent_queue.xSlicEventStackPtr = (char *)eventStack;
-	hvlpevent_queue.xSlicCurEventPtr = (char *)eventStack;
-	hvlpevent_queue.xSlicLastValidEventPtr = (char *)eventStack +
-		(LpEventStackSize - LpEventMaxSize);
-	hvlpevent_queue.xIndex = 0;
+	hvlpevent_queue.hq_event_stack = eventStack;
+	hvlpevent_queue.hq_current_event = eventStack;
+	hvlpevent_queue.hq_last_event = (char *)eventStack +
+		(IT_LP_EVENT_STACK_SIZE - IT_LP_EVENT_MAX_SIZE);
+	hvlpevent_queue.hq_index = 0;
 }
 
 /* Register a handler for an LpEvent type */
...
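As a side note, the pointer-advance arithmetic in get_next_hvlpevent above rounds the consumed event's size up to the next IT_LP_EVENT_ALIGN (64-byte) slot boundary, relying on xSizeMinus1 being the event size minus one. A minimal standalone sketch of that rounding follows; the helper name event_stride is hypothetical and is not something this commit adds:

#include <stdio.h>

#define IT_LP_EVENT_ALIGN	64

/* How far hq_current_event advances for an event whose xSizeMinus1
 * field is size_minus_1: the size rounded up to a 64-byte slot. */
static unsigned long event_stride(unsigned long size_minus_1)
{
	return ((size_minus_1 + IT_LP_EVENT_ALIGN) / IT_LP_EVENT_ALIGN) *
		IT_LP_EVENT_ALIGN;
}

int main(void)
{
	/* A 200-byte event (xSizeMinus1 == 199) occupies 256 bytes, i.e.
	 * four 64-byte slots, matching "case 3" in hvlpevent_clear_valid,
	 * where extra == stride / 64 - 1 == 3. */
	printf("%lu\n", event_stride(199));
	return 0;
}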
@@ -24,7 +24,6 @@
 #include <asm/processor.h>
 #include <asm/time.h>
 #include <asm/lppaca.h>
-#include <asm/iseries/it_lp_queue.h>
 #include <asm/iseries/hv_call_xm.h>
 
 #include "processor_vpd.h"
...
@@ -29,20 +29,20 @@
 struct HvLpEvent;
 
-#define ITMaxLpQueues		8
+#define IT_LP_MAX_QUEUES	8
 
-#define NotUsed		0	// Queue will not be used by PLIC
-#define DedicatedIo	1	// Queue dedicated to IO processor specified
-#define DedicatedLp	2	// Queue dedicated to LP specified
-#define Shared		3	// Queue shared for both IO and LP
+#define IT_LP_NOT_USED		0	/* Queue will not be used by PLIC */
+#define IT_LP_DEDICATED_IO	1	/* Queue dedicated to IO processor specified */
+#define IT_LP_DEDICATED_LP	2	/* Queue dedicated to LP specified */
+#define IT_LP_SHARED		3	/* Queue shared for both IO and LP */
 
-#define LpEventStackSize	4096
-#define LpEventMaxSize		256
-#define LpEventAlign		64
+#define IT_LP_EVENT_STACK_SIZE	4096
+#define IT_LP_EVENT_MAX_SIZE	256
+#define IT_LP_EVENT_ALIGN	64
 
 struct hvlpevent_queue {
 	/*
-	 * The xSlicCurEventPtr is the pointer to the next event stack entry
+	 * The hq_current_event is the pointer to the next event stack entry
 	 * that will become valid.  The OS must peek at this entry to determine
 	 * if it is valid.  PLIC will set the valid indicator as the very last
 	 * store into that entry.
@@ -52,23 +52,23 @@ struct hvlpevent_queue {
 	 * location again.
 	 *
 	 * If the event stack fills and there are overflow events, then PLIC
-	 * will set the xPlicOverflowIntPending flag in which case the OS will
+	 * will set the hq_overflow_pending flag in which case the OS will
 	 * have to fetch the additional LP events once they have drained the
 	 * event stack.
 	 *
 	 * The first 16-bytes are known by both the OS and PLIC.  The remainder
 	 * of the cache line is for use by the OS.
 	 */
-	u8	xPlicOverflowIntPending;	// 0x00 Overflow events are pending
-	u8	xPlicStatus;			// 0x01 DedicatedIo or DedicatedLp or NotUsed
-	u16	xSlicLogicalProcIndex;		// 0x02 Logical Proc Index for correlation
-	u8	xPlicRsvd[12];			// 0x04
-	char	*xSlicCurEventPtr;		// 0x10
-	char	*xSlicLastValidEventPtr;	// 0x18
-	char	*xSlicEventStackPtr;		// 0x20
-	u8	xIndex;				// 0x28 unique sequential index.
-	u8	xSlicRsvd[3];			// 0x29-2b
-	spinlock_t lock;
+	u8	hq_overflow_pending;	/* 0x00 Overflow events are pending */
+	u8	hq_status;		/* 0x01 DedicatedIo or DedicatedLp or NotUsed */
+	u16	hq_proc_index;		/* 0x02 Logical Proc Index for correlation */
+	u8	hq_reserved1[12];	/* 0x04 */
+	char	*hq_current_event;	/* 0x10 */
+	char	*hq_last_event;		/* 0x18 */
+	char	*hq_event_stack;	/* 0x20 */
+	u8	hq_index;		/* 0x28 unique sequential index. */
+	u8	hq_reserved2[3];	/* 0x29-2b */
+	spinlock_t hq_lock;
 };
 
 extern struct hvlpevent_queue hvlpevent_queue;
...
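To see the renamed queue fields working together, here is a minimal consumer sketch that mirrors get_next_hvlpevent from the diff above. The hvlpevent_is_valid() helper and the HvLpEvent/hvlpevent_queue declarations are assumed from the surrounding context, and the function name peek_next_event is hypothetical, not part of this commit:

/* Hypothetical sketch of the peek/advance/wrap cycle with the new
 * hq_* names; assumes the declarations from it_lp_queue.h above and
 * the hvlpevent_is_valid() helper used in lpevents.c.  On weakly
 * ordered machines an rmb() is also needed after the validity check,
 * as the real code notes. */
static struct HvLpEvent *peek_next_event(void)
{
	struct HvLpEvent *event;

	event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;
	if (!hvlpevent_is_valid(event))
		return NULL;	/* PLIC has not published the next entry yet */

	/* Advance past this event, rounded up to a 64-byte slot boundary */
	hvlpevent_queue.hq_current_event += ((event->xSizeMinus1 +
			IT_LP_EVENT_ALIGN) / IT_LP_EVENT_ALIGN) *
			IT_LP_EVENT_ALIGN;

	/* Wrap to the start of the stack if a maximum-size event would
	 * no longer fit at the end */
	if (hvlpevent_queue.hq_current_event > hvlpevent_queue.hq_last_event)
		hvlpevent_queue.hq_current_event = hvlpevent_queue.hq_event_stack;

	return event;
}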