Commit e17d416b authored by Dean Nelson's avatar Dean Nelson Committed by Linus Torvalds

sgi-xp: isolate xpc_vars_part structure to sn2 only

Isolate the xpc_vars_part structure of XPC's reserved page to sn2 only.
Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 94bd2708
...@@ -227,9 +227,9 @@ xpc_disallow_hb(short partid, struct xpc_vars *vars) ...@@ -227,9 +227,9 @@ xpc_disallow_hb(short partid, struct xpc_vars *vars)
* itself from that partition. It is desirable that the size of this structure * itself from that partition. It is desirable that the size of this structure
* evenly divides into a 128-byte cacheline, such that none of the entries in * evenly divides into a 128-byte cacheline, such that none of the entries in
* this array crosses a 128-byte cacheline boundary. As it is now, each entry * this array crosses a 128-byte cacheline boundary. As it is now, each entry
* occupies a 64-byte cacheline. * occupies 64-bytes.
*/ */
struct xpc_vars_part { struct xpc_vars_part_sn2 {
u64 magic; u64 magic;
u64 openclose_args_pa; /* physical address of open and close args */ u64 openclose_args_pa; /* physical address of open and close args */
...@@ -265,8 +265,6 @@ struct xpc_vars_part { ...@@ -265,8 +265,6 @@ struct xpc_vars_part {
#define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xp_nasid_mask_words) #define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xp_nasid_mask_words)
#define XPC_RP_VARS(_rp) ((struct xpc_vars *)(XPC_RP_MACH_NASIDS(_rp) + \ #define XPC_RP_VARS(_rp) ((struct xpc_vars *)(XPC_RP_MACH_NASIDS(_rp) + \
xp_nasid_mask_words)) xp_nasid_mask_words))
#define XPC_RP_VARS_PART(_rp) ((struct xpc_vars_part *) \
((u8 *)XPC_RP_VARS(_rp) + XPC_RP_VARS_SIZE))
/* /*
* Functions registered by add_timer() or called by kernel_thread() only * Functions registered by add_timer() or called by kernel_thread() only
...@@ -541,13 +539,6 @@ struct xpc_partition { ...@@ -541,13 +539,6 @@ struct xpc_partition {
wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */ wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */
atomic_t references; /* #of references to infrastructure */ atomic_t references; /* #of references to infrastructure */
/*
* NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN
* XPC SETS UP THE NECESSARY INFRASTRUCTURE TO SUPPORT CROSS PARTITION
* COMMUNICATION. ALL OF THE FOLLOWING FIELDS WILL BE CLEARED. (THE
* 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.)
*/
u8 nchannels; /* #of defined channels supported */ u8 nchannels; /* #of defined channels supported */
atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */ atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */
atomic_t nchannels_engaged; /* #of channels engaged with remote part */ atomic_t nchannels_engaged; /* #of channels engaged with remote part */
...@@ -613,7 +604,7 @@ struct xpc_partition { ...@@ -613,7 +604,7 @@ struct xpc_partition {
* dropped IPIs. These occur whenever an IPI amo write doesn't complete until * dropped IPIs. These occur whenever an IPI amo write doesn't complete until
* after the IPI was received. * after the IPI was received.
*/ */
#define XPC_P_DROPPED_IPI_WAIT (0.25 * HZ) #define XPC_P_DROPPED_IPI_WAIT_INTERVAL (0.25 * HZ)
/* number of seconds to wait for other partitions to disengage */ /* number of seconds to wait for other partitions to disengage */
#define XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT 90 #define XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT 90
...@@ -637,13 +628,16 @@ extern void xpc_activate_partition(struct xpc_partition *); ...@@ -637,13 +628,16 @@ extern void xpc_activate_partition(struct xpc_partition *);
extern void xpc_activate_kthreads(struct xpc_channel *, int); extern void xpc_activate_kthreads(struct xpc_channel *, int);
extern void xpc_create_kthreads(struct xpc_channel *, int, int); extern void xpc_create_kthreads(struct xpc_channel *, int, int);
extern void xpc_disconnect_wait(int); extern void xpc_disconnect_wait(int);
extern enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *); extern enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *);
extern enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *);
extern u64 (*xpc_get_IPI_flags) (struct xpc_partition *);
extern struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *);
extern enum xp_retval (*xpc_setup_infrastructure) (struct xpc_partition *);
extern void (*xpc_teardown_infrastructure) (struct xpc_partition *);
/* found in xpc_sn2.c */ /* found in xpc_sn2.c */
extern void xpc_init_sn2(void); extern void xpc_init_sn2(void);
extern struct xpc_vars *xpc_vars; /*>>> eliminate from here */ extern struct xpc_vars *xpc_vars; /*>>> eliminate from here */
extern struct xpc_vars_part *xpc_vars_part; /*>>> eliminate from here */
/* found in xpc_uv.c */ /* found in xpc_uv.c */
extern void xpc_init_uv(void); extern void xpc_init_uv(void);
...@@ -670,6 +664,7 @@ extern void xpc_deactivate_partition(const int, struct xpc_partition *, ...@@ -670,6 +664,7 @@ extern void xpc_deactivate_partition(const int, struct xpc_partition *,
extern enum xp_retval xpc_initiate_partid_to_nasids(short, void *); extern enum xp_retval xpc_initiate_partid_to_nasids(short, void *);
/* found in xpc_channel.c */ /* found in xpc_channel.c */
extern void *xpc_kzalloc_cacheline_aligned(size_t, gfp_t, void **);
extern void xpc_initiate_connect(int); extern void xpc_initiate_connect(int);
extern void xpc_initiate_disconnect(int); extern void xpc_initiate_disconnect(int);
extern enum xp_retval xpc_initiate_allocate(short, int, u32, void **); extern enum xp_retval xpc_initiate_allocate(short, int, u32, void **);
...@@ -677,8 +672,6 @@ extern enum xp_retval xpc_initiate_send(short, int, void *); ...@@ -677,8 +672,6 @@ extern enum xp_retval xpc_initiate_send(short, int, void *);
extern enum xp_retval xpc_initiate_send_notify(short, int, void *, extern enum xp_retval xpc_initiate_send_notify(short, int, void *,
xpc_notify_func, void *); xpc_notify_func, void *);
extern void xpc_initiate_received(short, int, void *); extern void xpc_initiate_received(short, int, void *);
extern enum xp_retval xpc_setup_infrastructure(struct xpc_partition *);
extern enum xp_retval xpc_pull_remote_vars_part(struct xpc_partition *);
extern void xpc_process_channel_activity(struct xpc_partition *); extern void xpc_process_channel_activity(struct xpc_partition *);
extern void xpc_connected_callout(struct xpc_channel *); extern void xpc_connected_callout(struct xpc_channel *);
extern void xpc_deliver_msg(struct xpc_channel *); extern void xpc_deliver_msg(struct xpc_channel *);
...@@ -686,7 +679,6 @@ extern void xpc_disconnect_channel(const int, struct xpc_channel *, ...@@ -686,7 +679,6 @@ extern void xpc_disconnect_channel(const int, struct xpc_channel *,
enum xp_retval, unsigned long *); enum xp_retval, unsigned long *);
extern void xpc_disconnect_callout(struct xpc_channel *, enum xp_retval); extern void xpc_disconnect_callout(struct xpc_channel *, enum xp_retval);
extern void xpc_partition_going_down(struct xpc_partition *, enum xp_retval); extern void xpc_partition_going_down(struct xpc_partition *, enum xp_retval);
extern void xpc_teardown_infrastructure(struct xpc_partition *);
static inline void static inline void
xpc_wakeup_channel_mgr(struct xpc_partition *part) xpc_wakeup_channel_mgr(struct xpc_partition *part)
......
This diff is collapsed.
...@@ -176,6 +176,12 @@ static struct notifier_block xpc_die_notifier = { ...@@ -176,6 +176,12 @@ static struct notifier_block xpc_die_notifier = {
}; };
enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *rp); enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *rp);
enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *part);
u64 (*xpc_get_IPI_flags) (struct xpc_partition *part);
struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *ch);
enum xp_retval (*xpc_setup_infrastructure) (struct xpc_partition *part);
void (*xpc_teardown_infrastructure) (struct xpc_partition *part);
/* /*
* Timer function to enforce the timelimit on the partition disengage request. * Timer function to enforce the timelimit on the partition disengage request.
...@@ -312,38 +318,9 @@ xpc_initiate_discovery(void *ignore) ...@@ -312,38 +318,9 @@ xpc_initiate_discovery(void *ignore)
return 0; return 0;
} }
/*
 * Establish first contact with the remote partition. This involves pulling
 * the XPC per partition variables from the remote partition and waiting for
 * the remote partition to pull ours.
 *
 * Returns xpSuccess (via xpc_mark_partition_active()) once contact is made,
 * or the failure reason if the pull hard-fails or the partition begins
 * deactivating while we wait.
 */
static enum xp_retval
xpc_make_first_contact(struct xpc_partition *part)
{
	enum xp_retval ret;

	/*
	 * Retry the pull until it succeeds. Any result other than xpRetry
	 * is a hard failure: deactivate the partition and propagate it.
	 */
	while ((ret = xpc_pull_remote_vars_part(part)) != xpSuccess) {
		if (ret != xpRetry) {
			XPC_DEACTIVATE_PARTITION(part, ret);
			return ret;
		}

		dev_dbg(xpc_chan, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		/* bail out early if this partition started deactivating */
		if (part->act_state == XPC_P_DEACTIVATING)
			return part->reason;
	}

	return xpc_mark_partition_active(part);
}
/* /*
* The first kthread assigned to a newly activated partition is the one * The first kthread assigned to a newly activated partition is the one
* created by XPC HB with which it calls xpc_partition_up(). XPC hangs on to * created by XPC HB with which it calls xpc_activating(). XPC hangs on to
* that kthread until the partition is brought down, at which time that kthread * that kthread until the partition is brought down, at which time that kthread
* returns back to XPC HB. (The return of that kthread will signify to XPC HB * returns back to XPC HB. (The return of that kthread will signify to XPC HB
* that XPC has dismantled all communication infrastructure for the associated * that XPC has dismantled all communication infrastructure for the associated
...@@ -393,41 +370,10 @@ xpc_channel_mgr(struct xpc_partition *part) ...@@ -393,41 +370,10 @@ xpc_channel_mgr(struct xpc_partition *part)
* upped partition. * upped partition.
* *
* The kthread that was created by XPC HB and which setup the XPC * The kthread that was created by XPC HB and which setup the XPC
* infrastructure will remain assigned to the partition until the partition * infrastructure will remain assigned to the partition becoming the channel
* goes down. At which time the kthread will teardown the XPC infrastructure * manager for that partition until the partition is deactivating, at which
* and then exit. * time the kthread will teardown the XPC infrastructure and then exit.
*
* XPC HB will put the remote partition's XPC per partition specific variables
* physical address into xpc_partitions[partid].remote_vars_part_pa prior to
* calling xpc_partition_up().
*/ */
/*
 * Bring up XPC communication with a newly upped remote partition: set up the
 * local communication infrastructure, make first contact, then serve as the
 * channel manager for the partition until it goes down, at which point the
 * infrastructure is dismantled again. Called from XPC HB's kthread.
 */
static void
xpc_partition_up(struct xpc_partition *part)
{
	/* no channels may exist yet; xpc_setup_infrastructure() creates them */
	DBUG_ON(part->channels != NULL);

	dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part));

	if (xpc_setup_infrastructure(part) != xpSuccess)
		return;

	/*
	 * The kthread that XPC HB called us with will become the
	 * channel manager for this partition. It will not return
	 * back to XPC HB until the partition's XPC infrastructure
	 * has been dismantled.
	 */

	(void)xpc_part_ref(part);	/* this will always succeed */

	/* xpc_channel_mgr() does not return until the partition goes down */
	if (xpc_make_first_contact(part) == xpSuccess)
		xpc_channel_mgr(part);

	xpc_part_deref(part);

	xpc_teardown_infrastructure(part);
}
static int static int
xpc_activating(void *__partid) xpc_activating(void *__partid)
{ {
...@@ -453,7 +399,7 @@ xpc_activating(void *__partid) ...@@ -453,7 +399,7 @@ xpc_activating(void *__partid)
XPC_SET_REASON(part, 0, 0); XPC_SET_REASON(part, 0, 0);
spin_unlock_irqrestore(&part->act_lock, irq_flags); spin_unlock_irqrestore(&part->act_lock, irq_flags);
dev_dbg(xpc_part, "bringing partition %d up\n", partid); dev_dbg(xpc_part, "activating partition %d\n", partid);
/* /*
* Register the remote partition's AMOs with SAL so it can handle * Register the remote partition's AMOs with SAL so it can handle
...@@ -467,7 +413,7 @@ xpc_activating(void *__partid) ...@@ -467,7 +413,7 @@ xpc_activating(void *__partid)
*/ */
if (sn_register_xp_addr_region(part->remote_amos_page_pa, if (sn_register_xp_addr_region(part->remote_amos_page_pa,
PAGE_SIZE, 1) < 0) { PAGE_SIZE, 1) < 0) {
dev_warn(xpc_part, "xpc_partition_up(%d) failed to register " dev_warn(xpc_part, "xpc_activating(%d) failed to register "
"xp_addr region\n", partid); "xp_addr region\n", partid);
spin_lock_irqsave(&part->act_lock, irq_flags); spin_lock_irqsave(&part->act_lock, irq_flags);
...@@ -481,11 +427,18 @@ xpc_activating(void *__partid) ...@@ -481,11 +427,18 @@ xpc_activating(void *__partid)
xpc_allow_hb(partid, xpc_vars); xpc_allow_hb(partid, xpc_vars);
xpc_IPI_send_activated(part); xpc_IPI_send_activated(part);
/* if (xpc_setup_infrastructure(part) == xpSuccess) {
* xpc_partition_up() holds this thread and marks this partition as (void)xpc_part_ref(part); /* this will always succeed */
* XPC_P_ACTIVE by calling xpc_hb_mark_active().
*/ if (xpc_make_first_contact(part) == xpSuccess) {
(void)xpc_partition_up(part); xpc_mark_partition_active(part);
xpc_channel_mgr(part);
/* won't return until partition is deactivating */
}
xpc_part_deref(part);
xpc_teardown_infrastructure(part);
}
xpc_disallow_hb(partid, xpc_vars); xpc_disallow_hb(partid, xpc_vars);
xpc_mark_partition_inactive(part); xpc_mark_partition_inactive(part);
...@@ -568,7 +521,7 @@ xpc_dropped_IPI_check(struct xpc_partition *part) ...@@ -568,7 +521,7 @@ xpc_dropped_IPI_check(struct xpc_partition *part)
xpc_check_for_channel_activity(part); xpc_check_for_channel_activity(part);
part->dropped_IPI_timer.expires = jiffies + part->dropped_IPI_timer.expires = jiffies +
XPC_P_DROPPED_IPI_WAIT; XPC_P_DROPPED_IPI_WAIT_INTERVAL;
add_timer(&part->dropped_IPI_timer); add_timer(&part->dropped_IPI_timer);
xpc_part_deref(part); xpc_part_deref(part);
} }
......
...@@ -486,6 +486,7 @@ xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version, ...@@ -486,6 +486,7 @@ xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version,
dev_dbg(xpc_part, " last_heartbeat = 0x%016lx\n", dev_dbg(xpc_part, " last_heartbeat = 0x%016lx\n",
part->last_heartbeat); part->last_heartbeat);
/* >>> remote_vars_part_pa and vars_part_pa are sn2 only!!! */
part->remote_vars_part_pa = remote_vars->vars_part_pa; part->remote_vars_part_pa = remote_vars->vars_part_pa;
dev_dbg(xpc_part, " remote_vars_part_pa = 0x%016lx\n", dev_dbg(xpc_part, " remote_vars_part_pa = 0x%016lx\n",
part->remote_vars_part_pa); part->remote_vars_part_pa);
......
This diff is collapsed.
...@@ -36,10 +36,58 @@ xpc_rsvd_page_init_uv(struct xpc_rsvd_page *rp) ...@@ -36,10 +36,58 @@ xpc_rsvd_page_init_uv(struct xpc_rsvd_page *rp)
return xpSuccess; return xpSuccess;
} }
/*
 * Setup the infrastructure necessary to support XPartition Communication
 * between the specified remote partition and the local one.
 *
 * UV stub: always fails with xpUnsupported until implemented.
 */
static enum xp_retval
xpc_setup_infrastructure_uv(struct xpc_partition *part)
{
	/* >>> this function needs fleshing out */
	return xpUnsupported;
}
/*
 * Teardown the infrastructure necessary to support XPartition Communication
 * between the specified remote partition and the local one.
 *
 * UV stub: currently a no-op until implemented.
 */
static void
xpc_teardown_infrastructure_uv(struct xpc_partition *part)
{
	/* >>> this function needs fleshing out */
	/* (redundant bare "return;" at end of void function removed) */
}
/*
 * Make first contact with the specified remote partition.
 *
 * UV stub: always fails with xpUnsupported until implemented.
 */
static enum xp_retval
xpc_make_first_contact_uv(struct xpc_partition *part)
{
	/* >>> this function needs fleshing out */
	return xpUnsupported;
}
/*
 * Return the IPI flags for the specified remote partition.
 *
 * UV stub: always reports no flags pending until implemented.
 */
static u64
xpc_get_IPI_flags_uv(struct xpc_partition *part)
{
	/* >>> this function needs fleshing out */
	return 0UL;
}
/*
 * Get the next deliverable message for the specified channel.
 *
 * UV stub: always reports no message available until implemented.
 */
static struct xpc_msg *
xpc_get_deliverable_msg_uv(struct xpc_channel *ch)
{
	/* >>> this function needs fleshing out */
	return NULL;
}
void void
xpc_init_uv(void) xpc_init_uv(void)
{ {
xpc_rsvd_page_init = xpc_rsvd_page_init_uv; xpc_rsvd_page_init = xpc_rsvd_page_init_uv;
xpc_setup_infrastructure = xpc_setup_infrastructure_uv;
xpc_teardown_infrastructure = xpc_teardown_infrastructure_uv;
xpc_make_first_contact = xpc_make_first_contact_uv;
xpc_get_IPI_flags = xpc_get_IPI_flags_uv;
xpc_get_deliverable_msg = xpc_get_deliverable_msg_uv;
} }
void void
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment