Commit a812dcc3 authored by Dean Nelson, committed by Linus Torvalds

sgi-xp: add usage of GRU driver by xpc_remote_memcpy()

Add UV support to xpc_remote_memcpy(), which involves interfacing to the
GRU driver.
Signed-off-by: Dean Nelson <dcn@sgi.com>
Cc: Jack Steiner <steiner@sgi.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 261f3b49
...@@ -207,7 +207,9 @@ enum xp_retval { ...@@ -207,7 +207,9 @@ enum xp_retval {
xpUnsupported, /* 56: unsupported functionality or resource */ xpUnsupported, /* 56: unsupported functionality or resource */
xpNeedMoreInfo, /* 57: more info is needed by SAL */ xpNeedMoreInfo, /* 57: more info is needed by SAL */
xpUnknownReason /* 58: unknown reason - must be last in enum */ xpGruCopyError, /* 58: gru_copy_gru() returned error */
xpUnknownReason /* 59: unknown reason - must be last in enum */
}; };
/* /*
...@@ -349,7 +351,9 @@ extern short xp_max_npartitions; ...@@ -349,7 +351,9 @@ extern short xp_max_npartitions;
extern short xp_partition_id; extern short xp_partition_id;
extern u8 xp_region_size; extern u8 xp_region_size;
extern enum xp_retval (*xp_remote_memcpy) (void *, const void *, size_t); extern unsigned long (*xp_pa) (void *);
extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long,
size_t);
extern int (*xp_cpu_to_nasid) (int); extern int (*xp_cpu_to_nasid) (int);
extern u64 xp_nofault_PIOR_target; extern u64 xp_nofault_PIOR_target;
......
...@@ -41,7 +41,11 @@ EXPORT_SYMBOL_GPL(xp_partition_id); ...@@ -41,7 +41,11 @@ EXPORT_SYMBOL_GPL(xp_partition_id);
u8 xp_region_size; u8 xp_region_size;
EXPORT_SYMBOL_GPL(xp_region_size); EXPORT_SYMBOL_GPL(xp_region_size);
enum xp_retval (*xp_remote_memcpy) (void *dst, const void *src, size_t len); unsigned long (*xp_pa) (void *addr);
EXPORT_SYMBOL_GPL(xp_pa);
enum xp_retval (*xp_remote_memcpy) (unsigned long dst_gpa,
const unsigned long src_gpa, size_t len);
EXPORT_SYMBOL_GPL(xp_remote_memcpy); EXPORT_SYMBOL_GPL(xp_remote_memcpy);
int (*xp_cpu_to_nasid) (int cpuid); int (*xp_cpu_to_nasid) (int cpuid);
......
...@@ -63,7 +63,7 @@ xp_register_nofault_code_sn2(void) ...@@ -63,7 +63,7 @@ xp_register_nofault_code_sn2(void)
return xpSuccess; return xpSuccess;
} }
void static void
xp_unregister_nofault_code_sn2(void) xp_unregister_nofault_code_sn2(void)
{ {
u64 func_addr = *(u64 *)xp_nofault_PIOR; u64 func_addr = *(u64 *)xp_nofault_PIOR;
...@@ -74,45 +74,42 @@ xp_unregister_nofault_code_sn2(void) ...@@ -74,45 +74,42 @@ xp_unregister_nofault_code_sn2(void)
err_func_addr, 1, 0); err_func_addr, 1, 0);
} }
/*
* Convert a virtual memory address to a physical memory address.
*/
static unsigned long
xp_pa_sn2(void *addr)
{
return __pa(addr);
}
/* /*
* Wrapper for bte_copy(). * Wrapper for bte_copy().
* *
* vdst - virtual address of the destination of the transfer. * dst_pa - physical address of the destination of the transfer.
* psrc - physical address of the source of the transfer. * src_pa - physical address of the source of the transfer.
* len - number of bytes to transfer from source to destination. * len - number of bytes to transfer from source to destination.
* *
* Note: xp_remote_memcpy_sn2() should never be called while holding a spinlock. * Note: xp_remote_memcpy_sn2() should never be called while holding a spinlock.
*/ */
static enum xp_retval static enum xp_retval
xp_remote_memcpy_sn2(void *vdst, const void *psrc, size_t len) xp_remote_memcpy_sn2(unsigned long dst_pa, const unsigned long src_pa,
size_t len)
{ {
bte_result_t ret; bte_result_t ret;
u64 pdst = ia64_tpa(vdst);
/* ??? What are the rules governing the src and dst addresses passed in?
* ??? Currently we're assuming that dst is a virtual address and src
* ??? is a physical address, is this appropriate? Can we allow them to
* ??? be whatever and we make the change here without damaging the
* ??? addresses?
*/
/* ret = bte_copy(src_pa, dst_pa, len, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
* Ensure that the physically mapped memory is contiguous.
*
* We do this by ensuring that the memory is from region 7 only.
* If the need should arise to use memory from one of the other
* regions, then modify the BUG_ON() statement to ensure that the
* memory from that region is always physically contiguous.
*/
BUG_ON(REGION_NUMBER(vdst) != RGN_KERNEL);
ret = bte_copy((u64)psrc, pdst, len, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
if (ret == BTE_SUCCESS) if (ret == BTE_SUCCESS)
return xpSuccess; return xpSuccess;
if (is_shub2()) if (is_shub2()) {
dev_err(xp, "bte_copy() on shub2 failed, error=0x%x\n", ret); dev_err(xp, "bte_copy() on shub2 failed, error=0x%x dst_pa="
else "0x%016lx src_pa=0x%016lx len=%ld\n", ret, dst_pa,
dev_err(xp, "bte_copy() failed, error=%d\n", ret); src_pa, len);
} else {
dev_err(xp, "bte_copy() failed, error=%d dst_pa=0x%016lx "
"src_pa=0x%016lx len=%ld\\n", ret, dst_pa, src_pa, len);
}
return xpBteCopyError; return xpBteCopyError;
} }
...@@ -132,6 +129,7 @@ xp_init_sn2(void) ...@@ -132,6 +129,7 @@ xp_init_sn2(void)
xp_partition_id = sn_partition_id; xp_partition_id = sn_partition_id;
xp_region_size = sn_region_size; xp_region_size = sn_region_size;
xp_pa = xp_pa_sn2;
xp_remote_memcpy = xp_remote_memcpy_sn2; xp_remote_memcpy = xp_remote_memcpy_sn2;
xp_cpu_to_nasid = xp_cpu_to_nasid_sn2; xp_cpu_to_nasid = xp_cpu_to_nasid_sn2;
......
...@@ -13,13 +13,33 @@ ...@@ -13,13 +13,33 @@
* *
*/ */
#include <linux/device.h>
#include <asm/uv/uv_hub.h>
#include "../sgi-gru/grukservices.h"
#include "xp.h" #include "xp.h"
/*
* Convert a virtual memory address to a physical memory address.
*/
static unsigned long
xp_pa_uv(void *addr)
{
return uv_gpa(addr);
}
static enum xp_retval static enum xp_retval
xp_remote_memcpy_uv(void *vdst, const void *psrc, size_t len) xp_remote_memcpy_uv(unsigned long dst_gpa, const unsigned long src_gpa,
size_t len)
{ {
/* !!! this function needs fleshing out */ int ret;
return xpUnsupported;
ret = gru_copy_gpa(dst_gpa, src_gpa, len);
if (ret == 0)
return xpSuccess;
dev_err(xp, "gru_copy_gpa() failed, dst_gpa=0x%016lx src_gpa=0x%016lx "
"len=%ld\n", dst_gpa, src_gpa, len);
return xpGruCopyError;
} }
enum xp_retval enum xp_retval
...@@ -29,6 +49,7 @@ xp_init_uv(void) ...@@ -29,6 +49,7 @@ xp_init_uv(void)
xp_max_npartitions = XP_MAX_NPARTITIONS_UV; xp_max_npartitions = XP_MAX_NPARTITIONS_UV;
xp_pa = xp_pa_uv;
xp_remote_memcpy = xp_remote_memcpy_uv; xp_remote_memcpy = xp_remote_memcpy_uv;
return xpSuccess; return xpSuccess;
......
...@@ -91,8 +91,8 @@ struct xpc_rsvd_page { ...@@ -91,8 +91,8 @@ struct xpc_rsvd_page {
u8 version; u8 version;
u8 pad1[3]; /* align to next u64 in 1st 64-byte cacheline */ u8 pad1[3]; /* align to next u64 in 1st 64-byte cacheline */
union { union {
u64 vars_pa; /* physical address of struct xpc_vars */ unsigned long vars_pa; /* phys address of struct xpc_vars */
u64 activate_mq_gpa; /* global phys address of activate_mq */ unsigned long activate_mq_gpa; /* gru phy addr of activate_mq */
} sn; } sn;
unsigned long ts_jiffies; /* timestamp when rsvd pg was setup by XPC */ unsigned long ts_jiffies; /* timestamp when rsvd pg was setup by XPC */
u64 pad2[10]; /* align to last u64 in 2nd 64-byte cacheline */ u64 pad2[10]; /* align to last u64 in 2nd 64-byte cacheline */
...@@ -122,8 +122,8 @@ struct xpc_vars_sn2 { ...@@ -122,8 +122,8 @@ struct xpc_vars_sn2 {
u64 heartbeat_offline; /* if 0, heartbeat should be changing */ u64 heartbeat_offline; /* if 0, heartbeat should be changing */
int activate_IRQ_nasid; int activate_IRQ_nasid;
int activate_IRQ_phys_cpuid; int activate_IRQ_phys_cpuid;
u64 vars_part_pa; unsigned long vars_part_pa;
u64 amos_page_pa; /* paddr of page of amos from MSPEC driver */ unsigned long amos_page_pa;/* paddr of page of amos from MSPEC driver */
struct amo *amos_page; /* vaddr of page of amos from MSPEC driver */ struct amo *amos_page; /* vaddr of page of amos from MSPEC driver */
}; };
...@@ -142,10 +142,10 @@ struct xpc_vars_sn2 { ...@@ -142,10 +142,10 @@ struct xpc_vars_sn2 {
struct xpc_vars_part_sn2 { struct xpc_vars_part_sn2 {
u64 magic; u64 magic;
u64 openclose_args_pa; /* physical address of open and close args */ unsigned long openclose_args_pa; /* phys addr of open and close args */
u64 GPs_pa; /* physical address of Get/Put values */ unsigned long GPs_pa; /* physical address of Get/Put values */
u64 chctl_amo_pa; /* physical address of chctl flags' amo */ unsigned long chctl_amo_pa; /* physical address of chctl flags' amo */
int notify_IRQ_nasid; /* nasid of where to send notify IRQs */ int notify_IRQ_nasid; /* nasid of where to send notify IRQs */
int notify_IRQ_phys_cpuid; /* CPUID of where to send notify IRQs */ int notify_IRQ_phys_cpuid; /* CPUID of where to send notify IRQs */
...@@ -213,7 +213,7 @@ struct xpc_openclose_args { ...@@ -213,7 +213,7 @@ struct xpc_openclose_args {
u16 msg_size; /* sizeof each message entry */ u16 msg_size; /* sizeof each message entry */
u16 remote_nentries; /* #of message entries in remote msg queue */ u16 remote_nentries; /* #of message entries in remote msg queue */
u16 local_nentries; /* #of message entries in local msg queue */ u16 local_nentries; /* #of message entries in local msg queue */
u64 local_msgqueue_pa; /* physical address of local message queue */ unsigned long local_msgqueue_pa; /* phys addr of local message queue */
}; };
#define XPC_OPENCLOSE_ARGS_SIZE \ #define XPC_OPENCLOSE_ARGS_SIZE \
...@@ -366,8 +366,8 @@ struct xpc_channel { ...@@ -366,8 +366,8 @@ struct xpc_channel {
void *remote_msgqueue_base; /* base address of kmalloc'd space */ void *remote_msgqueue_base; /* base address of kmalloc'd space */
struct xpc_msg *remote_msgqueue; /* cached copy of remote partition's */ struct xpc_msg *remote_msgqueue; /* cached copy of remote partition's */
/* local message queue */ /* local message queue */
u64 remote_msgqueue_pa; /* phys addr of remote partition's */ unsigned long remote_msgqueue_pa; /* phys addr of remote partition's */
/* local message queue */ /* local message queue */
atomic_t references; /* #of external references to queues */ atomic_t references; /* #of external references to queues */
...@@ -491,12 +491,12 @@ xpc_any_msg_chctl_flags_set(union xpc_channel_ctl_flags *chctl) ...@@ -491,12 +491,12 @@ xpc_any_msg_chctl_flags_set(union xpc_channel_ctl_flags *chctl)
*/ */
struct xpc_partition_sn2 { struct xpc_partition_sn2 {
u64 remote_amos_page_pa; /* phys addr of partition's amos page */ unsigned long remote_amos_page_pa; /* paddr of partition's amos page */
int activate_IRQ_nasid; /* active partition's act/deact nasid */ int activate_IRQ_nasid; /* active partition's act/deact nasid */
int activate_IRQ_phys_cpuid; /* active part's act/deact phys cpuid */ int activate_IRQ_phys_cpuid; /* active part's act/deact phys cpuid */
u64 remote_vars_pa; /* phys addr of partition's vars */ unsigned long remote_vars_pa; /* phys addr of partition's vars */
u64 remote_vars_part_pa; /* phys addr of partition's vars part */ unsigned long remote_vars_part_pa; /* paddr of partition's vars part */
u8 remote_vars_version; /* version# of partition's vars */ u8 remote_vars_version; /* version# of partition's vars */
void *local_GPs_base; /* base address of kmalloc'd space */ void *local_GPs_base; /* base address of kmalloc'd space */
...@@ -504,10 +504,10 @@ struct xpc_partition_sn2 { ...@@ -504,10 +504,10 @@ struct xpc_partition_sn2 {
void *remote_GPs_base; /* base address of kmalloc'd space */ void *remote_GPs_base; /* base address of kmalloc'd space */
struct xpc_gp_sn2 *remote_GPs; /* copy of remote partition's local */ struct xpc_gp_sn2 *remote_GPs; /* copy of remote partition's local */
/* Get/Put values */ /* Get/Put values */
u64 remote_GPs_pa; /* phys address of remote partition's local */ unsigned long remote_GPs_pa; /* phys addr of remote partition's local */
/* Get/Put values */ /* Get/Put values */
u64 remote_openclose_args_pa; /* phys addr of remote's args */ unsigned long remote_openclose_args_pa; /* phys addr of remote's args */
int notify_IRQ_nasid; /* nasid of where to send notify IRQs */ int notify_IRQ_nasid; /* nasid of where to send notify IRQs */
int notify_IRQ_phys_cpuid; /* CPUID of where to send notify IRQs */ int notify_IRQ_phys_cpuid; /* CPUID of where to send notify IRQs */
...@@ -529,7 +529,7 @@ struct xpc_partition { ...@@ -529,7 +529,7 @@ struct xpc_partition {
u8 remote_rp_version; /* version# of partition's rsvd pg */ u8 remote_rp_version; /* version# of partition's rsvd pg */
unsigned long remote_rp_ts_jiffies; /* timestamp when rsvd pg setup */ unsigned long remote_rp_ts_jiffies; /* timestamp when rsvd pg setup */
u64 remote_rp_pa; /* phys addr of partition's rsvd pg */ unsigned long remote_rp_pa; /* phys addr of partition's rsvd pg */
u64 last_heartbeat; /* HB at last read */ u64 last_heartbeat; /* HB at last read */
u32 activate_IRQ_rcvd; /* IRQs since activation */ u32 activate_IRQ_rcvd; /* IRQs since activation */
spinlock_t act_lock; /* protect updating of act_state */ spinlock_t act_lock; /* protect updating of act_state */
...@@ -623,7 +623,8 @@ extern void xpc_activate_partition(struct xpc_partition *); ...@@ -623,7 +623,8 @@ extern void xpc_activate_partition(struct xpc_partition *);
extern void xpc_activate_kthreads(struct xpc_channel *, int); extern void xpc_activate_kthreads(struct xpc_channel *, int);
extern void xpc_create_kthreads(struct xpc_channel *, int, int); extern void xpc_create_kthreads(struct xpc_channel *, int, int);
extern void xpc_disconnect_wait(int); extern void xpc_disconnect_wait(int);
extern enum xp_retval (*xpc_get_partition_rsvd_page_pa) (u64, u64 *, u64 *, extern enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *, u64 *,
unsigned long *,
size_t *); size_t *);
extern enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *); extern enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *);
extern void (*xpc_heartbeat_init) (void); extern void (*xpc_heartbeat_init) (void);
...@@ -640,8 +641,8 @@ extern void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *); ...@@ -640,8 +641,8 @@ extern void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *);
extern void (*xpc_process_msg_chctl_flags) (struct xpc_partition *, int); extern void (*xpc_process_msg_chctl_flags) (struct xpc_partition *, int);
extern int (*xpc_n_of_deliverable_msgs) (struct xpc_channel *); extern int (*xpc_n_of_deliverable_msgs) (struct xpc_channel *);
extern struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *); extern struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *);
extern void (*xpc_request_partition_activation) (struct xpc_rsvd_page *, u64, extern void (*xpc_request_partition_activation) (struct xpc_rsvd_page *,
int); unsigned long, int);
extern void (*xpc_request_partition_reactivation) (struct xpc_partition *); extern void (*xpc_request_partition_reactivation) (struct xpc_partition *);
extern void (*xpc_request_partition_deactivation) (struct xpc_partition *); extern void (*xpc_request_partition_deactivation) (struct xpc_partition *);
extern void (*xpc_cancel_partition_deactivation_request) ( extern void (*xpc_cancel_partition_deactivation_request) (
...@@ -690,7 +691,8 @@ extern enum xp_retval xpc_mark_partition_active(struct xpc_partition *); ...@@ -690,7 +691,8 @@ extern enum xp_retval xpc_mark_partition_active(struct xpc_partition *);
extern void xpc_mark_partition_inactive(struct xpc_partition *); extern void xpc_mark_partition_inactive(struct xpc_partition *);
extern void xpc_discovery(void); extern void xpc_discovery(void);
extern enum xp_retval xpc_get_remote_rp(int, unsigned long *, extern enum xp_retval xpc_get_remote_rp(int, unsigned long *,
struct xpc_rsvd_page *, u64 *); struct xpc_rsvd_page *,
unsigned long *);
extern void xpc_deactivate_partition(const int, struct xpc_partition *, extern void xpc_deactivate_partition(const int, struct xpc_partition *,
enum xp_retval); enum xp_retval);
extern enum xp_retval xpc_initiate_partid_to_nasids(short, void *); extern enum xp_retval xpc_initiate_partid_to_nasids(short, void *);
......
...@@ -366,9 +366,8 @@ xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number, ...@@ -366,9 +366,8 @@ xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number,
dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY (local_msgqueue_pa=" dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY (local_msgqueue_pa="
"0x%lx, local_nentries=%d, remote_nentries=%d) " "0x%lx, local_nentries=%d, remote_nentries=%d) "
"received from partid=%d, channel=%d\n", "received from partid=%d, channel=%d\n",
(unsigned long)args->local_msgqueue_pa, args->local_msgqueue_pa, args->local_nentries,
args->local_nentries, args->remote_nentries, args->remote_nentries, ch->partid, ch->number);
ch->partid, ch->number);
if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) { if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
spin_unlock_irqrestore(&ch->lock, irq_flags); spin_unlock_irqrestore(&ch->lock, irq_flags);
......
...@@ -169,8 +169,9 @@ static struct notifier_block xpc_die_notifier = { ...@@ -169,8 +169,9 @@ static struct notifier_block xpc_die_notifier = {
.notifier_call = xpc_system_die, .notifier_call = xpc_system_die,
}; };
enum xp_retval (*xpc_get_partition_rsvd_page_pa) (u64 buf, u64 *cookie, enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *buf, u64 *cookie,
u64 *paddr, size_t *len); unsigned long *rp_pa,
size_t *len);
enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *rp); enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *rp);
void (*xpc_heartbeat_init) (void); void (*xpc_heartbeat_init) (void);
void (*xpc_heartbeat_exit) (void); void (*xpc_heartbeat_exit) (void);
...@@ -189,7 +190,8 @@ int (*xpc_n_of_deliverable_msgs) (struct xpc_channel *ch); ...@@ -189,7 +190,8 @@ int (*xpc_n_of_deliverable_msgs) (struct xpc_channel *ch);
struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *ch); struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *ch);
void (*xpc_request_partition_activation) (struct xpc_rsvd_page *remote_rp, void (*xpc_request_partition_activation) (struct xpc_rsvd_page *remote_rp,
u64 remote_rp_pa, int nasid); unsigned long remote_rp_pa,
int nasid);
void (*xpc_request_partition_reactivation) (struct xpc_partition *part); void (*xpc_request_partition_reactivation) (struct xpc_partition *part);
void (*xpc_request_partition_deactivation) (struct xpc_partition *part); void (*xpc_request_partition_deactivation) (struct xpc_partition *part);
void (*xpc_cancel_partition_deactivation_request) (struct xpc_partition *part); void (*xpc_cancel_partition_deactivation_request) (struct xpc_partition *part);
......
...@@ -60,15 +60,15 @@ xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base) ...@@ -60,15 +60,15 @@ xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
* Given a nasid, get the physical address of the partition's reserved page * Given a nasid, get the physical address of the partition's reserved page
* for that nasid. This function returns 0 on any error. * for that nasid. This function returns 0 on any error.
*/ */
static u64 static unsigned long
xpc_get_rsvd_page_pa(int nasid) xpc_get_rsvd_page_pa(int nasid)
{ {
enum xp_retval ret; enum xp_retval ret;
u64 cookie = 0; u64 cookie = 0;
u64 rp_pa = nasid; /* seed with nasid */ unsigned long rp_pa = nasid; /* seed with nasid */
size_t len = 0; size_t len = 0;
u64 buf = buf; size_t buf_len = 0;
u64 buf_len = 0; void *buf = buf;
void *buf_base = NULL; void *buf_base = NULL;
while (1) { while (1) {
...@@ -78,7 +78,7 @@ xpc_get_rsvd_page_pa(int nasid) ...@@ -78,7 +78,7 @@ xpc_get_rsvd_page_pa(int nasid)
dev_dbg(xpc_part, "SAL returned with ret=%d, cookie=0x%016lx, " dev_dbg(xpc_part, "SAL returned with ret=%d, cookie=0x%016lx, "
"address=0x%016lx, len=0x%016lx\n", ret, "address=0x%016lx, len=0x%016lx\n", ret,
(unsigned long)cookie, (unsigned long)rp_pa, len); (unsigned long)cookie, rp_pa, len);
if (ret != xpNeedMoreInfo) if (ret != xpNeedMoreInfo)
break; break;
...@@ -87,19 +87,17 @@ xpc_get_rsvd_page_pa(int nasid) ...@@ -87,19 +87,17 @@ xpc_get_rsvd_page_pa(int nasid)
if (L1_CACHE_ALIGN(len) > buf_len) { if (L1_CACHE_ALIGN(len) > buf_len) {
kfree(buf_base); kfree(buf_base);
buf_len = L1_CACHE_ALIGN(len); buf_len = L1_CACHE_ALIGN(len);
buf = (u64)xpc_kmalloc_cacheline_aligned(buf_len, buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL,
GFP_KERNEL, &buf_base);
&buf_base);
if (buf_base == NULL) { if (buf_base == NULL) {
dev_err(xpc_part, "unable to kmalloc " dev_err(xpc_part, "unable to kmalloc "
"len=0x%016lx\n", "len=0x%016lx\n", buf_len);
(unsigned long)buf_len);
ret = xpNoMemory; ret = xpNoMemory;
break; break;
} }
} }
ret = xp_remote_memcpy((void *)buf, (void *)rp_pa, buf_len); ret = xp_remote_memcpy(xp_pa(buf), rp_pa, buf_len);
if (ret != xpSuccess) { if (ret != xpSuccess) {
dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret); dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret);
break; break;
...@@ -111,8 +109,7 @@ xpc_get_rsvd_page_pa(int nasid) ...@@ -111,8 +109,7 @@ xpc_get_rsvd_page_pa(int nasid)
if (ret != xpSuccess) if (ret != xpSuccess)
rp_pa = 0; rp_pa = 0;
dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
(unsigned long)rp_pa);
return rp_pa; return rp_pa;
} }
...@@ -125,7 +122,7 @@ struct xpc_rsvd_page * ...@@ -125,7 +122,7 @@ struct xpc_rsvd_page *
xpc_setup_rsvd_page(void) xpc_setup_rsvd_page(void)
{ {
struct xpc_rsvd_page *rp; struct xpc_rsvd_page *rp;
u64 rp_pa; unsigned long rp_pa;
unsigned long new_ts_jiffies; unsigned long new_ts_jiffies;
/* get the local reserved page's address */ /* get the local reserved page's address */
...@@ -193,7 +190,7 @@ xpc_setup_rsvd_page(void) ...@@ -193,7 +190,7 @@ xpc_setup_rsvd_page(void)
*/ */
enum xp_retval enum xp_retval
xpc_get_remote_rp(int nasid, unsigned long *discovered_nasids, xpc_get_remote_rp(int nasid, unsigned long *discovered_nasids,
struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa) struct xpc_rsvd_page *remote_rp, unsigned long *remote_rp_pa)
{ {
int l; int l;
enum xp_retval ret; enum xp_retval ret;
...@@ -205,7 +202,7 @@ xpc_get_remote_rp(int nasid, unsigned long *discovered_nasids, ...@@ -205,7 +202,7 @@ xpc_get_remote_rp(int nasid, unsigned long *discovered_nasids,
return xpNoRsvdPageAddr; return xpNoRsvdPageAddr;
/* pull over the reserved page header and part_nasids mask */ /* pull over the reserved page header and part_nasids mask */
ret = xp_remote_memcpy(remote_rp, (void *)*remote_rp_pa, ret = xp_remote_memcpy(xp_pa(remote_rp), *remote_rp_pa,
XPC_RP_HEADER_SIZE + xpc_nasid_mask_nbytes); XPC_RP_HEADER_SIZE + xpc_nasid_mask_nbytes);
if (ret != xpSuccess) if (ret != xpSuccess)
return ret; return ret;
...@@ -389,7 +386,7 @@ xpc_discovery(void) ...@@ -389,7 +386,7 @@ xpc_discovery(void)
{ {
void *remote_rp_base; void *remote_rp_base;
struct xpc_rsvd_page *remote_rp; struct xpc_rsvd_page *remote_rp;
u64 remote_rp_pa; unsigned long remote_rp_pa;
int region; int region;
int region_size; int region_size;
int max_regions; int max_regions;
...@@ -500,7 +497,7 @@ enum xp_retval ...@@ -500,7 +497,7 @@ enum xp_retval
xpc_initiate_partid_to_nasids(short partid, void *nasid_mask) xpc_initiate_partid_to_nasids(short partid, void *nasid_mask)
{ {
struct xpc_partition *part; struct xpc_partition *part;
u64 part_nasid_pa; unsigned long part_nasid_pa;
part = &xpc_partitions[partid]; part = &xpc_partitions[partid];
if (part->remote_rp_pa == 0) if (part->remote_rp_pa == 0)
...@@ -508,8 +505,8 @@ xpc_initiate_partid_to_nasids(short partid, void *nasid_mask) ...@@ -508,8 +505,8 @@ xpc_initiate_partid_to_nasids(short partid, void *nasid_mask)
memset(nasid_mask, 0, xpc_nasid_mask_nbytes); memset(nasid_mask, 0, xpc_nasid_mask_nbytes);
part_nasid_pa = (u64)XPC_RP_PART_NASIDS(part->remote_rp_pa); part_nasid_pa = (unsigned long)XPC_RP_PART_NASIDS(part->remote_rp_pa);
return xp_remote_memcpy(nasid_mask, (void *)part_nasid_pa, return xp_remote_memcpy(xp_pa(nasid_mask), part_nasid_pa,
xpc_nasid_mask_nbytes); xpc_nasid_mask_nbytes);
} }
...@@ -207,8 +207,8 @@ xpc_handle_activate_IRQ_sn2(int irq, void *dev_id) ...@@ -207,8 +207,8 @@ xpc_handle_activate_IRQ_sn2(int irq, void *dev_id)
* Flag the appropriate amo variable and send an IRQ to the specified node. * Flag the appropriate amo variable and send an IRQ to the specified node.
*/ */
static void static void
xpc_send_activate_IRQ_sn2(u64 amos_page_pa, int from_nasid, int to_nasid, xpc_send_activate_IRQ_sn2(unsigned long amos_page_pa, int from_nasid,
int to_phys_cpuid) int to_nasid, int to_phys_cpuid)
{ {
struct amo *amos = (struct amo *)__va(amos_page_pa + struct amo *amos = (struct amo *)__va(amos_page_pa +
(XPC_ACTIVATE_IRQ_AMOS_SN2 * (XPC_ACTIVATE_IRQ_AMOS_SN2 *
...@@ -404,7 +404,7 @@ xpc_send_chctl_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags) ...@@ -404,7 +404,7 @@ xpc_send_chctl_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
args->remote_nentries = ch->remote_nentries; args->remote_nentries = ch->remote_nentries;
args->local_nentries = ch->local_nentries; args->local_nentries = ch->local_nentries;
args->local_msgqueue_pa = __pa(ch->local_msgqueue); args->local_msgqueue_pa = xp_pa(ch->local_msgqueue);
XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREPLY, irq_flags); XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREPLY, irq_flags);
} }
...@@ -577,13 +577,13 @@ xpc_allow_amo_ops_shub_wars_1_1_sn2(void) ...@@ -577,13 +577,13 @@ xpc_allow_amo_ops_shub_wars_1_1_sn2(void)
} }
static enum xp_retval static enum xp_retval
xpc_get_partition_rsvd_page_pa_sn2(u64 buf, u64 *cookie, u64 *paddr, xpc_get_partition_rsvd_page_pa_sn2(void *buf, u64 *cookie, unsigned long *rp_pa,
size_t *len) size_t *len)
{ {
s64 status; s64 status;
enum xp_retval ret; enum xp_retval ret;
status = sn_partition_reserved_page_pa(buf, cookie, paddr, len); status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
if (status == SALRET_OK) if (status == SALRET_OK)
ret = xpSuccess; ret = xpSuccess;
else if (status == SALRET_MORE_PASSES) else if (status == SALRET_MORE_PASSES)
...@@ -604,7 +604,7 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp) ...@@ -604,7 +604,7 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
xpc_vars_sn2 = XPC_RP_VARS(rp); xpc_vars_sn2 = XPC_RP_VARS(rp);
rp->sn.vars_pa = __pa(xpc_vars_sn2); rp->sn.vars_pa = xp_pa(xpc_vars_sn2);
/* vars_part array follows immediately after vars */ /* vars_part array follows immediately after vars */
xpc_vars_part_sn2 = (struct xpc_vars_part_sn2 *)((u8 *)XPC_RP_VARS(rp) + xpc_vars_part_sn2 = (struct xpc_vars_part_sn2 *)((u8 *)XPC_RP_VARS(rp) +
...@@ -649,7 +649,7 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp) ...@@ -649,7 +649,7 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
xpc_vars_sn2->version = XPC_V_VERSION; xpc_vars_sn2->version = XPC_V_VERSION;
xpc_vars_sn2->activate_IRQ_nasid = cpuid_to_nasid(0); xpc_vars_sn2->activate_IRQ_nasid = cpuid_to_nasid(0);
xpc_vars_sn2->activate_IRQ_phys_cpuid = cpu_physical_id(0); xpc_vars_sn2->activate_IRQ_phys_cpuid = cpu_physical_id(0);
xpc_vars_sn2->vars_part_pa = __pa(xpc_vars_part_sn2); xpc_vars_sn2->vars_part_pa = xp_pa(xpc_vars_part_sn2);
xpc_vars_sn2->amos_page_pa = ia64_tpa((u64)amos_page); xpc_vars_sn2->amos_page_pa = ia64_tpa((u64)amos_page);
xpc_vars_sn2->amos_page = amos_page; /* save for next load of XPC */ xpc_vars_sn2->amos_page = amos_page; /* save for next load of XPC */
...@@ -734,8 +734,8 @@ xpc_check_remote_hb_sn2(void) ...@@ -734,8 +734,8 @@ xpc_check_remote_hb_sn2(void)
} }
/* pull the remote_hb cache line */ /* pull the remote_hb cache line */
ret = xp_remote_memcpy(remote_vars, ret = xp_remote_memcpy(xp_pa(remote_vars),
(void *)part->sn.sn2.remote_vars_pa, part->sn.sn2.remote_vars_pa,
XPC_RP_VARS_SIZE); XPC_RP_VARS_SIZE);
if (ret != xpSuccess) { if (ret != xpSuccess) {
XPC_DEACTIVATE_PARTITION(part, ret); XPC_DEACTIVATE_PARTITION(part, ret);
...@@ -768,7 +768,8 @@ xpc_check_remote_hb_sn2(void) ...@@ -768,7 +768,8 @@ xpc_check_remote_hb_sn2(void)
* assumed to be of size XPC_RP_VARS_SIZE. * assumed to be of size XPC_RP_VARS_SIZE.
*/ */
static enum xp_retval static enum xp_retval
xpc_get_remote_vars_sn2(u64 remote_vars_pa, struct xpc_vars_sn2 *remote_vars) xpc_get_remote_vars_sn2(unsigned long remote_vars_pa,
struct xpc_vars_sn2 *remote_vars)
{ {
enum xp_retval ret; enum xp_retval ret;
...@@ -776,7 +777,7 @@ xpc_get_remote_vars_sn2(u64 remote_vars_pa, struct xpc_vars_sn2 *remote_vars) ...@@ -776,7 +777,7 @@ xpc_get_remote_vars_sn2(u64 remote_vars_pa, struct xpc_vars_sn2 *remote_vars)
return xpVarsNotSet; return xpVarsNotSet;
/* pull over the cross partition variables */ /* pull over the cross partition variables */
ret = xp_remote_memcpy(remote_vars, (void *)remote_vars_pa, ret = xp_remote_memcpy(xp_pa(remote_vars), remote_vars_pa,
XPC_RP_VARS_SIZE); XPC_RP_VARS_SIZE);
if (ret != xpSuccess) if (ret != xpSuccess)
return ret; return ret;
...@@ -791,7 +792,7 @@ xpc_get_remote_vars_sn2(u64 remote_vars_pa, struct xpc_vars_sn2 *remote_vars) ...@@ -791,7 +792,7 @@ xpc_get_remote_vars_sn2(u64 remote_vars_pa, struct xpc_vars_sn2 *remote_vars)
static void static void
xpc_request_partition_activation_sn2(struct xpc_rsvd_page *remote_rp, xpc_request_partition_activation_sn2(struct xpc_rsvd_page *remote_rp,
u64 remote_rp_pa, int nasid) unsigned long remote_rp_pa, int nasid)
{ {
xpc_send_local_activate_IRQ_sn2(nasid); xpc_send_local_activate_IRQ_sn2(nasid);
} }
...@@ -883,7 +884,8 @@ xpc_partition_deactivation_requested_sn2(short partid) ...@@ -883,7 +884,8 @@ xpc_partition_deactivation_requested_sn2(short partid)
static void static void
xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version, xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version,
unsigned long *remote_rp_ts_jiffies, unsigned long *remote_rp_ts_jiffies,
u64 remote_rp_pa, u64 remote_vars_pa, unsigned long remote_rp_pa,
unsigned long remote_vars_pa,
struct xpc_vars_sn2 *remote_vars) struct xpc_vars_sn2 *remote_vars)
{ {
struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2; struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
...@@ -948,8 +950,8 @@ xpc_identify_activate_IRQ_req_sn2(int nasid) ...@@ -948,8 +950,8 @@ xpc_identify_activate_IRQ_req_sn2(int nasid)
{ {
struct xpc_rsvd_page *remote_rp; struct xpc_rsvd_page *remote_rp;
struct xpc_vars_sn2 *remote_vars; struct xpc_vars_sn2 *remote_vars;
u64 remote_rp_pa; unsigned long remote_rp_pa;
u64 remote_vars_pa; unsigned long remote_vars_pa;
int remote_rp_version; int remote_rp_version;
int reactivate = 0; int reactivate = 0;
unsigned long remote_rp_ts_jiffies = 0; unsigned long remote_rp_ts_jiffies = 0;
...@@ -1291,11 +1293,11 @@ xpc_setup_infrastructure_sn2(struct xpc_partition *part) ...@@ -1291,11 +1293,11 @@ xpc_setup_infrastructure_sn2(struct xpc_partition *part)
* The setting of the magic # indicates that these per partition * The setting of the magic # indicates that these per partition
* specific variables are ready to be used. * specific variables are ready to be used.
*/ */
xpc_vars_part_sn2[partid].GPs_pa = __pa(part_sn2->local_GPs); xpc_vars_part_sn2[partid].GPs_pa = xp_pa(part_sn2->local_GPs);
xpc_vars_part_sn2[partid].openclose_args_pa = xpc_vars_part_sn2[partid].openclose_args_pa =
__pa(part->local_openclose_args); xp_pa(part->local_openclose_args);
xpc_vars_part_sn2[partid].chctl_amo_pa = xpc_vars_part_sn2[partid].chctl_amo_pa =
__pa(part_sn2->local_chctl_amo_va); xp_pa(part_sn2->local_chctl_amo_va);
cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */ cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */
xpc_vars_part_sn2[partid].notify_IRQ_nasid = cpuid_to_nasid(cpuid); xpc_vars_part_sn2[partid].notify_IRQ_nasid = cpuid_to_nasid(cpuid);
xpc_vars_part_sn2[partid].notify_IRQ_phys_cpuid = xpc_vars_part_sn2[partid].notify_IRQ_phys_cpuid =
...@@ -1382,25 +1384,25 @@ xpc_teardown_infrastructure_sn2(struct xpc_partition *part) ...@@ -1382,25 +1384,25 @@ xpc_teardown_infrastructure_sn2(struct xpc_partition *part)
* Create a wrapper that hides the underlying mechanism for pulling a cacheline * Create a wrapper that hides the underlying mechanism for pulling a cacheline
* (or multiple cachelines) from a remote partition. * (or multiple cachelines) from a remote partition.
* *
* src must be a cacheline aligned physical address on the remote partition. * src_pa must be a cacheline aligned physical address on the remote partition.
* dst must be a cacheline aligned virtual address on this partition. * dst must be a cacheline aligned virtual address on this partition.
* cnt must be cacheline sized * cnt must be cacheline sized
*/ */
/* ??? Replace this function by call to xp_remote_memcpy() or bte_copy()? */ /* ??? Replace this function by call to xp_remote_memcpy() or bte_copy()? */
static enum xp_retval static enum xp_retval
xpc_pull_remote_cachelines_sn2(struct xpc_partition *part, void *dst, xpc_pull_remote_cachelines_sn2(struct xpc_partition *part, void *dst,
const void *src, size_t cnt) const unsigned long src_pa, size_t cnt)
{ {
enum xp_retval ret; enum xp_retval ret;
DBUG_ON((u64)src != L1_CACHE_ALIGN((u64)src)); DBUG_ON(src_pa != L1_CACHE_ALIGN(src_pa));
DBUG_ON((u64)dst != L1_CACHE_ALIGN((u64)dst)); DBUG_ON((unsigned long)dst != L1_CACHE_ALIGN((unsigned long)dst));
DBUG_ON(cnt != L1_CACHE_ALIGN(cnt)); DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));
if (part->act_state == XPC_P_DEACTIVATING) if (part->act_state == XPC_P_DEACTIVATING)
return part->reason; return part->reason;
ret = xp_remote_memcpy(dst, src, cnt); ret = xp_remote_memcpy(xp_pa(dst), src_pa, cnt);
if (ret != xpSuccess) { if (ret != xpSuccess) {
dev_dbg(xpc_chan, "xp_remote_memcpy() from partition %d failed," dev_dbg(xpc_chan, "xp_remote_memcpy() from partition %d failed,"
" ret=%d\n", XPC_PARTID(part), ret); " ret=%d\n", XPC_PARTID(part), ret);
...@@ -1420,7 +1422,8 @@ xpc_pull_remote_vars_part_sn2(struct xpc_partition *part) ...@@ -1420,7 +1422,8 @@ xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
struct xpc_vars_part_sn2 *pulled_entry_cacheline = struct xpc_vars_part_sn2 *pulled_entry_cacheline =
(struct xpc_vars_part_sn2 *)L1_CACHE_ALIGN((u64)buffer); (struct xpc_vars_part_sn2 *)L1_CACHE_ALIGN((u64)buffer);
struct xpc_vars_part_sn2 *pulled_entry; struct xpc_vars_part_sn2 *pulled_entry;
u64 remote_entry_cacheline_pa, remote_entry_pa; unsigned long remote_entry_cacheline_pa;
unsigned long remote_entry_pa;
short partid = XPC_PARTID(part); short partid = XPC_PARTID(part);
enum xp_retval ret; enum xp_retval ret;
...@@ -1440,7 +1443,7 @@ xpc_pull_remote_vars_part_sn2(struct xpc_partition *part) ...@@ -1440,7 +1443,7 @@ xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
(L1_CACHE_BYTES - 1))); (L1_CACHE_BYTES - 1)));
ret = xpc_pull_remote_cachelines_sn2(part, pulled_entry_cacheline, ret = xpc_pull_remote_cachelines_sn2(part, pulled_entry_cacheline,
(void *)remote_entry_cacheline_pa, remote_entry_cacheline_pa,
L1_CACHE_BYTES); L1_CACHE_BYTES);
if (ret != xpSuccess) { if (ret != xpSuccess) {
dev_dbg(xpc_chan, "failed to pull XPC vars_part from " dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
...@@ -1587,7 +1590,7 @@ xpc_get_chctl_all_flags_sn2(struct xpc_partition *part) ...@@ -1587,7 +1590,7 @@ xpc_get_chctl_all_flags_sn2(struct xpc_partition *part)
if (xpc_any_openclose_chctl_flags_set(&chctl)) { if (xpc_any_openclose_chctl_flags_set(&chctl)) {
ret = xpc_pull_remote_cachelines_sn2(part, part-> ret = xpc_pull_remote_cachelines_sn2(part, part->
remote_openclose_args, remote_openclose_args,
(void *)part_sn2-> part_sn2->
remote_openclose_args_pa, remote_openclose_args_pa,
XPC_OPENCLOSE_ARGS_SIZE); XPC_OPENCLOSE_ARGS_SIZE);
if (ret != xpSuccess) { if (ret != xpSuccess) {
...@@ -1604,7 +1607,7 @@ xpc_get_chctl_all_flags_sn2(struct xpc_partition *part) ...@@ -1604,7 +1607,7 @@ xpc_get_chctl_all_flags_sn2(struct xpc_partition *part)
if (xpc_any_msg_chctl_flags_set(&chctl)) { if (xpc_any_msg_chctl_flags_set(&chctl)) {
ret = xpc_pull_remote_cachelines_sn2(part, part_sn2->remote_GPs, ret = xpc_pull_remote_cachelines_sn2(part, part_sn2->remote_GPs,
(void *)part_sn2->remote_GPs_pa, part_sn2->remote_GPs_pa,
XPC_GP_SIZE); XPC_GP_SIZE);
if (ret != xpSuccess) { if (ret != xpSuccess) {
XPC_DEACTIVATE_PARTITION(part, ret); XPC_DEACTIVATE_PARTITION(part, ret);
...@@ -1971,8 +1974,10 @@ xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get) ...@@ -1971,8 +1974,10 @@ xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get)
{ {
struct xpc_partition *part = &xpc_partitions[ch->partid]; struct xpc_partition *part = &xpc_partitions[ch->partid];
struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
struct xpc_msg *remote_msg, *msg; unsigned long remote_msg_pa;
u32 msg_index, nmsgs; struct xpc_msg *msg;
u32 msg_index;
u32 nmsgs;
u64 msg_offset; u64 msg_offset;
enum xp_retval ret; enum xp_retval ret;
...@@ -1996,10 +2001,9 @@ xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get) ...@@ -1996,10 +2001,9 @@ xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get)
msg_offset = msg_index * ch->msg_size; msg_offset = msg_index * ch->msg_size;
msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset); msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);
remote_msg = (struct xpc_msg *)(ch->remote_msgqueue_pa + remote_msg_pa = ch->remote_msgqueue_pa + msg_offset;
msg_offset);
ret = xpc_pull_remote_cachelines_sn2(part, msg, remote_msg, ret = xpc_pull_remote_cachelines_sn2(part, msg, remote_msg_pa,
nmsgs * ch->msg_size); nmsgs * ch->msg_size);
if (ret != xpSuccess) { if (ret != xpSuccess) {
......
...@@ -61,7 +61,7 @@ xpc_heartbeat_exit_uv(void) ...@@ -61,7 +61,7 @@ xpc_heartbeat_exit_uv(void)
static void static void
xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp, xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
u64 remote_rp_pa, int nasid) unsigned long remote_rp_pa, int nasid)
{ {
short partid = remote_rp->SAL_partid; short partid = remote_rp->SAL_partid;
struct xpc_partition *part = &xpc_partitions[partid]; struct xpc_partition *part = &xpc_partitions[partid];
......
...@@ -44,7 +44,7 @@ struct xpnet_message { ...@@ -44,7 +44,7 @@ struct xpnet_message {
u16 version; /* Version for this message */ u16 version; /* Version for this message */
u16 embedded_bytes; /* #of bytes embedded in XPC message */ u16 embedded_bytes; /* #of bytes embedded in XPC message */
u32 magic; /* Special number indicating this is xpnet */ u32 magic; /* Special number indicating this is xpnet */
u64 buf_pa; /* phys address of buffer to retrieve */ unsigned long buf_pa; /* phys address of buffer to retrieve */
u32 size; /* #of bytes in buffer */ u32 size; /* #of bytes in buffer */
u8 leadin_ignore; /* #of bytes to ignore at the beginning */ u8 leadin_ignore; /* #of bytes to ignore at the beginning */
u8 tailout_ignore; /* #of bytes to ignore at the end */ u8 tailout_ignore; /* #of bytes to ignore at the end */
...@@ -152,6 +152,7 @@ static void ...@@ -152,6 +152,7 @@ static void
xpnet_receive(short partid, int channel, struct xpnet_message *msg) xpnet_receive(short partid, int channel, struct xpnet_message *msg)
{ {
struct sk_buff *skb; struct sk_buff *skb;
void *dst;
enum xp_retval ret; enum xp_retval ret;
struct xpnet_dev_private *priv = struct xpnet_dev_private *priv =
(struct xpnet_dev_private *)xpnet_device->priv; (struct xpnet_dev_private *)xpnet_device->priv;
...@@ -166,9 +167,8 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg) ...@@ -166,9 +167,8 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg)
return; return;
} }
dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size,
(unsigned long)msg->buf_pa, msg->size, msg->leadin_ignore, msg->leadin_ignore, msg->tailout_ignore);
msg->tailout_ignore);
/* reserve an extra cache line */ /* reserve an extra cache line */
skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES); skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES);
...@@ -210,15 +210,12 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg) ...@@ -210,15 +210,12 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg)
skb_copy_to_linear_data(skb, &msg->data, skb_copy_to_linear_data(skb, &msg->data,
(size_t)msg->embedded_bytes); (size_t)msg->embedded_bytes);
} else { } else {
dst = (void *)((u64)skb->data & ~(L1_CACHE_BYTES - 1));
dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t" dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t"
"xp_remote_memcpy(0x%p, 0x%p, %hu)\n", (void *) "xp_remote_memcpy(0x%p, 0x%p, %hu)\n", dst,
((u64)skb->data & ~(L1_CACHE_BYTES - 1)),
(void *)msg->buf_pa, msg->size); (void *)msg->buf_pa, msg->size);
ret = xp_remote_memcpy((void *)((u64)skb->data & ret = xp_remote_memcpy(xp_pa(dst), msg->buf_pa, msg->size);
~(L1_CACHE_BYTES - 1)),
(void *)msg->buf_pa, msg->size);
if (ret != xpSuccess) { if (ret != xpSuccess) {
/* /*
* !!! Need better way of cleaning skb. Currently skb * !!! Need better way of cleaning skb. Currently skb
...@@ -226,8 +223,7 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg) ...@@ -226,8 +223,7 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg)
* !!! dev_kfree_skb. * !!! dev_kfree_skb.
*/ */
dev_err(xpnet, "xp_remote_memcpy(0x%p, 0x%p, 0x%hx) " dev_err(xpnet, "xp_remote_memcpy(0x%p, 0x%p, 0x%hx) "
"returned error=0x%x\n", (void *) "returned error=0x%x\n", dst,
((u64)skb->data & ~(L1_CACHE_BYTES - 1)),
(void *)msg->buf_pa, msg->size, ret); (void *)msg->buf_pa, msg->size, ret);
xpc_received(partid, channel, (void *)msg); xpc_received(partid, channel, (void *)msg);
...@@ -428,13 +424,13 @@ xpnet_send(struct sk_buff *skb, struct xpnet_pending_msg *queued_msg, ...@@ -428,13 +424,13 @@ xpnet_send(struct sk_buff *skb, struct xpnet_pending_msg *queued_msg,
msg->size = end_addr - start_addr; msg->size = end_addr - start_addr;
msg->leadin_ignore = (u64)skb->data - start_addr; msg->leadin_ignore = (u64)skb->data - start_addr;
msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb); msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb);
msg->buf_pa = __pa(start_addr); msg->buf_pa = xp_pa((void *)start_addr);
dev_dbg(xpnet, "sending XPC message to %d:%d\n" dev_dbg(xpnet, "sending XPC message to %d:%d\n"
KERN_DEBUG "msg->buf_pa=0x%lx, msg->size=%u, " KERN_DEBUG "msg->buf_pa=0x%lx, msg->size=%u, "
"msg->leadin_ignore=%u, msg->tailout_ignore=%u\n", "msg->leadin_ignore=%u, msg->tailout_ignore=%u\n",
dest_partid, XPC_NET_CHANNEL, (unsigned long)msg->buf_pa, dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size,
msg->size, msg->leadin_ignore, msg->tailout_ignore); msg->leadin_ignore, msg->tailout_ignore);
atomic_inc(&queued_msg->use_count); atomic_inc(&queued_msg->use_count);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment