Commit 133a58c1 authored by Tony Luck

Pull sn2-reduce-kmalloc-wrap into release branch

parents dc5cdd8e 7aa6ba41
@@ -21,7 +21,6 @@
 #include <linux/sched.h>
 #include <linux/cache.h>
 #include <linux/interrupt.h>
-#include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/completion.h>
 #include <asm/sn/bte.h>
@@ -29,6 +28,31 @@
 #include <asm/sn/xpc.h>


+/*
+ * Guarantee that the kzalloc'd memory is cacheline aligned.
+ */
+static void *
+xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
+{
+        /* see if kzalloc will give us cachline aligned memory by default */
+        *base = kzalloc(size, flags);
+        if (*base == NULL) {
+                return NULL;
+        }
+        if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
+                return *base;
+        }
+        kfree(*base);
+
+        /* nope, we'll have to do it ourselves */
+        *base = kzalloc(size + L1_CACHE_BYTES, flags);
+        if (*base == NULL) {
+                return NULL;
+        }
+        return (void *) L1_CACHE_ALIGN((u64) *base);
+}
+
+
 /*
  * Set up the initial values for the XPartition Communication channels.
  */
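The helper hands back two pointers: the return value is the cacheline-aligned address the caller actually uses, while *base receives the raw allocation that must eventually be passed to kfree(). That is why every aligned buffer in the hunks below is paired with a *_base field. A minimal caller-side sketch of the pattern (illustrative only; the names foo, foo_base, foo_create() and foo_destroy() are invented for the example, and the helper is assumed to be visible in the same file):

#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/errno.h>

static void *foo;       /* aligned pointer, used for all accesses */
static void *foo_base;  /* raw allocation, the only pointer kfree() may see */

static int foo_create(size_t nbytes)
{
        foo = xpc_kzalloc_cacheline_aligned(nbytes, GFP_KERNEL, &foo_base);
        if (foo == NULL)
                return -ENOMEM;
        /* foo is zero-filled and aligned to L1_CACHE_BYTES */
        return 0;
}

static void foo_destroy(void)
{
        kfree(foo_base);        /* free the original allocation, not the aligned alias */
        foo = NULL;
        foo_base = NULL;
}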
@@ -93,20 +117,19 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 	 * Allocate all of the channel structures as a contiguous chunk of
 	 * memory.
 	 */
-        part->channels = kmalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS,
+        part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS,
                                                         GFP_KERNEL);
         if (part->channels == NULL) {
                 dev_err(xpc_chan, "can't get memory for channels\n");
                 return xpcNoMemory;
         }
-        memset(part->channels, 0, sizeof(struct xpc_channel) * XPC_NCHANNELS);

         part->nchannels = XPC_NCHANNELS;


         /* allocate all the required GET/PUT values */

-        part->local_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE,
+        part->local_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
                                         GFP_KERNEL, &part->local_GPs_base);
         if (part->local_GPs == NULL) {
                 kfree(part->channels);
@@ -115,55 +138,51 @@ xpc_setup_infrastructure(struct xpc_partition *part)
                         "values\n");
                 return xpcNoMemory;
         }
-        memset(part->local_GPs, 0, XPC_GP_SIZE);

-        part->remote_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE,
+        part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
                                         GFP_KERNEL, &part->remote_GPs_base);
         if (part->remote_GPs == NULL) {
-                kfree(part->channels);
-                part->channels = NULL;
-                kfree(part->local_GPs_base);
-                part->local_GPs = NULL;
                 dev_err(xpc_chan, "can't get memory for remote get/put "
                         "values\n");
+                kfree(part->local_GPs_base);
+                part->local_GPs = NULL;
+                kfree(part->channels);
+                part->channels = NULL;
                 return xpcNoMemory;
         }
-        memset(part->remote_GPs, 0, XPC_GP_SIZE);


         /* allocate all the required open and close args */

-        part->local_openclose_args = xpc_kmalloc_cacheline_aligned(
+        part->local_openclose_args = xpc_kzalloc_cacheline_aligned(
                                         XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
                                         &part->local_openclose_args_base);
         if (part->local_openclose_args == NULL) {
-                kfree(part->channels);
-                part->channels = NULL;
-                kfree(part->local_GPs_base);
-                part->local_GPs = NULL;
+                dev_err(xpc_chan, "can't get memory for local connect args\n");
                 kfree(part->remote_GPs_base);
                 part->remote_GPs = NULL;
-                dev_err(xpc_chan, "can't get memory for local connect args\n");
+                kfree(part->local_GPs_base);
+                part->local_GPs = NULL;
+                kfree(part->channels);
+                part->channels = NULL;
                 return xpcNoMemory;
         }
-        memset(part->local_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE);

-        part->remote_openclose_args = xpc_kmalloc_cacheline_aligned(
+        part->remote_openclose_args = xpc_kzalloc_cacheline_aligned(
                                         XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
                                         &part->remote_openclose_args_base);
         if (part->remote_openclose_args == NULL) {
-                kfree(part->channels);
-                part->channels = NULL;
-                kfree(part->local_GPs_base);
-                part->local_GPs = NULL;
-                kfree(part->remote_GPs_base);
-                part->remote_GPs = NULL;
+                dev_err(xpc_chan, "can't get memory for remote connect args\n");
                 kfree(part->local_openclose_args_base);
                 part->local_openclose_args = NULL;
-                dev_err(xpc_chan, "can't get memory for remote connect args\n");
+                kfree(part->remote_GPs_base);
+                part->remote_GPs = NULL;
+                kfree(part->local_GPs_base);
+                part->local_GPs = NULL;
+                kfree(part->channels);
+                part->channels = NULL;
                 return xpcNoMemory;
         }
-        memset(part->remote_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE);


         xpc_initialize_channels(part, partid);
@@ -186,18 +205,18 @@ xpc_setup_infrastructure(struct xpc_partition *part)

         ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, SA_SHIRQ,
                                 part->IPI_owner, (void *) (u64) partid);
         if (ret != 0) {
-                kfree(part->channels);
-                part->channels = NULL;
-                kfree(part->local_GPs_base);
-                part->local_GPs = NULL;
-                kfree(part->remote_GPs_base);
-                part->remote_GPs = NULL;
-                kfree(part->local_openclose_args_base);
-                part->local_openclose_args = NULL;
-                kfree(part->remote_openclose_args_base);
-                part->remote_openclose_args = NULL;
                 dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
                         "errno=%d\n", -ret);
+                kfree(part->remote_openclose_args_base);
+                part->remote_openclose_args = NULL;
+                kfree(part->local_openclose_args_base);
+                part->local_openclose_args = NULL;
+                kfree(part->remote_GPs_base);
+                part->remote_GPs = NULL;
+                kfree(part->local_GPs_base);
+                part->local_GPs = NULL;
+                kfree(part->channels);
+                part->channels = NULL;
                 return xpcLackOfResources;
         }
@@ -446,22 +465,20 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch)
         for (nentries = ch->local_nentries; nentries > 0; nentries--) {

                 nbytes = nentries * ch->msg_size;
-                ch->local_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes,
+                ch->local_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
                                                 GFP_KERNEL,
                                                 &ch->local_msgqueue_base);
                 if (ch->local_msgqueue == NULL) {
                         continue;
                 }
-                memset(ch->local_msgqueue, 0, nbytes);

                 nbytes = nentries * sizeof(struct xpc_notify);
-                ch->notify_queue = kmalloc(nbytes, GFP_KERNEL);
+                ch->notify_queue = kzalloc(nbytes, GFP_KERNEL);
                 if (ch->notify_queue == NULL) {
                         kfree(ch->local_msgqueue_base);
                         ch->local_msgqueue = NULL;
                         continue;
                 }
-                memset(ch->notify_queue, 0, nbytes);

                 spin_lock_irqsave(&ch->lock, irq_flags);
                 if (nentries < ch->local_nentries) {
@@ -501,13 +518,12 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
         for (nentries = ch->remote_nentries; nentries > 0; nentries--) {

                 nbytes = nentries * ch->msg_size;
-                ch->remote_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes,
+                ch->remote_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
                                                 GFP_KERNEL,
                                                 &ch->remote_msgqueue_base);
                 if (ch->remote_msgqueue == NULL) {
                         continue;
                 }
-                memset(ch->remote_msgqueue, 0, nbytes);

                 spin_lock_irqsave(&ch->lock, irq_flags);
                 if (nentries < ch->remote_nentries) {
...
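As a side note on the two message-queue hunks above: the allocators keep their existing fallback strategy, retrying with one entry fewer each time a contiguous cacheline-aligned buffer cannot be obtained, and only the allocation call changes. A condensed sketch of that loop shape (a hypothetical wrapper, not code from the commit; it assumes the xpc_kzalloc_cacheline_aligned() helper added above and the xpc_retval codes used elsewhere in this driver):

/* Hypothetical condensation of the retry pattern shown above. */
static enum xpc_retval
xpc_msgqueue_alloc_sketch(struct xpc_channel *ch)
{
        int nentries;
        size_t nbytes;

        for (nentries = ch->local_nentries; nentries > 0; nentries--) {
                nbytes = nentries * ch->msg_size;

                /* zeroed, cacheline-aligned; base pointer kept for kfree() */
                ch->local_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
                                        GFP_KERNEL, &ch->local_msgqueue_base);
                if (ch->local_msgqueue == NULL)
                        continue;       /* too large, retry with fewer entries */

                ch->local_nentries = nentries;  /* record the (possibly reduced) size */
                return xpcSuccess;
        }
        return xpcNoMemory;
}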
@@ -52,7 +52,6 @@
 #include <linux/syscalls.h>
 #include <linux/cache.h>
 #include <linux/interrupt.h>
-#include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/reboot.h>
 #include <linux/completion.h>
...
@@ -80,6 +80,31 @@ char ____cacheline_aligned xpc_remote_copy_buffer[XPC_RP_HEADER_SIZE +
                                         XP_NASID_MASK_BYTES];


+/*
+ * Guarantee that the kmalloc'd memory is cacheline aligned.
+ */
+static void *
+xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
+{
+        /* see if kmalloc will give us cachline aligned memory by default */
+        *base = kmalloc(size, flags);
+        if (*base == NULL) {
+                return NULL;
+        }
+        if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
+                return *base;
+        }
+        kfree(*base);
+
+        /* nope, we'll have to do it ourselves */
+        *base = kmalloc(size + L1_CACHE_BYTES, flags);
+        if (*base == NULL) {
+                return NULL;
+        }
+        return (void *) L1_CACHE_ALIGN((u64) *base);
+}
+
+
 /*
  * Given a nasid, get the physical address of the partition's reserved page
  * for that nasid. This function returns 0 on any error.
@@ -1038,13 +1063,12 @@ xpc_discovery(void)
         remote_vars = (struct xpc_vars *) remote_rp;


-        discovered_nasids = kmalloc(sizeof(u64) * xp_nasid_mask_words,
+        discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words,
                                                         GFP_KERNEL);
         if (discovered_nasids == NULL) {
                 kfree(remote_rp_base);
                 return;
         }
-        memset(discovered_nasids, 0, sizeof(u64) * xp_nasid_mask_words);

         rp = (struct xpc_rsvd_page *) xpc_rsvd_page;
...
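The conversion in the hunk above is the same one applied throughout the series: kzalloc() returns memory that is already zero-filled, so the kmalloc()-plus-memset() pair collapses into a single call with no change in behaviour. Schematically, using the names from that hunk (this is an illustration, not additional code in the commit):

        /* before: allocate, then clear by hand */
        discovered_nasids = kmalloc(sizeof(u64) * xp_nasid_mask_words,
                                                        GFP_KERNEL);
        if (discovered_nasids != NULL)
                memset(discovered_nasids, 0, sizeof(u64) * xp_nasid_mask_words);

        /* after: kzalloc() hands back zeroed memory directly */
        discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words,
                                                        GFP_KERNEL);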
@@ -1227,28 +1227,6 @@ xpc_map_bte_errors(bte_result_t error)


-static inline void *
-xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
-{
-        /* see if kmalloc will give us cachline aligned memory by default */
-        *base = kmalloc(size, flags);
-        if (*base == NULL) {
-                return NULL;
-        }
-        if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
-                return *base;
-        }
-        kfree(*base);
-
-        /* nope, we'll have to do it ourselves */
-        *base = kmalloc(size + L1_CACHE_BYTES, flags);
-        if (*base == NULL) {
-                return NULL;
-        }
-        return (void *) L1_CACHE_ALIGN((u64) *base);
-}
-
-
 /*
  * Check to see if there is any channel activity to/from the specified
  * partition.
...