From: Christoph Lameter <clameter@sgi.com>
To: akpm@linux-foundation.org
Cc: linux-arch@vger.kernel.org,
Martin Schwidefsky <schwidefsky@de.ibm.com>,
linux-kernel@vger.kernel.org, David Miller <davem@davemloft.net>,
Eric Dumazet <dada1@cosmosbay.com>,
Peter Zijlstra <peterz@infradead.org>
Subject: [patch 27/30] cpu alloc: Explicitly code allocpercpu calls in iucv
Date: Fri, 16 Nov 2007 15:09:47 -0800
Message-ID: <20071116231108.797412104@sgi.com>
In-Reply-To: <20071116230920.278761667@sgi.com>
iucv is the only user of the allocpercpu functions that populate and
depopulate per cpu objects for individual cpus as they are brought up
and down. It is the only allocpercpu user that does I/O on per cpu
objects (which is difficult to do with virtually mapped memory), and it
is the only allocpercpu user that needs GFP_DMA allocations.
Remove the allocpercpu calls from iucv and code the allocation and freeing
manually.
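For reference, the open-coded replacement boils down to keeping an
NR_CPUS sized array of pointers and filling it with node local, DMA
capable allocations for each online cpu. A minimal sketch of that
pattern (using a made-up foo_data structure and foo_alloc_percpu()
helper rather than the actual iucv types) could look like this:

	#include <linux/cpu.h>
	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/topology.h>

	struct foo_data {
		unsigned long value;
	};

	/* One pointer per cpu instead of an allocpercpu descriptor. */
	static struct foo_data *foo_data[NR_CPUS];

	static int foo_alloc_percpu(void)
	{
		int cpu;

		for_each_online_cpu(cpu) {
			/* GFP_DMA keeps the buffer below 2G, as iucv requires. */
			foo_data[cpu] = kmalloc_node(sizeof(struct foo_data),
						     GFP_KERNEL | GFP_DMA,
						     cpu_to_node(cpu));
			if (!foo_data[cpu])
				goto out_free;
		}
		return 0;

	out_free:
		/* Undo the partial allocation on failure. */
		for_each_possible_cpu(cpu) {
			kfree(foo_data[cpu]);
			foo_data[cpu] = NULL;
		}
		return -ENOMEM;
	}

The patch below applies the same pattern to iucv_irq_data and
iucv_param, including the cpu hotplug notifier and the module init/exit
paths.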
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
---
net/iucv/iucv.c | 107 +++++++++++++++++++++++++++++++-------------------------
1 file changed, 61 insertions(+), 46 deletions(-)
Index: linux-2.6/net/iucv/iucv.c
===================================================================
--- linux-2.6.orig/net/iucv/iucv.c 2007-11-15 21:17:23.719404553 -0800
+++ linux-2.6/net/iucv/iucv.c 2007-11-15 21:25:38.758654051 -0800
@@ -97,7 +97,7 @@ struct iucv_irq_list {
struct iucv_irq_data data;
};
-static struct iucv_irq_data *iucv_irq_data;
+static struct iucv_irq_data *iucv_irq_data[NR_CPUS];
static cpumask_t iucv_buffer_cpumask = CPU_MASK_NONE;
static cpumask_t iucv_irq_cpumask = CPU_MASK_NONE;
@@ -277,7 +277,7 @@ union iucv_param {
/*
* Anchor for per-cpu IUCV command parameter block.
*/
-static union iucv_param *iucv_param;
+static union iucv_param *iucv_param[NR_CPUS];
/**
* iucv_call_b2f0
@@ -356,7 +356,7 @@ static void iucv_allow_cpu(void *data)
* 0x10 - Flag to allow priority message completion interrupts
* 0x08 - Flag to allow IUCV control interrupts
*/
- parm = percpu_ptr(iucv_param, smp_processor_id());
+ parm = iucv_param[cpu];
memset(parm, 0, sizeof(union iucv_param));
parm->set_mask.ipmask = 0xf8;
iucv_call_b2f0(IUCV_SETMASK, parm);
@@ -377,7 +377,7 @@ static void iucv_block_cpu(void *data)
union iucv_param *parm;
/* Disable all iucv interrupts. */
- parm = percpu_ptr(iucv_param, smp_processor_id());
+ parm = iucv_param[cpu];
memset(parm, 0, sizeof(union iucv_param));
iucv_call_b2f0(IUCV_SETMASK, parm);
@@ -401,9 +401,9 @@ static void iucv_declare_cpu(void *data)
return;
/* Declare interrupt buffer. */
- parm = percpu_ptr(iucv_param, cpu);
+ parm = iucv_param[cpu];
memset(parm, 0, sizeof(union iucv_param));
- parm->db.ipbfadr1 = virt_to_phys(percpu_ptr(iucv_irq_data, cpu));
+ parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]);
rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm);
if (rc) {
char *err = "Unknown";
@@ -458,7 +458,7 @@ static void iucv_retrieve_cpu(void *data
iucv_block_cpu(NULL);
/* Retrieve interrupt buffer. */
- parm = percpu_ptr(iucv_param, cpu);
+ parm = iucv_param[cpu];
iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm);
/* Clear indication that an iucv buffer exists for this cpu. */
@@ -558,22 +558,23 @@ static int __cpuinit iucv_cpu_notify(str
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- if (!percpu_populate(iucv_irq_data,
- sizeof(struct iucv_irq_data),
- GFP_KERNEL|GFP_DMA, cpu))
+ iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
+ GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
+ if (!iucv_irq_data[cpu])
return NOTIFY_BAD;
- if (!percpu_populate(iucv_param, sizeof(union iucv_param),
- GFP_KERNEL|GFP_DMA, cpu)) {
- percpu_depopulate(iucv_irq_data, cpu);
+ iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
+ GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
+ if (!iucv_param[cpu])
return NOTIFY_BAD;
- }
break;
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- percpu_depopulate(iucv_param, cpu);
- percpu_depopulate(iucv_irq_data, cpu);
+ kfree(iucv_param[cpu]);
+ iucv_param[cpu] = NULL;
+ kfree(iucv_irq_data[cpu]);
+ iucv_irq_data[cpu] = NULL;
break;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
@@ -612,7 +613,7 @@ static int iucv_sever_pathid(u16 pathid,
{
union iucv_param *parm;
- parm = percpu_ptr(iucv_param, smp_processor_id());
+ parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
if (userdata)
memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
@@ -755,7 +756,7 @@ int iucv_path_accept(struct iucv_path *p
local_bh_disable();
/* Prepare parameter block. */
- parm = percpu_ptr(iucv_param, smp_processor_id());
+ parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
parm->ctrl.ippathid = path->pathid;
parm->ctrl.ipmsglim = path->msglim;
@@ -799,7 +800,7 @@ int iucv_path_connect(struct iucv_path *
BUG_ON(in_atomic());
spin_lock_bh(&iucv_table_lock);
iucv_cleanup_queue();
- parm = percpu_ptr(iucv_param, smp_processor_id());
+ parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
parm->ctrl.ipmsglim = path->msglim;
parm->ctrl.ipflags1 = path->flags;
@@ -854,7 +855,7 @@ int iucv_path_quiesce(struct iucv_path *
int rc;
local_bh_disable();
- parm = percpu_ptr(iucv_param, smp_processor_id());
+ parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
if (userdata)
memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
@@ -881,7 +882,7 @@ int iucv_path_resume(struct iucv_path *p
int rc;
local_bh_disable();
- parm = percpu_ptr(iucv_param, smp_processor_id());
+ parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
if (userdata)
memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
@@ -936,7 +937,7 @@ int iucv_message_purge(struct iucv_path
int rc;
local_bh_disable();
- parm = percpu_ptr(iucv_param, smp_processor_id());
+ parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
parm->purge.ippathid = path->pathid;
parm->purge.ipmsgid = msg->id;
@@ -1003,7 +1004,7 @@ int iucv_message_receive(struct iucv_pat
}
local_bh_disable();
- parm = percpu_ptr(iucv_param, smp_processor_id());
+ parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
parm->db.ipbfadr1 = (u32)(addr_t) buffer;
parm->db.ipbfln1f = (u32) size;
@@ -1040,7 +1041,7 @@ int iucv_message_reject(struct iucv_path
int rc;
local_bh_disable();
- parm = percpu_ptr(iucv_param, smp_processor_id());
+ parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
parm->db.ippathid = path->pathid;
parm->db.ipmsgid = msg->id;
@@ -1074,7 +1075,7 @@ int iucv_message_reply(struct iucv_path
int rc;
local_bh_disable();
- parm = percpu_ptr(iucv_param, smp_processor_id());
+ parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
if (flags & IUCV_IPRMDATA) {
parm->dpl.ippathid = path->pathid;
@@ -1118,7 +1119,7 @@ int iucv_message_send(struct iucv_path *
int rc;
local_bh_disable();
- parm = percpu_ptr(iucv_param, smp_processor_id());
+ parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
if (flags & IUCV_IPRMDATA) {
/* Message of 8 bytes can be placed into the parameter list. */
@@ -1172,7 +1173,7 @@ int iucv_message_send2way(struct iucv_pa
int rc;
local_bh_disable();
- parm = percpu_ptr(iucv_param, smp_processor_id());
+ parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
if (flags & IUCV_IPRMDATA) {
parm->dpl.ippathid = path->pathid;
@@ -1559,7 +1560,7 @@ static void iucv_external_interrupt(u16
struct iucv_irq_data *p;
struct iucv_irq_list *work;
- p = percpu_ptr(iucv_irq_data, smp_processor_id());
+ p = iucv_irq_data[smp_processor_id()];
if (p->ippathid >= iucv_max_pathid) {
printk(KERN_WARNING "iucv_do_int: Got interrupt with "
"pathid %d > max_connections (%ld)\n",
@@ -1598,6 +1599,7 @@ static void iucv_external_interrupt(u16
static int __init iucv_init(void)
{
int rc;
+ int cpu;
if (!MACHINE_IS_VM) {
rc = -EPROTONOSUPPORT;
@@ -1617,19 +1619,23 @@ static int __init iucv_init(void)
rc = PTR_ERR(iucv_root);
goto out_bus;
}
- /* Note: GFP_DMA used to get memory below 2G */
- iucv_irq_data = percpu_alloc(sizeof(struct iucv_irq_data),
- GFP_KERNEL|GFP_DMA);
- if (!iucv_irq_data) {
- rc = -ENOMEM;
- goto out_root;
- }
- /* Allocate parameter blocks. */
- iucv_param = percpu_alloc(sizeof(union iucv_param),
- GFP_KERNEL|GFP_DMA);
- if (!iucv_param) {
- rc = -ENOMEM;
- goto out_extint;
+
+ for_each_online_cpu(cpu) {
+ /* Note: GFP_DMA used to get memory below 2G */
+ iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
+ GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
+ if (!iucv_irq_data[cpu]) {
+ rc = -ENOMEM;
+ goto out_free;
+ }
+
+ /* Allocate parameter blocks. */
+ iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
+ GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
+ if (!iucv_param[cpu]) {
+ rc = -ENOMEM;
+ goto out_free;
+ }
}
register_hotcpu_notifier(&iucv_cpu_notifier);
ASCEBC(iucv_error_no_listener, 16);
@@ -1638,9 +1644,13 @@ static int __init iucv_init(void)
iucv_available = 1;
return 0;
-out_extint:
- percpu_free(iucv_irq_data);
-out_root:
+out_free:
+ for_each_possible_cpu(cpu) {
+ kfree(iucv_param[cpu]);
+ iucv_param[cpu] = NULL;
+ kfree(iucv_irq_data[cpu]);
+ iucv_irq_data[cpu] = NULL;
+ }
s390_root_dev_unregister(iucv_root);
out_bus:
bus_unregister(&iucv_bus);
@@ -1658,6 +1668,7 @@ out:
static void __exit iucv_exit(void)
{
struct iucv_irq_list *p, *n;
+ int cpu;
spin_lock_irq(&iucv_queue_lock);
list_for_each_entry_safe(p, n, &iucv_task_queue, list)
@@ -1666,8 +1677,12 @@ static void __exit iucv_exit(void)
kfree(p);
spin_unlock_irq(&iucv_queue_lock);
unregister_hotcpu_notifier(&iucv_cpu_notifier);
- percpu_free(iucv_param);
- percpu_free(iucv_irq_data);
+ for_each_possible_cpu(cpu) {
+ kfree(iucv_param[cpu]);
+ iucv_param[cpu] = NULL;
+ kfree(iucv_irq_data[cpu]);
+ iucv_irq_data[cpu] = NULL;
+ }
s390_root_dev_unregister(iucv_root);
bus_unregister(&iucv_bus);
unregister_external_interrupt(0x4000, iucv_external_interrupt);
--