From: Olaf Hering <olaf@aepfle.de>
To: xen-devel@lists.xensource.com
Cc: George Dunlap <george.dunlap@citrix.com>
Subject: [PATCH 5 of 5] Allocate non-contiguous per-cpu trace buffers
Date: Fri, 06 May 2011 20:25:36 +0200
Message-ID: <bcd0b17bf8a3ab08760b.1304706336@localhost>
In-Reply-To: <patchbomb.1304706331@localhost>
# HG changeset patch
# User Olaf Hering <olaf@aepfle.de>
# Date 1304706230 -7200
# Node ID bcd0b17bf8a3ab08760b8dcc1ca276defab1ed71
# Parent 1c5da4d9e33c821b9e3276d7aefe7ee16ce7b162
Allocate non-contiguous per-cpu trace buffers.

Instead of allocating each cpu's trace buffer as a single physically
contiguous block of 2^order xenheap pages, allocate the buffer one page
at a time and record each page's mfn in the t_info mfn list. This removes
the need for large contiguous allocations; only the first page of each
buffer is special, since it holds the per-cpu struct t_buf metadata
(the cons/prod indices).
Signed-off-by: Olaf Hering <olaf@aepfle.de>
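
The core pattern of the change, sketched as a minimal self-contained C
program: pages are allocated one at a time and tracked through a flat
lookup table plus a per-cpu starting offset, so a mid-way failure can be
unwound page by page. This is only an analogy of the xenheap code in the
diff below: malloc() stands in for alloc_xenheap_pages(), and NR_CPUS,
NR_PAGES, page_table, cpu_offset, alloc_cpu_buffers() and
free_cpu_buffers() are illustrative names, not identifiers from the Xen
tree.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative sizes only -- not the values Xen uses. */
#define NR_CPUS   4
#define NR_PAGES  8
#define PAGE_SIZE 4096

/* Flat table of per-page pointers, analogous to t_info_mfn_list,
 * plus a per-cpu starting offset, analogous to t_info->mfn_offset[]. */
static void *page_table[NR_CPUS * NR_PAGES];
static unsigned int cpu_offset[NR_CPUS];

static void free_cpu_buffers(void)
{
    unsigned int cpu, i;

    for ( cpu = 0; cpu < NR_CPUS; cpu++ )
        for ( i = 0; i < NR_PAGES; i++ )
        {
            void **slot = &page_table[cpu_offset[cpu] + i];
            if ( !*slot )          /* stop at the first page never allocated */
                break;
            free(*slot);
            *slot = NULL;
        }
}

static int alloc_cpu_buffers(void)
{
    unsigned int cpu, i, offset = 0;

    for ( cpu = 0; cpu < NR_CPUS; cpu++ )
    {
        cpu_offset[cpu] = offset;
        for ( i = 0; i < NR_PAGES; i++ )
        {
            /* One page at a time: no physical contiguity is required,
             * so a large buffer cannot fail for lack of a large
             * contiguous free region. */
            void *p = malloc(PAGE_SIZE);
            if ( !p )
            {
                free_cpu_buffers();   /* unwind whatever was allocated */
                return -1;
            }
            page_table[offset++] = p;
        }
    }
    return 0;
}

int main(void)
{
    if ( alloc_cpu_buffers() )
        printf("allocation failed, partial buffers released\n");
    else
        printf("allocated %d pages per cpu, non-contiguously\n", NR_PAGES);
    free_cpu_buffers();
    return 0;
}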
diff -r 1c5da4d9e33c -r bcd0b17bf8a3 xen/common/trace.c
--- a/xen/common/trace.c Fri May 06 18:54:41 2011 +0200
+++ b/xen/common/trace.c Fri May 06 20:23:50 2011 +0200
@@ -151,7 +151,7 @@ static int calculate_tbuf_size(unsigned
*/
static int alloc_trace_bufs(unsigned int pages)
{
- int i, cpu, order;
+ int i, cpu;
/* Start after a fixed-size array of NR_CPUS */
uint32_t *t_info_mfn_list;
uint32_t t_info_first_offset;
@@ -167,32 +167,10 @@ static int alloc_trace_bufs(unsigned int
t_info_first_offset = calc_tinfo_first_offset();
pages = calculate_tbuf_size(pages, t_info_first_offset);
- order = get_order_from_pages(pages);
t_info = alloc_xenheap_pages(get_order_from_pages(t_info_pages), 0);
if ( t_info == NULL )
- goto out_dealloc;
-
- /*
- * First, allocate buffers for all of the cpus. If any
- * fails, deallocate what you have so far and exit.
- */
- for_each_online_cpu(cpu)
- {
- void *rawbuf;
- struct t_buf *buf;
-
- if ( (rawbuf = alloc_xenheap_pages(
- order, MEMF_bits(32 + PAGE_SHIFT))) == NULL )
- {
- printk(XENLOG_INFO "xentrace: memory allocation failed "
- "on cpu %d\n", cpu);
- goto out_dealloc;
- }
-
- per_cpu(t_bufs, cpu) = buf = rawbuf;
- buf->cons = buf->prod = 0;
- }
+ goto out_dealloc_t_info;
offset = t_info_first_offset;
t_info_mfn_list = (uint32_t *)t_info;
@@ -204,27 +182,50 @@ static int alloc_trace_bufs(unsigned int
t_info->tbuf_size = pages;
/*
- * Now share the pages so xentrace can map them, and write them in
- * the global t_info structure.
+ * Allocate buffers for all of the cpus.
+ * If any fails, deallocate what you have so far and exit.
*/
for_each_online_cpu(cpu)
{
- void *rawbuf = per_cpu(t_bufs, cpu);
- struct page_info *p = virt_to_page(rawbuf);
- uint32_t mfn = virt_to_mfn(rawbuf);
-
- for ( i = 0; i < pages; i++ )
- {
- share_xen_page_with_privileged_guests(p + i, XENSHARE_writable);
-
- t_info_mfn_list[offset + i]=mfn + i;
- }
- t_info->mfn_offset[cpu]=offset;
- printk(XENLOG_INFO "xentrace: p%d mfn %"PRIx32" offset %d\n",
- cpu, mfn, offset);
- offset+=i;
+ void *p;
+ struct t_buf *buf;
+ struct page_info *pg;
spin_lock_init(&per_cpu(t_lock, cpu));
+ /* first allocate the first page, it contains the per-cpu metadata */
+ p = alloc_xenheap_pages(0, MEMF_bits(32 + PAGE_SHIFT));
+ if ( !p )
+ {
+ printk(XENLOG_INFO "xentrace: memory allocation failed "
+ "on cpu %d after %d pages\n", cpu, 0);
+ goto out_dealloc;
+ }
+ per_cpu(t_bufs, cpu) = buf = p;
+ buf->cons = buf->prod = 0;
+
+ t_info->mfn_offset[cpu] = offset;
+ t_info_mfn_list[offset] = virt_to_mfn(p);
+ pg = virt_to_page(p);
+ share_xen_page_with_privileged_guests(pg, XENSHARE_writable);
+
+ printk(XENLOG_INFO "xentrace: p%d mfn %lx offset %d\n",
+ cpu, virt_to_mfn(p), offset);
+
+ /* now the remaining trace pages */
+ offset++;
+ for ( i = 1; i < pages; i++ )
+ {
+ p = alloc_xenheap_pages(0, MEMF_bits(32 + PAGE_SHIFT));
+ if ( !p )
+ {
+ printk(XENLOG_INFO "xentrace: memory allocation failed "
+ "on cpu %d after %d pages\n", cpu, i);
+ goto out_dealloc;
+ }
+ t_info_mfn_list[offset++] = virt_to_mfn(p);
+ pg = virt_to_page(p);
+ share_xen_page_with_privileged_guests(pg, XENSHARE_writable);
+ }
}
data_size = (pages * PAGE_SIZE - sizeof(struct t_buf));
@@ -240,14 +241,18 @@ static int alloc_trace_bufs(unsigned int
out_dealloc:
for_each_online_cpu(cpu)
{
- void *rawbuf = per_cpu(t_bufs, cpu);
per_cpu(t_bufs, cpu) = NULL;
- if ( rawbuf )
+ offset = t_info->mfn_offset[cpu];
+ for ( i = 0; i < pages; i++ )
{
- ASSERT(!(virt_to_page(rawbuf)->count_info & PGC_allocated));
- free_xenheap_pages(rawbuf, order);
+ uint32_t mfn = t_info_mfn_list[offset + i];
+ if ( !mfn )
+ break;
+ ASSERT(!(mfn_to_page(mfn)->count_info & PGC_allocated));
+ free_xenheap_pages(mfn_to_virt(mfn), 0);
}
}
+out_dealloc_t_info:
free_xenheap_pages(t_info, get_order_from_pages(t_info_pages));
t_info = NULL;
printk(XENLOG_WARNING "xentrace: allocation failed! Tracing disabled.\n");
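
To make the lookup side of the new layout concrete, here is a rough
sketch of the indexing arithmetic the hunks above rely on. The struct
definitions are simplified stand-ins (the real ones live in Xen's public
trace interface and use different field widths), and tbuf_page_mfn() is a
hypothetical helper, not a function added by the patch.

#include <stdint.h>

/*
 * Simplified stand-ins for the structures the patch manipulates; field
 * types and sizes are illustrative only.
 */
struct t_buf  { uint32_t cons, prod; };   /* heads the first page of each per-cpu buffer */
struct t_info {
    uint32_t tbuf_size;                   /* pages per per-cpu buffer */
    uint32_t mfn_offset[64];              /* fixed-size array of NR_CPUS entries in the real layout */
};

/*
 * The mfn list overlays the t_info allocation itself, starting at
 * t_info_first_offset, so the mfn of page `i` of cpu `c`'s buffer is
 * reached with a single table lookup -- no contiguity assumed anywhere.
 */
uint32_t tbuf_page_mfn(const struct t_info *ti, unsigned int c, unsigned int i)
{
    const uint32_t *t_info_mfn_list = (const uint32_t *)ti;

    return t_info_mfn_list[ti->mfn_offset[c] + i];
}

Because every page is reached through its own entry in the mfn list,
nothing on the consumer side depends on a per-cpu buffer being physically
contiguous; patch 4 of the series updates __insert_record() so that the
producer side likewise copies records across these individual mfns.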
Thread overview: 10+ messages
2011-05-06 18:25 [PATCH 0 of 5] xentrace: non-contiguous allocation of per-cpu buffer Olaf Hering
2011-05-06 18:25 ` [PATCH 1 of 5] Move the global variable t_info_first_offset into calculate_tbuf_size() Olaf Hering
2011-05-06 18:25 ` [PATCH 2 of 5] Mark data_size __read_mostly because its only written once Olaf Hering
2011-05-06 18:25 ` [PATCH 3 of 5] Remove unneeded cast when assigning pointer value to dst Olaf Hering
2011-05-06 18:25 ` [PATCH 4 of 5] Update __insert_record() to copy the trace record to individual mfns Olaf Hering
2011-05-09 9:03 ` Keir Fraser
2011-05-09 9:31 ` Olaf Hering
2011-05-09 11:24 ` George Dunlap
2011-05-06 18:25 ` Olaf Hering [this message]
2011-05-08 15:07 ` [PATCH 0 of 5] xentrace: non-contiguous allocation of per-cpu buffer Olaf Hering