From: George Dunlap <George.Dunlap@eu.citrix.com>
To: Olaf Hering <olaf@aepfle.de>
Cc: xen-devel@lists.xensource.com, George Dunlap <george.dunlap@citrix.com>
Subject: Re: [PATCH 3 of 4] xentrace: update __insert_record() to copy the trace record to individual mfns
Date: Thu, 26 May 2011 11:06:00 +0100
Message-ID: <BANLkTinmkMfjoHVaQDywDpBCEKQWy6gMaA@mail.gmail.com>
In-Reply-To: <1a45e40add8b40753237.1305037958@localhost>

Acked-by: George Dunlap <george.dunlap@eu.citrix.com>

On Tue, May 10, 2011 at 3:32 PM, Olaf Hering <olaf@aepfle.de> wrote:
> # HG changeset patch
> # User Olaf Hering <olaf@aepfle.de>
> # Date 1305037539 -7200
> # Node ID 1a45e40add8b407532374c34f20bad51707808cf
> # Parent  575bf78214ef193e44806aa9766e084d721783b5
> xentrace: update __insert_record() to copy the trace record to individual mfns
>
> Update __insert_record() to copy the trace record to individual mfns.
> This is a prereq before changing the per-cpu allocation from contiguous
> to non-contiguous allocation.
>
> v2:
>  update offset calculation to use shift and mask
>  update type of mfn_offset to match type of data source
>
> Signed-off-by: Olaf Hering <olaf@aepfle.de>
>
> diff -r 575bf78214ef -r 1a45e40add8b xen/common/trace.c
> --- a/xen/common/trace.c        Tue May 10 16:23:01 2011 +0200
> +++ b/xen/common/trace.c        Tue May 10 16:25:39 2011 +0200
> @@ -52,7 +52,6 @@ static struct t_info *t_info;
>  static unsigned int t_info_pages;
>
>  static DEFINE_PER_CPU_READ_MOSTLY(struct t_buf *, t_bufs);
> -static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, t_data);
>  static DEFINE_PER_CPU_READ_MOSTLY(spinlock_t, t_lock);
>  static u32 data_size __read_mostly;
>
> @@ -208,7 +207,6 @@ static int alloc_trace_bufs(unsigned int
>
>         per_cpu(t_bufs, cpu) = buf = rawbuf;
>         buf->cons = buf->prod = 0;
> -        per_cpu(t_data, cpu) = (unsigned char *)(buf + 1);
>     }
>
>     offset = t_info_first_offset;
> @@ -472,10 +470,16 @@ static inline u32 calc_bytes_avail(const
>     return data_size - calc_unconsumed_bytes(buf);
>  }
>
> -static inline struct t_rec *next_record(const struct t_buf *buf,
> -                                        uint32_t *next)
> +static unsigned char *next_record(const struct t_buf *buf, uint32_t *next,
> +                                 unsigned char **next_page,
> +                                 uint32_t *offset_in_page)
>  {
>     u32 x = buf->prod, cons = buf->cons;
> +    uint16_t per_cpu_mfn_offset;
> +    uint32_t per_cpu_mfn_nr;
> +    uint32_t *mfn_list;
> +    uint32_t mfn;
> +    unsigned char *this_page;
>
>     barrier(); /* must read buf->prod and buf->cons only once */
>     *next = x;
> @@ -487,7 +491,27 @@ static inline struct t_rec *next_record(
>
>     ASSERT(x < data_size);
>
> -    return (struct t_rec *)&this_cpu(t_data)[x];
> +    /* add leading header to get total offset of next record */
> +    x += sizeof(struct t_buf);
> +    *offset_in_page = x & ~PAGE_MASK;
> +
> +    /* offset into array of mfns */
> +    per_cpu_mfn_nr = x >> PAGE_SHIFT;
> +    per_cpu_mfn_offset = t_info->mfn_offset[smp_processor_id()];
> +    mfn_list = (uint32_t *)t_info;
> +    mfn = mfn_list[per_cpu_mfn_offset + per_cpu_mfn_nr];
> +    this_page = mfn_to_virt(mfn);
> +    if (per_cpu_mfn_nr + 1 >= opt_tbuf_size)
> +    {
> +        /* reached end of buffer? */
> +        *next_page = NULL;
> +    }
> +    else
> +    {
> +        mfn = mfn_list[per_cpu_mfn_offset + per_cpu_mfn_nr + 1];
> +        *next_page = mfn_to_virt(mfn);
> +    }
> +    return this_page;
>  }
>
>  static inline void __insert_record(struct t_buf *buf,
> @@ -497,28 +521,37 @@ static inline void __insert_record(struc
>                                    unsigned int rec_size,
>                                    const void *extra_data)
>  {
> -    struct t_rec *rec;
> +    struct t_rec split_rec, *rec;
>     uint32_t *dst;
> +    unsigned char *this_page, *next_page;
>     unsigned int extra_word = extra / sizeof(u32);
>     unsigned int local_rec_size = calc_rec_size(cycles, extra);
>     uint32_t next;
> +    uint32_t offset;
> +    uint32_t remaining;
>
>     BUG_ON(local_rec_size != rec_size);
>     BUG_ON(extra & 3);
>
> -    rec = next_record(buf, &next);
> -    if ( !rec )
> +    this_page = next_record(buf, &next, &next_page, &offset);
> +    if ( !this_page )
>         return;
> -    /* Double-check once more that we have enough space.
> -     * Don't bugcheck here, in case the userland tool is doing
> -     * something stupid. */
> -    if ( (unsigned char *)rec + rec_size > this_cpu(t_data) + data_size )
> +
> +    remaining = PAGE_SIZE - offset;
> +
> +    if ( unlikely(rec_size > remaining) )
>     {
> -        if ( printk_ratelimit() )
> +        if ( next_page == NULL )
> +        {
> +            /* access beyond end of buffer */
>             printk(XENLOG_WARNING
> -                   "%s: size=%08x prod=%08x cons=%08x rec=%u\n",
> -                   __func__, data_size, next, buf->cons, rec_size);
> -        return;
> +                   "%s: size=%08x prod=%08x cons=%08x rec=%u remaining=%u\n",
> +                   __func__, data_size, next, buf->cons, rec_size, remaining);
> +            return;
> +        }
> +        rec = &split_rec;
> +    } else {
> +        rec = (struct t_rec*)(this_page + offset);
>     }
>
>     rec->event = event;
> @@ -535,6 +568,12 @@ static inline void __insert_record(struc
>     if ( extra_data && extra )
>         memcpy(dst, extra_data, extra);
>
> +    if ( unlikely(rec_size > remaining) )
> +    {
> +        memcpy(this_page + offset, rec, remaining);
> +        memcpy(next_page, (char *)rec + remaining, rec_size - remaining);
> +    }
> +
>     wmb();
>
>     next += rec_size;
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xensource.com
> http://lists.xensource.com/xen-devel
>
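For readers following the new next_record() logic, here is a minimal, self-contained sketch (not Xen code) of how a byte offset into the per-cpu trace data is turned into a page index and an in-page offset. The PAGE_SHIFT value, the t_buf_stub type, the literal offset, and the use of stdio are illustrative assumptions; in the patch the page index is added to t_info->mfn_offset[cpu] to index the MFN list, and the resulting MFN is mapped with mfn_to_virt().

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Stand-in for Xen's struct t_buf header (cons/prod counters). */
struct t_buf_stub { uint32_t cons, prod; };

int main(void)
{
    uint32_t x = 8180;                         /* offset of the next record in the data area */

    /* The t_buf header occupies the start of the first page, so add it in. */
    x += sizeof(struct t_buf_stub);

    uint32_t offset_in_page = x & ~PAGE_MASK;  /* where the record starts within its page */
    uint32_t page_nr        = x >> PAGE_SHIFT; /* index into the per-cpu page (MFN) list */

    printf("record begins in page %u at offset %u, %u bytes left on that page\n",
           page_nr, offset_in_page, PAGE_SIZE - offset_in_page);
    return 0;
}

With these example numbers the record starts 4 bytes before a page boundary, which is exactly the case the split-copy path below has to handle.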

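Similarly, a hedged sketch of the split-copy step in __insert_record(): when the record does not fit in the bytes remaining on the current page, the patch assembles it in a local staging record (split_rec) and then writes it out with two memcpy() calls, one filling the tail of the current page and one starting the next page. The tiny PAGE_SIZE, the literal payload, and the printf output below are illustrative only.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define PAGE_SIZE 16u                          /* tiny page so the split is easy to see */

int main(void)
{
    unsigned char this_page[PAGE_SIZE], next_page[PAGE_SIZE];
    unsigned char staging[8] = { 'T','R','A','C','E','R','E','C' };  /* stand-in for split_rec */

    uint32_t offset    = 12;                   /* record would start here on the current page */
    uint32_t remaining = PAGE_SIZE - offset;   /* only 4 bytes left on this page */
    uint32_t rec_size  = sizeof(staging);      /* 8-byte record: does not fit */

    if (rec_size > remaining) {
        /* Tail of the current page gets the first part of the record... */
        memcpy(this_page + offset, staging, remaining);
        /* ...and the remainder lands at the start of the next page. */
        memcpy(next_page, staging + remaining, rec_size - remaining);
    } else {
        memcpy(this_page + offset, staging, rec_size);
    }

    printf("tail of this page: %.4s, head of next page: %.4s\n",
           (char *)(this_page + offset), (char *)next_page);
    return 0;
}
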
Thread overview: 10+ messages
2011-05-10 14:32 [PATCH 0 of 4] xentrace [v2]: non-contiguous allocation of per-cpu buffer Olaf Hering
2011-05-10 14:32 ` [PATCH 1 of 4] xentrace: reduce trace buffer size to something mfn_offset can reach Olaf Hering
2011-05-26 10:05   ` George Dunlap
2011-05-10 14:32 ` [PATCH 2 of 4] xentrace: fix type of offset to avoid out-of-bounds access Olaf Hering
2011-05-26 10:05   ` George Dunlap
2011-05-10 14:32 ` [PATCH 3 of 4] xentrace: update __insert_record() to copy the trace record to individual mfns Olaf Hering
2011-05-26 10:06   ` George Dunlap [this message]
2011-05-10 14:32 ` [PATCH 4 of 4] xentrace: allocate non-contiguous per-cpu trace buffers Olaf Hering
2011-05-26 10:06   ` George Dunlap
2011-05-20  8:36 ` [PATCH 0 of 4] xentrace [v2]: non-contiguous allocation of per-cpu buffer Keir Fraser
