From: Chandra Seetharaman <sekharan@us.ibm.com>
To: Matt Helsley <matthltc@us.ibm.com>
Cc: LKML <linux-kernel@vger.kernel.org>,
	Evgeniy Polyakov <johnpol@2ka.mipt.ru>,
	Guillaume Thouvenin <guillaume.thouvenin@bull.net>
Subject: Re: [RFC][PATCH 3/3] Process events biarch bug: New process events connector value
Date: Tue, 27 Jun 2006 12:14:39 -0700
Message-ID: <1151435679.1412.16.camel@linuxchandra>
In-Reply-To: <1151408975.21787.1815.camel@stark>

On Tue, 2006-06-27 at 04:49 -0700, Matt Helsley wrote:
> "Deprecate" existing Process Events connector interface and add a new one
> that works cleanly on biarch platforms.
> 
> Any expansion of the previous event structure would break userspace's ability
> to work around the biarch incompatibility problem. Hence this patch creates a
> new interface and generates events for both (when necessary).

Is there a reason why the number-of-listeners check (basically the
LISTEN/IGNORE accounting) is removed? And why as part of this patch?
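
If the intent was only to make sure old-format listeners are still served,
the early exit could be kept by checking both counters, e.g. (only a sketch,
assuming proc_event_num_listeners is still maintained for the new interface):

	/* sketch only: skip all work when nobody listens on either interface */
	if (atomic_read(&proc_event_num_listeners) < 1 &&
	    atomic_read(&proc_event_num_old_listeners) < 1)
		return rc;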

<snip>

> @@ -158,16 +164,15 @@ static int cn_proc_watch_task(struct not
>  			      void *t)
>  {
>  	struct task_struct *task = t;
>  	struct cn_msg *msg;
>  	struct proc_event *ev;
> +	struct proc_event_deprecated *ev_old;
> +	struct timespec timestamp;
>  	__u8 buffer[CN_PROC_MSG_SIZE];
>  	int rc = NOTIFY_OK;
>  
> -	if (atomic_read(&proc_event_num_listeners) < 1)
> -		return rc;
> -
>  	msg = (struct cn_msg*)buffer;
>  	ev = (struct proc_event*)msg->data;
>  	switch (get_watch_event(val)) {
>  	case WATCH_TASK_CLONE:
>  		proc_fork_connector(task, ev);
> @@ -189,16 +194,26 @@ static int cn_proc_watch_task(struct not
>  		break;
>  	}
>  	if (rc != NOTIFY_OK)
>  		return rc;
>  	get_seq(&msg->seq, &ev->cpu);
> -	ktime_get_ts(&ev->timestamp); /* get high res monotonic timestamp */
> +	ktime_get_ts(&timestamp); /* get high res monotonic timestamp */
> +	ev->timestamp_ns = ((__u64)timestamp.tv_sec*(__u64)NSEC_PER_SEC) + (__u64)timestamp.tv_nsec;
>  	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
>  	msg->ack = 0; /* not used */
>  	msg->len = sizeof(*ev);
>  	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
>  	/* If cn_netlink_send() fails, drop data */
> +
> +	if (atomic_read(&proc_event_num_old_listeners) < 1)
> +		return rc;
> +	ev_old = (struct proc_event_deprecated*)msg->data;
> +	msg->len = sizeof(*ev_old);

A comment noting that the cpu, what, and ack fields were filled in above
and are still valid as-is would help.
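
Something along these lines, for instance (just a suggested wording):

	ev_old = (struct proc_event_deprecated*)msg->data;
	/*
	 * msg->ack, ev->what and ev->cpu were filled in above for the
	 * new-format event and are still valid as-is for the deprecated
	 * format, so only the timestamp and event_data need fixing up.
	 */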

> +	memmove(&ev_old->event_data, &ev->event_data, sizeof(ev->event_data));
> +	memcpy(&ev_old->timestamp, &timestamp, sizeof(timestamp));
> +	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
> +	/* If cn_netlink_send() fails, drop data */
>  	return rc;
>  }
>  
>  static struct notifier_block __read_mostly cn_proc_nb = {
>  	.notifier_call = cn_proc_watch_task,
> @@ -211,20 +226,27 @@ static struct notifier_block __read_most
>   */
>  static int __init cn_proc_init(void)
>  {
>  	int err;
>  
> -	if ((err = cn_add_callback(&cn_proc_event_id, "cn_proc",
> -	 			   &cn_proc_mcast_ctl))) {
> -		printk(KERN_WARNING "cn_proc failed to register\n");
> -		goto out;
> -	}
> +	err = cn_add_callback(&cn_proc_event_deprecated_id, "cn_proc_old",
> +			      &cn_proc_mcast_old_ctl);
> +	if (err)
> +		goto error_old;


> +	err = cn_add_callback(&cn_proc_event_id, "cn_proc", NULL);

Is this needed if you are not going to have a callback?

> +	if (err)
> +		goto error;

Shouldn't this skip the cn_del_callback(&cn_proc_event_id) call here, since
that registration just failed? (goto error_old, perhaps)
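
That is, something along these lines (just a sketch):

	err = cn_add_callback(&cn_proc_event_id, "cn_proc", NULL);
	if (err)
		goto error_old;	/* nothing to delete for cn_proc_event_id yet */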

>  
>  	err = register_task_watcher(&cn_proc_nb);
> -	if (err != 0)
> -		cn_del_callback(&cn_proc_event_id);
> -out:
> +	if (err)
> +		goto error;
> +	return err;
> +error:
> +	cn_del_callback(&cn_proc_event_id);
> +error_old:
> +	cn_del_callback(&cn_proc_event_deprecated_id);
> +	printk(KERN_WARNING "cn_proc failed to register\n");
>  	return err;
>  }
>  
<snip>
-- 

----------------------------------------------------------------------
    Chandra Seetharaman               | Be careful what you choose....
              - sekharan@us.ibm.com   |      .......you may get it.
----------------------------------------------------------------------


