linux-hotplug.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] convert udevsend/udevd to DGRAM and single-threaded
@ 2004-02-06  6:08 Chris Friesen
  2004-02-06 11:27 ` Kay Sievers
                   ` (9 more replies)
  0 siblings, 10 replies; 11+ messages in thread
From: Chris Friesen @ 2004-02-06  6:08 UTC (permalink / raw)
  To: linux-hotplug

[-- Attachment #1: Type: text/plain, Size: 2053 bytes --]


Kay, you said "unless we can get rid of _all_ the threads or at least
getting faster, I don't want to change it."

Well how about we get rid of all the threads, *and* we get faster?

This patch applies to current bk trees, and does the following:

1) Switch to DGRAM sockets rather than STREAM.  This simplifies things
as mentioned in the previous message.

2) Invalid sequence numbers are mapped to -1 rather than zero, since
zero is a valid sequence number (I think).  Also, this allows for real
speed tests using scripts starting at a zero sequence number, since that
is what the initial expected sequence number is.

3) Get rid of all threading.  This is the biggie.  Some highlights:
	a) timeout using setitimer() and SIGALRM
	b) async child death notification via SIGCHLD
	c) these two signal handlers do nothing but raise volatile flags, all the
work is done in the main loop
	d) locking no longer required


I tested it using the following script

for i in `seq 0 999`;
do
	SEQNUM=$i ACTION=add DEVPATH=$i ./udevsend x;
done

Timing was done using the tsc, each number measures the total time taken
to receive and process a hundred messages, from just after the first
recv() call to just after the last child was reaped.

multi-threaded (secs):
0.433183
0.435601
0.289772
0.290449
0.298509
0.604979
0.998002
0.298365
0.827817
1.361001

single-threaded (secs):
0.205998
0.210246
0.219476
0.209425
0.203916
0.219981
0.215359
0.227679
0.218097
0.203311

Interestingly, not only is the single-threaded version faster, it's also
more deterministic.

I also tested it from 1 to 1000, causing all events to be
delayed until the timeout period passed:

multithreaded:
3.213272
3.316999
3.446929
3.583432
3.711062
3.845539
3.987120
4.126300
4.244764
4.364072

single-threaded:
3.798218
3.799521
3.801352
3.803176
3.805020
3.806643
3.808498
3.810318
3.812169
3.814010

I do want to acknowledge the caveat that the threaded version may
perform better on NPTL, but I think this is decent enough behaviour to
warrant serious consideration.

Comments?

Chris

[-- Attachment #2: udev_nothread.patch --]
[-- Type: text/plain, Size: 16873 bytes --]

diff -Nur --exclude=RCS --exclude=CVS --exclude=SCCS --exclude=BitKeeper --exclude=ChangeSet udev/Makefile myudev/Makefile
--- udev/Makefile	Thu Feb  5 23:44:52 2004
+++ myudev/Makefile	Fri Feb  6 00:22:35 2004
@@ -262,7 +262,7 @@
 	$(STRIPCMD) $@
 
 $(DAEMON): udevd.h $(GEN_HEADERS) udevd.o
-	$(LD) $(LDFLAGS) -lpthread -o $@ $(CRT0) udevd.o $(LIB_OBJS) $(ARCH_LIB_OBJS)
+	$(LD) $(LDFLAGS) -o $@ $(CRT0) udevd.o $(LIB_OBJS) $(ARCH_LIB_OBJS)
 	$(STRIPCMD) $@
 
 $(SENDER): udevd.h $(GEN_HEADERS) udevsend.o
diff -Nur --exclude=RCS --exclude=CVS --exclude=SCCS --exclude=BitKeeper --exclude=ChangeSet udev/udevd.c myudev/udevd.c
--- udev/udevd.c	Fri Feb  6 00:43:06 2004
+++ myudev/udevd.c	Fri Feb  6 00:44:14 2004
@@ -19,7 +19,6 @@
  *
  */
 
-#include <pthread.h>
 #include <stddef.h>
 #include <sys/wait.h>
 #include <signal.h>
@@ -32,6 +31,7 @@
 #include <sys/types.h>
 #include <sys/socket.h>
 #include <sys/un.h>
+#include <sys/time.h>
 
 #include "list.h"
 #include "udev.h"
@@ -39,22 +39,17 @@
 #include "udevd.h"
 #include "logging.h"
 
-
 unsigned char logname[42];
-static pthread_mutex_t  msg_lock;
-static pthread_mutex_t  msg_active_lock;
-static pthread_cond_t msg_active;
-static pthread_mutex_t  exec_lock;
-static pthread_mutex_t  exec_active_lock;
-static pthread_cond_t exec_active;
-static pthread_mutex_t  running_lock;
-static pthread_attr_t thr_attr;
 static int expected_seqnum = 0;
+volatile static int children_waiting;
+volatile static int msg_q_timeout;
 
 LIST_HEAD(msg_list);
 LIST_HEAD(exec_list);
 LIST_HEAD(running_list);
 
+static void exec_queue_manager(void);
+static void msg_queue_manager(void);
 
 static void msg_dump_queue(void)
 {
@@ -75,17 +70,17 @@
 	struct hotplug_msg *new_msg;
 
 	new_msg = malloc(sizeof(struct hotplug_msg));
-	if (new_msg == NULL) {
+	if (!new_msg)
 		dbg("error malloc");
-		return NULL;
-	}
 	return new_msg;
 }
 
-static void msg_delete(struct hotplug_msg *msg)
+static void runq_del(struct hotplug_msg *msg)
 {
-	if (msg != NULL)
-		free(msg);
+	/* remove event from run list */
+	list_del(&msg->list);
+	free(msg);
+	exec_queue_manager();
 }
 
 /* orders the message in the queue by sequence number */
@@ -103,21 +98,16 @@
 	/* store timestamp of queuing */
 	msg->queue_time = time(NULL);
 
-	/* signal queue activity to manager */
-	pthread_mutex_lock(&msg_active_lock);
-	pthread_cond_signal(&msg_active);
-	pthread_mutex_unlock(&msg_active_lock);
+	/* run msg queue manager */
+	msg_queue_manager();
 
 	return ;
 }
 
 /* forks event and removes event from run queue when finished */
-static void *run_threads(void * parm)
+static void run_udev(struct hotplug_msg *msg)
 {
 	pid_t pid;
-	struct hotplug_msg *msg;
-
-	msg = parm;
 	setenv("ACTION", msg->action, 1);
 	setenv("DEVPATH", msg->devpath, 1);
 
@@ -131,190 +121,125 @@
 		break;
 	case -1:
 		dbg("fork of child failed");
-		goto exit;
+		runq_del(msg);
+		break;
 	default:
 		/* wait for exit of child */
-		dbg("==> exec seq %d [%d] working at '%s'",
-		    msg->seqnum, pid, msg->devpath);
-		wait(NULL);
-		dbg("<== exec seq %d came back", msg->seqnum);
+		dbg("==> exec seq %d [%d] working at '%s'", msg->seqnum, pid, msg->devpath);
+	   msg->pid = pid;
 	}
-
-exit:
-	/* remove event from run list */
-	pthread_mutex_lock(&running_lock);
-	list_del_init(&msg->list);
-	pthread_mutex_unlock(&running_lock);
-
-	msg_delete(msg);
-
-	/* signal queue activity to exec manager */
-	pthread_mutex_lock(&exec_active_lock);
-	pthread_cond_signal(&exec_active);
-	pthread_mutex_unlock(&exec_active_lock);
-
-	pthread_exit(0);
 }
 
 /* returns already running task with devpath */
 static struct hotplug_msg *running_with_devpath(struct hotplug_msg *msg)
 {
 	struct hotplug_msg *loop_msg;
-	struct hotplug_msg *tmp_msg;
-
-	list_for_each_entry_safe(loop_msg, tmp_msg, &running_list, list)
+	list_for_each_entry(loop_msg, &running_list, list)
 		if (strncmp(loop_msg->devpath, msg->devpath, sizeof(loop_msg->devpath)) == 0)
 			return loop_msg;
 	return NULL;
 }
 
-/* queue management executes the events and delays events for the same devpath */
-static void *exec_queue_manager(void * parm)
+/* exec queue management routine executes the events and delays events for the same devpath */
+static void exec_queue_manager()
 {
 	struct hotplug_msg *loop_msg;
 	struct hotplug_msg *tmp_msg;
 	struct hotplug_msg *msg;
-	pthread_t run_tid;
 
-	while (1) {
-		pthread_mutex_lock(&exec_lock);
-		list_for_each_entry_safe(loop_msg, tmp_msg, &exec_list, list) {
-			msg = running_with_devpath(loop_msg);
-			if (msg == NULL) {
-				/* move event to run list */
-				pthread_mutex_lock(&running_lock);
-				list_move_tail(&loop_msg->list, &running_list);
-				pthread_mutex_unlock(&running_lock);
-
-				pthread_create(&run_tid, &thr_attr, run_threads, (void *) loop_msg);
-
-				dbg("moved seq %d to running list", loop_msg->seqnum);
-			} else {
-				dbg("delay seq %d, cause seq %d already working on '%s'",
-				    loop_msg->seqnum, msg->seqnum, msg->devpath);
-			}
+	list_for_each_entry_safe(loop_msg, tmp_msg, &exec_list, list) {
+		msg = running_with_devpath(loop_msg);
+		if (!msg) {
+			/* move event to run list */
+			list_move_tail(&loop_msg->list, &running_list);
+			run_udev(loop_msg);
+			dbg("moved seq %d to running list", loop_msg->seqnum);
+		} else {
+			dbg("delay seq %d, cause seq %d already working on '%s'",
+				loop_msg->seqnum, msg->seqnum, msg->devpath);
 		}
-		pthread_mutex_unlock(&exec_lock);
-
-		/* wait for activation, new events or childs coming back */
-		pthread_mutex_lock(&exec_active_lock);
-		pthread_cond_wait(&exec_active, &exec_active_lock);
-		pthread_mutex_unlock(&exec_active_lock);
 	}
 }
 
-static void exec_queue_activate(void)
+static void msg_move_exec(struct hotplug_msg *msg)
 {
-	pthread_mutex_lock(&exec_active_lock);
-	pthread_cond_signal(&exec_active);
-	pthread_mutex_unlock(&exec_active_lock);
+	list_move_tail(&msg->list, &exec_list);
+	exec_queue_manager();
+	expected_seqnum = msg->seqnum+1;
+	dbg("moved seq %d to exec, reset next expected to %d",
+		msg->seqnum, expected_seqnum);
 }
 
-/* move message from incoming to exec queue */
-static void msg_move_exec(struct list_head *head)
-{
-	list_move_tail(head, &exec_list);
-	exec_queue_activate();
-}
-
-/* queue management thread handles the timeouts and dispatches the events */
-static void *msg_queue_manager(void * parm)
+/* msg queue management routine handles the timeouts and dispatches the events */
+static void msg_queue_manager()
 {
 	struct hotplug_msg *loop_msg;
 	struct hotplug_msg *tmp_msg;
 	time_t msg_age = 0;
-	struct timespec tv;
 
-	while (1) {
-		dbg("msg queue manager, next expected is %d", expected_seqnum);
-		pthread_mutex_lock(&msg_lock);
-		pthread_mutex_lock(&exec_lock);
+	dbg("msg queue manager, next expected is %d", expected_seqnum);
 recheck:
-		list_for_each_entry_safe(loop_msg, tmp_msg, &msg_list, list) {
-			/* move event with expected sequence to the exec list */
-			if (loop_msg->seqnum == expected_seqnum) {
-				msg_move_exec(&loop_msg->list);
-				expected_seqnum++;
-				dbg("moved seq %d to exec, next expected is %d",
-				    loop_msg->seqnum, expected_seqnum);
-				continue;
-			}
-
-			/* move event with expired timeout to the exec list */
-			msg_age = time(NULL) - loop_msg->queue_time;
-			if (msg_age > EVENT_TIMEOUT_SEC-1) {
-				msg_move_exec(&loop_msg->list);
-				expected_seqnum = loop_msg->seqnum+1;
-				dbg("moved seq %d to exec, reset next expected to %d",
-				    loop_msg->seqnum, expected_seqnum);
-				goto recheck;
-			} else {
-				break;
-			}
+	list_for_each_entry_safe(loop_msg, tmp_msg, &msg_list, list) {
+		/* move event with expected sequence to the exec list */
+		if (loop_msg->seqnum == expected_seqnum) {
+			msg_move_exec(loop_msg);
+			continue;
 		}
 
-		msg_dump_queue();
-		pthread_mutex_unlock(&exec_lock);
-		pthread_mutex_unlock(&msg_lock);
-
-		/* wait until queue gets active or next message timeout expires */
-		pthread_mutex_lock(&msg_active_lock);
-
-		if (list_empty(&msg_list) == 0) {
-			tv.tv_sec = time(NULL) + EVENT_TIMEOUT_SEC - msg_age;
-			tv.tv_nsec = 0;
-			dbg("next event expires in %li seconds",
-			    EVENT_TIMEOUT_SEC - msg_age);
-			pthread_cond_timedwait(&msg_active, &msg_active_lock, &tv);
+		/* move event with expired timeout to the exec list */
+		msg_age = time(NULL) - loop_msg->queue_time;
+		if (msg_age > EVENT_TIMEOUT_SEC-1) {
+			msg_move_exec(loop_msg);
+			goto recheck;
 		} else {
-			pthread_cond_wait(&msg_active, &msg_active_lock);
+			break;
 		}
-		pthread_mutex_unlock(&msg_active_lock);
 	}
+
+	msg_dump_queue();
+
+	/* wait until queue gets active or next message timeout expires */
+
+	if (list_empty(&msg_list) == 0) {
+		struct itimerval itv = {{0, 0}, {EVENT_TIMEOUT_SEC - msg_age, 0}};
+		dbg("next event expires in %li seconds",
+		    EVENT_TIMEOUT_SEC - msg_age);
+		setitimer(ITIMER_REAL, &itv, 0);
+		}
 }
 
-/* every connect creates a thread which gets the msg, queues it and exits */
-static void *client_threads(void * parm)
+/* receive the msg, do some basic sanity checks, and queue it */
+static void handle_msg(int sock)
 {
-	int sock;
 	struct hotplug_msg *msg;
 	int retval;
 
-	sock = (int) parm;
-
 	msg = msg_create();
 	if (msg == NULL) {
 		dbg("unable to store message");
-		goto exit;
+		return;
 	}
 
 	retval = recv(sock, msg, sizeof(struct hotplug_msg), 0);
 	if (retval <  0) {
-		dbg("unable to receive message");
-		goto exit;
+		if (errno != EINTR)
+			dbg("unable to receive message");
+		return;
 	}
-
+	
 	if (strncmp(msg->magic, UDEV_MAGIC, sizeof(UDEV_MAGIC)) != 0 ) {
 		dbg("message magic '%s' doesn't match, ignore it", msg->magic);
-		msg_delete(msg);
-		goto exit;
+		free(msg);
+		return;
 	}
 
 	/* if no seqnum is given, we move straight to exec queue */
-	if (msg->seqnum == 0) {
-		pthread_mutex_lock(&exec_lock);
+	if (msg->seqnum == -1) {
 		list_add(&msg->list, &exec_list);
-		exec_queue_activate();
-		pthread_mutex_unlock(&exec_lock);
+		exec_queue_manager();
 	} else {
-		pthread_mutex_lock(&msg_lock);
 		msg_queue_insert(msg);
-		pthread_mutex_unlock(&msg_lock);
 	}
-
-exit:
-	close(sock);
-	pthread_exit(0);
 }
 
 static void sig_handler(int signum)
@@ -324,77 +249,91 @@
 		case SIGTERM:
 			exit(20 + signum);
 			break;
+		case SIGALRM:
+			msg_q_timeout = 1;
+			break;
+		case SIGCHLD:
+			children_waiting = 1;
+			break;
 		default:
 			dbg("unhandled signal");
 	}
 }
 
+static void jobdone(int pid)
+{
+	/* find msg associated with pid and delete it */
+	struct hotplug_msg *msg;
+
+	list_for_each_entry(msg, &running_list, list) {
+		if (msg->pid == pid) {
+			dbg("<== exec seq %d came back", msg->seqnum);
+			runq_del(msg);
+			return;
+		}
+	}
+}
+
 int main(int argc, char *argv[])
 {
 	int ssock;
-	int csock;
 	struct sockaddr_un saddr;
-	struct sockaddr_un caddr;
 	socklen_t addrlen;
-	socklen_t clen;
-	pthread_t cli_tid;
-	pthread_t mgr_msg_tid;
-	pthread_t mgr_exec_tid;
 	int retval;
 
 	init_logging("udevd");
 
 	signal(SIGINT, sig_handler);
 	signal(SIGTERM, sig_handler);
+	signal(SIGALRM, sig_handler);
+	signal(SIGCHLD, sig_handler);
+
+	/* we want these two to interrupt system calls */
+	siginterrupt(SIGALRM, 1);
+	siginterrupt(SIGCHLD, 1);
 
 	memset(&saddr, 0x00, sizeof(saddr));
 	saddr.sun_family = AF_LOCAL;
 	/* use abstract namespace for socket path */
-	strcpy(&saddr.sun_path[1], UDEVD_SOCK_PATH);
+	strcpy(&saddr.sun_path[1], UDEV_SOCK_NAME);
 	addrlen = offsetof(struct sockaddr_un, sun_path) + strlen(saddr.sun_path+1) + 1;
 
-	ssock = socket(AF_LOCAL, SOCK_STREAM, 0);
+	ssock = socket(AF_LOCAL, SOCK_DGRAM, 0);
 	if (ssock == -1) {
 		dbg("error getting socket");
 		exit(1);
 	}
 
+	/* the bind() takes care of ensuring only one copy
+	 * running at a time.  A second copy will fail with
+	 * errno set to EADDRINUSE
+	 */
 	retval = bind(ssock, &saddr, addrlen);
 	if (retval < 0) {
 		dbg("bind failed\n");
 		goto exit;
 	}
 
-	retval = listen(ssock, SOMAXCONN);
-	if (retval < 0) {
-		dbg("listen failed\n");
-		goto exit;
-	}
-
-	pthread_mutex_init(&msg_lock, NULL);
-	pthread_mutex_init(&msg_active_lock, NULL);
-	pthread_mutex_init(&exec_lock, NULL);
-	pthread_mutex_init(&exec_active_lock, NULL);
-	pthread_mutex_init(&running_lock, NULL);
-
-	/* set default attributes for created threads */
-	pthread_attr_init(&thr_attr);
-	pthread_attr_setdetachstate(&thr_attr, PTHREAD_CREATE_DETACHED);
-	pthread_attr_setstacksize(&thr_attr, 16 * 1024);
-
-	/* init queue management */
-	pthread_create(&mgr_msg_tid, &thr_attr, msg_queue_manager, NULL);
-	pthread_create(&mgr_exec_tid, &thr_attr, exec_queue_manager, NULL);
-
-	clen = sizeof(caddr);
 	/* main loop */
 	while (1) {
-		csock = accept(ssock, &caddr, &clen);
-		if (csock < 0) {
-			dbg("client accept failed\n");
-			continue;
+
+		handle_msg(ssock);
+
+		while(msg_q_timeout) {
+			msg_q_timeout = 0;
+			msg_queue_manager();
+		}
+
+		while(children_waiting) {
+			children_waiting = 0;
+			/* reap all dead children */
+			while(1) {
+				int pid = waitpid(-1, 0, WNOHANG);
+				if ((pid == -1) || (pid == 0))
+					break;
+				jobdone(pid);
+			}
 		}
-		pthread_create(&cli_tid, &thr_attr, client_threads, (void *) csock);
 	}
 exit:
 	close(ssock);
diff -Nur --exclude=RCS --exclude=CVS --exclude=SCCS --exclude=BitKeeper --exclude=ChangeSet udev/udevd.h myudev/udevd.h
--- udev/udevd.h	Thu Feb  5 23:44:52 2004
+++ myudev/udevd.h	Fri Feb  6 00:05:50 2004
@@ -24,6 +24,7 @@
 
 #include "list.h"
 
+#define UDEV_SOCK_NAME "udevd"
 #define UDEV_MAGIC			"udevd_" UDEV_VERSION
 #define EVENT_TIMEOUT_SEC		5
 #define UDEVSEND_CONNECT_RETRY		20 /* x 100 millisec */
diff -Nur --exclude=RCS --exclude=CVS --exclude=SCCS --exclude=BitKeeper --exclude=ChangeSet udev/udevsend.c myudev/udevsend.c
--- udev/udevsend.c	Fri Feb  6 00:43:06 2004
+++ myudev/udevsend.c	Fri Feb  6 00:40:54 2004
@@ -33,6 +33,7 @@
 #include <string.h>
 #include <unistd.h>
 #include <time.h>
+#include <linux/stddef.h>
 
 #include "udev.h"
 #include "udev_version.h"
@@ -119,13 +120,14 @@
 	char *subsystem;
 	char *seqnum;
 	int seq;
-	int retval = -EINVAL;
+	int retval=1;
 	int size;
 	int loop;
 	struct timespec tspec;
 	int sock;
 	struct sockaddr_un saddr;
 	socklen_t addrlen;
+	int started_daemon = 0;
 
 #ifdef DEBUG
 	init_logging("udevsend");
@@ -151,11 +153,11 @@
 
 	seqnum = get_seqnum();
 	if (seqnum == NULL)
-		seq = 0;
+		seq = -1;
 	else
 		seq = atoi(seqnum);
 
-	sock = socket(AF_LOCAL, SOCK_STREAM, 0);
+	sock = socket(AF_LOCAL, SOCK_DGRAM, 0);
 	if (sock == -1) {
 		dbg("error getting socket");
 		goto exit;
@@ -164,51 +166,47 @@
 	memset(&saddr, 0x00, sizeof(saddr));
 	saddr.sun_family = AF_LOCAL;
 	/* use abstract namespace for socket path */
-	strcpy(&saddr.sun_path[1], UDEVD_SOCK_PATH);
+	strcpy(&saddr.sun_path[1], UDEV_SOCK_NAME);
 	addrlen = offsetof(struct sockaddr_un, sun_path) + strlen(saddr.sun_path+1) + 1;
 
-	/* try to connect, if it fails start daemon */
-	retval = connect(sock, (struct sockaddr *) &saddr, addrlen);
-	if (retval != -1) {
-		goto send;
-	} else {
-		dbg("connect failed, try starting daemon...");
-		retval = start_daemon();
-		if (retval == 0) {
+	size = build_hotplugmsg(&message, action, devpath, subsystem, seq);
+	
+	/* Send to daemon.  If daemon doesn't exist, start it and loop on sending
+	 * while daemon starts.
+	 */
+	loop = UDEVSEND_CONNECT_RETRY;
+	while (loop--) {
+		retval = sendto(sock, &message, size, 0, (struct sockaddr*)&saddr, addrlen);
+		if (retval != -1) {
+			retval = 0;
+			goto close_and_exit;
+		}
+		
+		if (errno != ECONNREFUSED) {
+			dbg("error sending message");
+			goto close_and_exit;
+		}
+		
+		if (!started_daemon) {
+			dbg("connect failed, try starting daemon...");
+			retval = start_daemon();
+			if (retval) {
+				dbg("error starting daemon");
+				goto exit;
+			}
+			
 			dbg("daemon started");
+			started_daemon = 1;
 		} else {
-			dbg("error starting daemon");
-			goto exit;
+			dbg("retry to connect %d", UDEVSEND_CONNECT_RETRY - loop);
+			tspec.tv_sec = 0;
+			tspec.tv_nsec = 100000000;  /* 100 millisec */
+			nanosleep(&tspec, NULL);
 		}
 	}
-
-	/* try to connect while daemon to starts */
-	tspec.tv_sec = 0;
-	tspec.tv_nsec = 100000000;  /* 100 millisec */
-	loop = UDEVSEND_CONNECT_RETRY;
-	while (loop--) {
-		retval = connect(sock, (struct sockaddr *) &saddr, sizeof(saddr));
-		if (retval != -1)
-			goto send;
-		else
-			dbg("retry to connect %d",
-			    UDEVSEND_CONNECT_RETRY - loop);
-		nanosleep(&tspec, NULL);
-	}
-	dbg("error connecting to daemon, start daemon failed");
-	goto exit;
-
-send:
-	size = build_hotplugmsg(&message, action, devpath, subsystem, seq);
-	retval = send(sock, &message, size, 0);
-	if (retval == -1) {
-		dbg("error sending message");
-		close (sock);
-		goto exit;
-	}
-	close (sock);
-	return 0;
-
+	
+close_and_exit:
+	close(sock);
 exit:
-	return 1;
+	return retval;
 }

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] convert udevsend/udevd to DGRAM and single-threaded
  2004-02-06  6:08 [PATCH] convert udevsend/udevd to DGRAM and single-threaded Chris Friesen
@ 2004-02-06 11:27 ` Kay Sievers
  2004-02-06 16:03 ` Robert Love
                   ` (8 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Kay Sievers @ 2004-02-06 11:27 UTC (permalink / raw)
  To: linux-hotplug

[-- Attachment #1: Type: text/plain, Size: 1387 bytes --]

On Fri, Feb 06, 2004 at 01:08:24AM -0500, Chris Friesen wrote:
> 
> Kay, you said "unless we can get rid of _all_ the threads or at least
> getting faster, I don't want to change it."
> 
> Well how about we get rid of all the threads, *and* we get faster?

Yes, we are twice as fast now on my box :)


> This patch applies to current bk trees, and does the following:
> 
> 1) Switch to DGRAM sockets rather than STREAM.  This simplifies things
> as mentioned in the previous message.
> 
> 2) Invalid sequence numbers are mapped to -1 rather than zero, since
> zero is a valid sequence number (I think).  Also, this allows for real
> speed tests using scripts starting at a zero sequence number, since that
> is what the initial expected sequence number is.
> 
> 3) Get rid of all threading.  This is the biggie.  Some highlights:
> 	a) timeout using setitimer() and SIGALRM
> 	b) async child death notification via SIGCHLD
> 	c) these two signal handlers do nothing but raise volatile flags, 
> 	all the
> work is done in the main loop
> 	d) locking no longer required


I cleaned up the rest of the comments, the whitespace and a few names to match
the whole thing. Please recheck it. Test script is switched to work on subsystem
'test' to let udev ignore it.

Chris, very nice work. Sweet to get rid of the threads.
Greg, if it works for you too, I'm fine to switch over.

thanks,
Kay

[-- Attachment #2: 01-kill-threads.patch --]
[-- Type: text/plain, Size: 18436 bytes --]

===== Makefile 1.104 vs edited =====
--- 1.104/Makefile	Fri Feb  6 02:39:07 2004
+++ edited/Makefile	Fri Feb  6 10:40:14 2004
@@ -262,7 +262,7 @@
 	$(STRIPCMD) $@
 
 $(DAEMON): udevd.h $(GEN_HEADERS) udevd.o
-	$(LD) $(LDFLAGS) -lpthread -o $@ $(CRT0) udevd.o $(LIB_OBJS) $(ARCH_LIB_OBJS)
+	$(LD) $(LDFLAGS) -o $@ $(CRT0) udevd.o $(LIB_OBJS) $(ARCH_LIB_OBJS)
 	$(STRIPCMD) $@
 
 $(SENDER): udevd.h $(GEN_HEADERS) udevsend.o
===== udevd.c 1.14 vs edited =====
--- 1.14/udevd.c	Fri Feb  6 02:37:52 2004
+++ edited/udevd.c	Fri Feb  6 12:02:10 2004
@@ -2,6 +2,7 @@
  * udevd.c - hotplug event serializer
  *
  * Copyright (C) 2004 Kay Sievers <kay.sievers@vrfy.org>
+ * Copyright (C) 2004 Chris Friesen <chris_friesen@sympatico.ca>
  *
  *
  *	This program is free software; you can redistribute it and/or modify it
@@ -19,7 +20,6 @@
  *
  */
 
-#include <pthread.h>
 #include <stddef.h>
 #include <sys/wait.h>
 #include <signal.h>
@@ -32,6 +32,7 @@
 #include <sys/types.h>
 #include <sys/socket.h>
 #include <sys/un.h>
+#include <sys/time.h>
 
 #include "list.h"
 #include "udev.h"
@@ -39,22 +40,17 @@
 #include "udevd.h"
 #include "logging.h"
 
-
 unsigned char logname[42];
-static pthread_mutex_t  msg_lock;
-static pthread_mutex_t  msg_active_lock;
-static pthread_cond_t msg_active;
-static pthread_mutex_t  exec_lock;
-static pthread_mutex_t  exec_active_lock;
-static pthread_cond_t exec_active;
-static pthread_mutex_t  running_lock;
-static pthread_attr_t thr_attr;
 static int expected_seqnum = 0;
+volatile static int children_waiting;
+volatile static int msg_q_timeout;
 
 LIST_HEAD(msg_list);
 LIST_HEAD(exec_list);
 LIST_HEAD(running_list);
 
+static void exec_queue_manager(void);
+static void msg_queue_manager(void);
 
 static void msg_dump_queue(void)
 {
@@ -75,17 +71,16 @@
 	struct hotplug_msg *new_msg;
 
 	new_msg = malloc(sizeof(struct hotplug_msg));
-	if (new_msg == NULL) {
+	if (new_msg == NULL)
 		dbg("error malloc");
-		return NULL;
-	}
 	return new_msg;
 }
 
-static void msg_delete(struct hotplug_msg *msg)
+static void run_queue_delete(struct hotplug_msg *msg)
 {
-	if (msg != NULL)
-		free(msg);
+	list_del(&msg->list);
+	free(msg);
+	exec_queue_manager();
 }
 
 /* orders the message in the queue by sequence number */
@@ -103,21 +98,16 @@
 	/* store timestamp of queuing */
 	msg->queue_time = time(NULL);
 
-	/* signal queue activity to manager */
-	pthread_mutex_lock(&msg_active_lock);
-	pthread_cond_signal(&msg_active);
-	pthread_mutex_unlock(&msg_active_lock);
+	/* run msg queue manager */
+	msg_queue_manager();
 
 	return ;
 }
 
 /* forks event and removes event from run queue when finished */
-static void *run_threads(void * parm)
+static void udev_run(struct hotplug_msg *msg)
 {
 	pid_t pid;
-	struct hotplug_msg *msg;
-
-	msg = parm;
 	setenv("ACTION", msg->action, 1);
 	setenv("DEVPATH", msg->devpath, 1);
 
@@ -131,190 +121,124 @@
 		break;
 	case -1:
 		dbg("fork of child failed");
-		goto exit;
+		run_queue_delete(msg);
+		break;
 	default:
-		/* wait for exit of child */
-		dbg("==> exec seq %d [%d] working at '%s'",
-		    msg->seqnum, pid, msg->devpath);
-		wait(NULL);
-		dbg("<== exec seq %d came back", msg->seqnum);
-	}
-
-exit:
-	/* remove event from run list */
-	pthread_mutex_lock(&running_lock);
-	list_del_init(&msg->list);
-	pthread_mutex_unlock(&running_lock);
-
-	msg_delete(msg);
-
-	/* signal queue activity to exec manager */
-	pthread_mutex_lock(&exec_active_lock);
-	pthread_cond_signal(&exec_active);
-	pthread_mutex_unlock(&exec_active_lock);
-
-	pthread_exit(0);
+		/* get SIGCHLD in main loop */
+		dbg("==> exec seq %d [%d] working at '%s'", msg->seqnum, pid, msg->devpath);
+		msg->pid = pid;
+	}
 }
 
 /* returns already running task with devpath */
 static struct hotplug_msg *running_with_devpath(struct hotplug_msg *msg)
 {
 	struct hotplug_msg *loop_msg;
-	struct hotplug_msg *tmp_msg;
-
-	list_for_each_entry_safe(loop_msg, tmp_msg, &running_list, list)
+	list_for_each_entry(loop_msg, &running_list, list)
 		if (strncmp(loop_msg->devpath, msg->devpath, sizeof(loop_msg->devpath)) == 0)
 			return loop_msg;
 	return NULL;
 }
 
-/* queue management executes the events and delays events for the same devpath */
-static void *exec_queue_manager(void * parm)
+/* exec queue management routine executes the events and delays events for the same devpath */
+static void exec_queue_manager()
 {
 	struct hotplug_msg *loop_msg;
 	struct hotplug_msg *tmp_msg;
 	struct hotplug_msg *msg;
-	pthread_t run_tid;
 
-	while (1) {
-		pthread_mutex_lock(&exec_lock);
-		list_for_each_entry_safe(loop_msg, tmp_msg, &exec_list, list) {
-			msg = running_with_devpath(loop_msg);
-			if (msg == NULL) {
-				/* move event to run list */
-				pthread_mutex_lock(&running_lock);
-				list_move_tail(&loop_msg->list, &running_list);
-				pthread_mutex_unlock(&running_lock);
-
-				pthread_create(&run_tid, &thr_attr, run_threads, (void *) loop_msg);
-
-				dbg("moved seq %d to running list", loop_msg->seqnum);
-			} else {
-				dbg("delay seq %d, cause seq %d already working on '%s'",
-				    loop_msg->seqnum, msg->seqnum, msg->devpath);
-			}
+	list_for_each_entry_safe(loop_msg, tmp_msg, &exec_list, list) {
+		msg = running_with_devpath(loop_msg);
+		if (!msg) {
+			/* move event to run list */
+			list_move_tail(&loop_msg->list, &running_list);
+			udev_run(loop_msg);
+			dbg("moved seq %d to running list", loop_msg->seqnum);
+		} else {
+			dbg("delay seq %d, cause seq %d already working on '%s'",
+				loop_msg->seqnum, msg->seqnum, msg->devpath);
 		}
-		pthread_mutex_unlock(&exec_lock);
-
-		/* wait for activation, new events or childs coming back */
-		pthread_mutex_lock(&exec_active_lock);
-		pthread_cond_wait(&exec_active, &exec_active_lock);
-		pthread_mutex_unlock(&exec_active_lock);
 	}
 }
 
-static void exec_queue_activate(void)
+static void msg_move_exec(struct hotplug_msg *msg)
 {
-	pthread_mutex_lock(&exec_active_lock);
-	pthread_cond_signal(&exec_active);
-	pthread_mutex_unlock(&exec_active_lock);
+	list_move_tail(&msg->list, &exec_list);
+	exec_queue_manager();
+	expected_seqnum = msg->seqnum+1;
+	dbg("moved seq %d to exec, next expected is %d",
+		msg->seqnum, expected_seqnum);
 }
 
-/* move message from incoming to exec queue */
-static void msg_move_exec(struct list_head *head)
-{
-	list_move_tail(head, &exec_list);
-	exec_queue_activate();
-}
-
-/* queue management thread handles the timeouts and dispatches the events */
-static void *msg_queue_manager(void * parm)
+/* msg queue management routine handles the timeouts and dispatches the events */
+static void msg_queue_manager()
 {
 	struct hotplug_msg *loop_msg;
 	struct hotplug_msg *tmp_msg;
 	time_t msg_age = 0;
-	struct timespec tv;
 
-	while (1) {
-		dbg("msg queue manager, next expected is %d", expected_seqnum);
-		pthread_mutex_lock(&msg_lock);
-		pthread_mutex_lock(&exec_lock);
+	dbg("msg queue manager, next expected is %d", expected_seqnum);
 recheck:
-		list_for_each_entry_safe(loop_msg, tmp_msg, &msg_list, list) {
-			/* move event with expected sequence to the exec list */
-			if (loop_msg->seqnum == expected_seqnum) {
-				msg_move_exec(&loop_msg->list);
-				expected_seqnum++;
-				dbg("moved seq %d to exec, next expected is %d",
-				    loop_msg->seqnum, expected_seqnum);
-				continue;
-			}
-
-			/* move event with expired timeout to the exec list */
-			msg_age = time(NULL) - loop_msg->queue_time;
-			if (msg_age > EVENT_TIMEOUT_SEC-1) {
-				msg_move_exec(&loop_msg->list);
-				expected_seqnum = loop_msg->seqnum+1;
-				dbg("moved seq %d to exec, reset next expected to %d",
-				    loop_msg->seqnum, expected_seqnum);
-				goto recheck;
-			} else {
-				break;
-			}
+	list_for_each_entry_safe(loop_msg, tmp_msg, &msg_list, list) {
+		/* move event with expected sequence to the exec list */
+		if (loop_msg->seqnum == expected_seqnum) {
+			msg_move_exec(loop_msg);
+			continue;
 		}
 
-		msg_dump_queue();
-		pthread_mutex_unlock(&exec_lock);
-		pthread_mutex_unlock(&msg_lock);
-
-		/* wait until queue gets active or next message timeout expires */
-		pthread_mutex_lock(&msg_active_lock);
-
-		if (list_empty(&msg_list) == 0) {
-			tv.tv_sec = time(NULL) + EVENT_TIMEOUT_SEC - msg_age;
-			tv.tv_nsec = 0;
-			dbg("next event expires in %li seconds",
-			    EVENT_TIMEOUT_SEC - msg_age);
-			pthread_cond_timedwait(&msg_active, &msg_active_lock, &tv);
+		/* move event with expired timeout to the exec list */
+		msg_age = time(NULL) - loop_msg->queue_time;
+		if (msg_age > EVENT_TIMEOUT_SEC-1) {
+			msg_move_exec(loop_msg);
+			goto recheck;
 		} else {
-			pthread_cond_wait(&msg_active, &msg_active_lock);
+			break;
 		}
-		pthread_mutex_unlock(&msg_active_lock);
+	}
+
+	msg_dump_queue();
+
+	if (list_empty(&msg_list) == 0) {
+		/* set timeout for remaining queued events */
+		struct itimerval itv = {{0, 0}, {EVENT_TIMEOUT_SEC - msg_age, 0}};
+		dbg("next event expires in %li seconds",
+		    EVENT_TIMEOUT_SEC - msg_age);
+		setitimer(ITIMER_REAL, &itv, 0);
 	}
 }
 
-/* every connect creates a thread which gets the msg, queues it and exits */
-static void *client_threads(void * parm)
+/* receive the msg, do some basic sanity checks, and queue it */
+static void handle_msg(int sock)
 {
-	int sock;
 	struct hotplug_msg *msg;
 	int retval;
 
-	sock = (int) parm;
-
 	msg = msg_create();
 	if (msg == NULL) {
 		dbg("unable to store message");
-		goto exit;
+		return;
 	}
 
 	retval = recv(sock, msg, sizeof(struct hotplug_msg), 0);
 	if (retval <  0) {
-		dbg("unable to receive message");
-		goto exit;
+		if (errno != EINTR)
+			dbg("unable to receive message");
+		return;
 	}
-
+	
 	if (strncmp(msg->magic, UDEV_MAGIC, sizeof(UDEV_MAGIC)) != 0 ) {
 		dbg("message magic '%s' doesn't match, ignore it", msg->magic);
-		msg_delete(msg);
-		goto exit;
+		free(msg);
+		return;
 	}
 
 	/* if no seqnum is given, we move straight to exec queue */
-	if (msg->seqnum == 0) {
-		pthread_mutex_lock(&exec_lock);
+	if (msg->seqnum == -1) {
 		list_add(&msg->list, &exec_list);
-		exec_queue_activate();
-		pthread_mutex_unlock(&exec_lock);
+		exec_queue_manager();
 	} else {
-		pthread_mutex_lock(&msg_lock);
 		msg_queue_insert(msg);
-		pthread_mutex_unlock(&msg_lock);
 	}
-
-exit:
-	close(sock);
-	pthread_exit(0);
 }
 
 static void sig_handler(int signum)
@@ -324,28 +248,48 @@
 		case SIGTERM:
 			exit(20 + signum);
 			break;
+		case SIGALRM:
+			msg_q_timeout = 1;
+			break;
+		case SIGCHLD:
+			children_waiting = 1;
+			break;
 		default:
 			dbg("unhandled signal");
 	}
 }
 
+static void udev_done(int pid)
+{
+	/* find msg associated with pid and delete it */
+	struct hotplug_msg *msg;
+
+	list_for_each_entry(msg, &running_list, list) {
+		if (msg->pid == pid) {
+			dbg("<== exec seq %d came back", msg->seqnum);
+			run_queue_delete(msg);
+			return;
+		}
+	}
+}
+
 int main(int argc, char *argv[])
 {
 	int ssock;
-	int csock;
 	struct sockaddr_un saddr;
-	struct sockaddr_un caddr;
 	socklen_t addrlen;
-	socklen_t clen;
-	pthread_t cli_tid;
-	pthread_t mgr_msg_tid;
-	pthread_t mgr_exec_tid;
 	int retval;
 
 	init_logging("udevd");
 
 	signal(SIGINT, sig_handler);
 	signal(SIGTERM, sig_handler);
+	signal(SIGALRM, sig_handler);
+	signal(SIGCHLD, sig_handler);
+
+	/* we want these two to interrupt system calls */
+	siginterrupt(SIGALRM, 1);
+	siginterrupt(SIGCHLD, 1);
 
 	memset(&saddr, 0x00, sizeof(saddr));
 	saddr.sun_family = AF_LOCAL;
@@ -353,48 +297,37 @@
 	strcpy(&saddr.sun_path[1], UDEVD_SOCK_PATH);
 	addrlen = offsetof(struct sockaddr_un, sun_path) + strlen(saddr.sun_path+1) + 1;
 
-	ssock = socket(AF_LOCAL, SOCK_STREAM, 0);
+	ssock = socket(AF_LOCAL, SOCK_DGRAM, 0);
 	if (ssock == -1) {
 		dbg("error getting socket");
 		exit(1);
 	}
 
+	/* the bind takes care of ensuring only one copy running */
 	retval = bind(ssock, &saddr, addrlen);
 	if (retval < 0) {
 		dbg("bind failed\n");
 		goto exit;
 	}
 
-	retval = listen(ssock, SOMAXCONN);
-	if (retval < 0) {
-		dbg("listen failed\n");
-		goto exit;
-	}
+	while (1) {
+		handle_msg(ssock);
 
-	pthread_mutex_init(&msg_lock, NULL);
-	pthread_mutex_init(&msg_active_lock, NULL);
-	pthread_mutex_init(&exec_lock, NULL);
-	pthread_mutex_init(&exec_active_lock, NULL);
-	pthread_mutex_init(&running_lock, NULL);
-
-	/* set default attributes for created threads */
-	pthread_attr_init(&thr_attr);
-	pthread_attr_setdetachstate(&thr_attr, PTHREAD_CREATE_DETACHED);
-	pthread_attr_setstacksize(&thr_attr, 16 * 1024);
-
-	/* init queue management */
-	pthread_create(&mgr_msg_tid, &thr_attr, msg_queue_manager, NULL);
-	pthread_create(&mgr_exec_tid, &thr_attr, exec_queue_manager, NULL);
+		while(msg_q_timeout) {
+			msg_q_timeout = 0;
+			msg_queue_manager();
+		}
 
-	clen = sizeof(caddr);
-	/* main loop */
-	while (1) {
-		csock = accept(ssock, &caddr, &clen);
-		if (csock < 0) {
-			dbg("client accept failed\n");
-			continue;
+		while(children_waiting) {
+			children_waiting = 0;
+			/* reap all dead children */
+			while(1) {
+				int pid = waitpid(-1, 0, WNOHANG);
+				if ((pid == -1) || (pid == 0))
+					break;
+				udev_done(pid);
+			}
 		}
-		pthread_create(&cli_tid, &thr_attr, client_threads, (void *) csock);
 	}
 exit:
 	close(ssock);
===== udevsend.c 1.17 vs edited =====
--- 1.17/udevsend.c	Thu Feb  5 10:40:58 2004
+++ edited/udevsend.c	Fri Feb  6 10:54:08 2004
@@ -33,6 +33,7 @@
 #include <string.h>
 #include <unistd.h>
 #include <time.h>
+#include <linux/stddef.h>
 
 #include "udev.h"
 #include "udev_version.h"
@@ -119,13 +120,14 @@
 	char *subsystem;
 	char *seqnum;
 	int seq;
-	int retval = -EINVAL;
+	int retval = 1;
 	int size;
 	int loop;
 	struct timespec tspec;
 	int sock;
 	struct sockaddr_un saddr;
 	socklen_t addrlen;
+	int started_daemon = 0;
 
 #ifdef DEBUG
 	init_logging("udevsend");
@@ -151,11 +153,11 @@
 
 	seqnum = get_seqnum();
 	if (seqnum == NULL)
-		seq = 0;
+		seq = -1;
 	else
 		seq = atoi(seqnum);
 
-	sock = socket(AF_LOCAL, SOCK_STREAM, 0);
+	sock = socket(AF_LOCAL, SOCK_DGRAM, 0);
 	if (sock == -1) {
 		dbg("error getting socket");
 		goto exit;
@@ -167,48 +169,42 @@
 	strcpy(&saddr.sun_path[1], UDEVD_SOCK_PATH);
 	addrlen = offsetof(struct sockaddr_un, sun_path) + strlen(saddr.sun_path+1) + 1;
 
-	/* try to connect, if it fails start daemon */
-	retval = connect(sock, (struct sockaddr *) &saddr, addrlen);
-	if (retval != -1) {
-		goto send;
-	} else {
-		dbg("connect failed, try starting daemon...");
-		retval = start_daemon();
-		if (retval == 0) {
+	size = build_hotplugmsg(&message, action, devpath, subsystem, seq);
+	
+	/* If we can't send, try to start daemon and resend message */
+	loop = UDEVSEND_CONNECT_RETRY;
+	while (loop--) {
+		retval = sendto(sock, &message, size, 0, (struct sockaddr*)&saddr, addrlen);
+		if (retval != -1) {
+			retval = 0;
+			goto close_and_exit;
+		}
+		
+		if (errno != ECONNREFUSED) {
+			dbg("error sending message");
+			goto close_and_exit;
+		}
+		
+		if (!started_daemon) {
+			dbg("connect failed, try starting daemon...");
+			retval = start_daemon();
+			if (retval) {
+				dbg("error starting daemon");
+				goto exit;
+			}
+			
 			dbg("daemon started");
+			started_daemon = 1;
 		} else {
-			dbg("error starting daemon");
-			goto exit;
+			dbg("retry to connect %d", UDEVSEND_CONNECT_RETRY - loop);
+			tspec.tv_sec = 0;
+			tspec.tv_nsec = 100000000;  /* 100 millisec */
+			nanosleep(&tspec, NULL);
 		}
 	}
-
-	/* try to connect while daemon to starts */
-	tspec.tv_sec = 0;
-	tspec.tv_nsec = 100000000;  /* 100 millisec */
-	loop = UDEVSEND_CONNECT_RETRY;
-	while (loop--) {
-		retval = connect(sock, (struct sockaddr *) &saddr, sizeof(saddr));
-		if (retval != -1)
-			goto send;
-		else
-			dbg("retry to connect %d",
-			    UDEVSEND_CONNECT_RETRY - loop);
-		nanosleep(&tspec, NULL);
-	}
-	dbg("error connecting to daemon, start daemon failed");
-	goto exit;
-
-send:
-	size = build_hotplugmsg(&message, action, devpath, subsystem, seq);
-	retval = send(sock, &message, size, 0);
-	if (retval == -1) {
-		dbg("error sending message");
-		close (sock);
-		goto exit;
-	}
-	close (sock);
-	return 0;
-
+	
+close_and_exit:
+	close(sock);
 exit:
-	return 1;
+	return retval;
 }
===== test/udevd_test.sh 1.5 vs edited =====
--- 1.5/test/udevd_test.sh	Sat Jan 31 04:30:06 2004
+++ edited/test/udevd_test.sh	Fri Feb  6 11:37:30 2004
@@ -3,81 +3,86 @@
 # add/rem/add/rem/add sequence of sda/sdb/sdc
 # a few days longer and the socket of my usb-flash-reader is gone :)
 
+export SEQNUM=0
+export ACTION=add
+export DEVPATH=/test/init
+./udevsend test
+
 export SEQNUM=3
 export ACTION=add
-export DEVPATH=/block/sdc
-./udevsend block
+export DEVPATH=/test/sdc
+./udevsend test
 
 export SEQNUM=1
 export ACTION=add
-export DEVPATH=/block/sda
-./udevsend block
+export DEVPATH=/test/sda
+./udevsend test
 
 export SEQNUM=2
 export ACTION=add
-export DEVPATH=/block/sdb
-./udevsend block
+export DEVPATH=/test/sdb
+./udevsend test
 
 export SEQNUM=4
 export ACTION=remove
-export DEVPATH=/block/sdc
-./udevsend block
+export DEVPATH=/test/sdc
+./udevsend test
 
 export SEQNUM=5
 export ACTION=remove
-export DEVPATH=/block/sdb
-./udevsend block
+export DEVPATH=/test/sdb
+./udevsend test
 
 export SEQNUM=8
 export ACTION=add
-export DEVPATH=/block/sdb
-./udevsend block
+export DEVPATH=/test/sdb
+./udevsend test
 
 export SEQNUM=6
 export ACTION=remove
-export DEVPATH=/block/sda
-./udevsend block
+export DEVPATH=/test/sda
+./udevsend test
 
 export SEQNUM=7
 export ACTION=add
-export DEVPATH=/block/sda
-#./udevsend block
+export DEVPATH=/test/sda
+#./udevsend test
 
 sleep 2
 
 export SEQNUM=9
 export ACTION=add
-export DEVPATH=/block/sdc
-./udevsend block
+export DEVPATH=/test/sdc
+./udevsend test
 
 export SEQNUM=11
 export ACTION=remove
-export DEVPATH=/block/sdb
-./udevsend block
+export DEVPATH=/test/sdb
+./udevsend test
 
 export SEQNUM=10
 export ACTION=remove
-export DEVPATH=/block/sdc
-./udevsend block
+export DEVPATH=/test/sdc
+./udevsend test
 
 export SEQNUM=13
 export ACTION=add
-export DEVPATH=/block/sda
-./udevsend block
+export DEVPATH=/test/sda
+./udevsend test
 
 export SEQNUM=14
 export ACTION=add
-export DEVPATH=/block/sdb
-./udevsend block
+export DEVPATH=/test/sdb
+./udevsend test
 
 export SEQNUM=15
 export ACTION=add
-export DEVPATH=/block/sdc
-./udevsend block
+export DEVPATH=/test/sdc
+./udevsend test
 
 sleep 2
 
 export SEQNUM=12
 export ACTION=remove
-export DEVPATH=/block/sda
-./udevsend block
+export DEVPATH=/test/sda
+./udevsend test

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] convert udevsend/udevd to DGRAM and single-threaded
  2004-02-06  6:08 [PATCH] convert udevsend/udevd to DGRAM and single-threaded Chris Friesen
  2004-02-06 11:27 ` Kay Sievers
@ 2004-02-06 16:03 ` Robert Love
  2004-02-06 16:58 ` Patrick Mansfield
                   ` (7 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Robert Love @ 2004-02-06 16:03 UTC (permalink / raw)
  To: linux-hotplug

On Fri, 2004-02-06 at 12:27 +0100, Kay Sievers wrote:

> Chris, very nice work. Sweet to get rid of the threads.
> Greg, if it works for you too, I'm fine to switch over.

*clap* *clap* *clap*

Thanks, Chris.  Oh so sweet.

	Robert Love




-------------------------------------------------------
The SF.Net email is sponsored by EclipseCon 2004
Premiere Conference on Open Tools Development and Integration
See the breadth of Eclipse activity. February 3-5 in Anaheim, CA.
http://www.eclipsecon.org/osdn
_______________________________________________
Linux-hotplug-devel mailing list  http://linux-hotplug.sourceforge.net
Linux-hotplug-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-hotplug-devel

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] convert udevsend/udevd to DGRAM and single-threaded
  2004-02-06  6:08 [PATCH] convert udevsend/udevd to DGRAM and single-threaded Chris Friesen
  2004-02-06 11:27 ` Kay Sievers
  2004-02-06 16:03 ` Robert Love
@ 2004-02-06 16:58 ` Patrick Mansfield
  2004-02-06 22:21 ` Greg KH
                   ` (6 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Patrick Mansfield @ 2004-02-06 16:58 UTC (permalink / raw)
  To: linux-hotplug

On Fri, Feb 06, 2004 at 12:27:55PM +0100, Kay Sievers wrote:
> On Fri, Feb 06, 2004 at 01:08:24AM -0500, Chris Friesen wrote:

> > This patch applies to current bk trees, and does the following:
> > 
> > 1) Switch to DGRAM sockets rather than STREAM.  This simplifies things
> > as mentioned in the previous message.

> -	ssock = socket(AF_LOCAL, SOCK_STREAM, 0);
> +	ssock = socket(AF_LOCAL, SOCK_DGRAM, 0);

How are dropped packets handled?

Should you use SOCK_RDM? (I've never used it, and didn't even know it
existed until just reading man socket).

-- Patrick Mansfield


-------------------------------------------------------
The SF.Net email is sponsored by EclipseCon 2004
Premiere Conference on Open Tools Development and Integration
See the breadth of Eclipse activity. February 3-5 in Anaheim, CA.
http://www.eclipsecon.org/osdn
_______________________________________________
Linux-hotplug-devel mailing list  http://linux-hotplug.sourceforge.net
Linux-hotplug-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-hotplug-devel

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] convert udevsend/udevd to DGRAM and single-threaded
  2004-02-06  6:08 [PATCH] convert udevsend/udevd to DGRAM and single-threaded Chris Friesen
                   ` (2 preceding siblings ...)
  2004-02-06 16:58 ` Patrick Mansfield
@ 2004-02-06 22:21 ` Greg KH
  2004-02-06 22:22 ` Chris Friesen
                   ` (5 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Greg KH @ 2004-02-06 22:21 UTC (permalink / raw)
  To: linux-hotplug

On Fri, Feb 06, 2004 at 12:27:55PM +0100, Kay Sievers wrote:
> On Fri, Feb 06, 2004 at 01:08:24AM -0500, Chris Friesen wrote:
> > 
> > Kay, you said "unless we can get rid of _all_ the threads or at least
> > getting faster, I don't want to change it."
> > 
> > Well how about we get rid of all the threads, *and* we get faster?
> 
> Yes, we are twice as fast now on my box :)

Very nice, thanks a lot Chris.

I've applied this patch.

greg k-h


-------------------------------------------------------
The SF.Net email is sponsored by EclipseCon 2004
Premiere Conference on Open Tools Development and Integration
See the breadth of Eclipse activity. February 3-5 in Anaheim, CA.
http://www.eclipsecon.org/osdn
_______________________________________________
Linux-hotplug-devel mailing list  http://linux-hotplug.sourceforge.net
Linux-hotplug-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-hotplug-devel

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] convert udevsend/udevd to DGRAM and single-threaded
  2004-02-06  6:08 [PATCH] convert udevsend/udevd to DGRAM and single-threaded Chris Friesen
                   ` (3 preceding siblings ...)
  2004-02-06 22:21 ` Greg KH
@ 2004-02-06 22:22 ` Chris Friesen
  2004-02-07  1:24 ` Patrick Mansfield
                   ` (4 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Chris Friesen @ 2004-02-06 22:22 UTC (permalink / raw)
  To: linux-hotplug

Patrick Mansfield wrote:

>>On Fri, Feb 06, 2004 at 01:08:24AM -0500, Chris Friesen wrote:

>>-	ssock = socket(AF_LOCAL, SOCK_STREAM, 0);
>>+	ssock = socket(AF_LOCAL, SOCK_DGRAM, 0);
> 
> How are dropped packets handled?

If udevd is not present, then the udevsend creates it.  Otherwise, it blocks 
until the message is placed in udevd's buffer.  No problem there.

If udevd crashes after the message was placed in its rx buffer but before 
handling it, then you have a problem, but this problem exists for any protocol 
and to solve it you need to have an ack message sent back to udevsend *after* 
the udev has run and returned for that message.


Chris



-------------------------------------------------------
The SF.Net email is sponsored by EclipseCon 2004
Premiere Conference on Open Tools Development and Integration
See the breadth of Eclipse activity. February 3-5 in Anaheim, CA.
http://www.eclipsecon.org/osdn
_______________________________________________
Linux-hotplug-devel mailing list  http://linux-hotplug.sourceforge.net
Linux-hotplug-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-hotplug-devel

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] convert udevsend/udevd to DGRAM and single-threaded
  2004-02-06  6:08 [PATCH] convert udevsend/udevd to DGRAM and single-threaded Chris Friesen
                   ` (4 preceding siblings ...)
  2004-02-06 22:22 ` Chris Friesen
@ 2004-02-07  1:24 ` Patrick Mansfield
  2004-02-07  2:04 ` Mike Waychison
                   ` (3 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Patrick Mansfield @ 2004-02-07  1:24 UTC (permalink / raw)
  To: linux-hotplug

On Fri, Feb 06, 2004 at 05:22:23PM -0500, Chris Friesen wrote:
> Patrick Mansfield wrote:
> 
> >>On Fri, Feb 06, 2004 at 01:08:24AM -0500, Chris Friesen wrote:
> 
> >>-	ssock = socket(AF_LOCAL, SOCK_STREAM, 0);
> >>+	ssock = socket(AF_LOCAL, SOCK_DGRAM, 0);
> > 
> > How are dropped packets handled?

> If udevd is not present, then the udevsend creates it.  Otherwise, it blocks 
> until the message is placed in udevd's buffer.  No problem there.
> 
> If udevd crashes after the message was placed in its rx buffer but before 
> handling it, then you have a problem, but this problem exists for any protocol 
> and to solve it you need to have an ack message sent back to udevsend *after* 
> the udev has run and returned for that message.

I mean, SOCK_DGRAM is an unreliable transport, so what happens if the
transport drops packets? It might be unlikely, especially for the AF_LOCAL,
but it is possible.

I don't see any ack or retransmit code in udevsend.

-- Patrick Mansfield


-------------------------------------------------------
The SF.Net email is sponsored by EclipseCon 2004
Premiere Conference on Open Tools Development and Integration
See the breadth of Eclipse activity. February 3-5 in Anaheim, CA.
http://www.eclipsecon.org/osdn
_______________________________________________
Linux-hotplug-devel mailing list  http://linux-hotplug.sourceforge.net
Linux-hotplug-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-hotplug-devel

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] convert udevsend/udevd to DGRAM and single-threaded
  2004-02-06  6:08 [PATCH] convert udevsend/udevd to DGRAM and single-threaded Chris Friesen
                   ` (5 preceding siblings ...)
  2004-02-07  1:24 ` Patrick Mansfield
@ 2004-02-07  2:04 ` Mike Waychison
  2004-02-07  7:06 ` Chris Friesen
                   ` (2 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Mike Waychison @ 2004-02-07  2:04 UTC (permalink / raw)
  To: linux-hotplug

Patrick Mansfield wrote:

>On Fri, Feb 06, 2004 at 05:22:23PM -0500, Chris Friesen wrote:
>  
>
>>Patrick Mansfield wrote:
>>
>>    
>>
>>>>On Fri, Feb 06, 2004 at 01:08:24AM -0500, Chris Friesen wrote:
>>>>        
>>>>
>>>>-	ssock = socket(AF_LOCAL, SOCK_STREAM, 0);
>>>>+	ssock = socket(AF_LOCAL, SOCK_DGRAM, 0);
>>>>        
>>>>
>>>How are dropped packets handled?
>>>      
>>>
>
>  
>
>>If udevd is not present, then the udevsend creates it.  Otherwise, it blocks 
>>until the message is placed in udevd's buffer.  No problem there.
>>
>>If udevd crashes after the message was placed in its rx buffer but before 
>>handling it, then you have a problem, but this problem exists for any protocol 
>>and to solve it you need to have an ack message sent back to udevsend *after* 
>>the udev has run and returned for that message.
>>    
>>
>
>I mean, SOCK_DGRAM is an unreliable transport, so what happens if the
>transport drops packets? It might be unlikely, especially for the AF_LOCAL,
>but it is possible.
>
>I don't see any ack or retransmit code in udevsend.
>
>  
>

SOCK_DGRAM is a reliable transport under AF_UNIX / AF_LOCAL.  From unix(7):

 
       Valid types are SOCK_STREAM for a stream oriented socket and 
SOCK_DGRAM
       for  a datagram oriented socket that preserves message 
boundaries. Unix
       sockets are always reliable and don't reorder datagrams.


-- 
Mike Waychison
Sun Microsystems, Inc.
1 (650) 352-5299 voice
1 (416) 202-8336 voice
mailto: Michael.Waychison@Sun.COM
http://www.sun.com

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
NOTICE:  The opinions expressed in this email are held by me, 
and may not represent the views of Sun Microsystems, Inc.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 



-------------------------------------------------------
The SF.Net email is sponsored by EclipseCon 2004
Premiere Conference on Open Tools Development and Integration
See the breadth of Eclipse activity. February 3-5 in Anaheim, CA.
http://www.eclipsecon.org/osdn
_______________________________________________
Linux-hotplug-devel mailing list  http://linux-hotplug.sourceforge.net
Linux-hotplug-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-hotplug-devel

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] convert udevsend/udevd to DGRAM and single-threaded
  2004-02-06  6:08 [PATCH] convert udevsend/udevd to DGRAM and single-threaded Chris Friesen
                   ` (6 preceding siblings ...)
  2004-02-07  2:04 ` Mike Waychison
@ 2004-02-07  7:06 ` Chris Friesen
  2004-02-07  7:32 ` Patrick Mansfield
  2004-02-07 10:06 ` Kay Sievers
  9 siblings, 0 replies; 11+ messages in thread
From: Chris Friesen @ 2004-02-07  7:06 UTC (permalink / raw)
  To: linux-hotplug

Patrick Mansfield wrote:

> I mean, SOCK_DGRAM is an unreliable transport, so what happens if the
> transport drops packets? It might be unlikely, especially for the AF_LOCAL,
> but it is possible.
> 
> I don't see any ack or retransmit code in udevsend.

Unix sockets are basically reliable.  Barring something really weird going on, 
if the server is present, a blocking sendto() (with valid parameters, of course) 
will succeed eventually, which guarantees that the message was queued up in the 
server's receive buffer.  If the server is not present, then you get an errno of 
ECONNREFUSED, and you know to start the server.

 From the man page: "Unix sockets are always reliable and don't reorder datagrams."

It would be easy to make it totally reliable by doing periodic retries in 
udevsend and have udevd send back an ack when cleaning up the message after udev 
completes.  I could send this in if people are interested.


Chris



-------------------------------------------------------
The SF.Net email is sponsored by EclipseCon 2004
Premiere Conference on Open Tools Development and Integration
See the breadth of Eclipse activity. February 3-5 in Anaheim, CA.
http://www.eclipsecon.org/osdn
_______________________________________________
Linux-hotplug-devel mailing list  http://linux-hotplug.sourceforge.net
Linux-hotplug-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-hotplug-devel

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] convert udevsend/udevd to DGRAM and single-threaded
  2004-02-06  6:08 [PATCH] convert udevsend/udevd to DGRAM and single-threaded Chris Friesen
                   ` (7 preceding siblings ...)
  2004-02-07  7:06 ` Chris Friesen
@ 2004-02-07  7:32 ` Patrick Mansfield
  2004-02-07 10:06 ` Kay Sievers
  9 siblings, 0 replies; 11+ messages in thread
From: Patrick Mansfield @ 2004-02-07  7:32 UTC (permalink / raw)
  To: linux-hotplug

On Sat, Feb 07, 2004 at 02:06:25AM -0500, Chris Friesen wrote:
> Patrick Mansfield wrote:
> 
> > I mean, SOCK_DGRAM is an unreliable transport, so what happens if the
> > transport drops packets? It might be unlikely, especially for the AF_LOCAL,
> > but it is possible.
> > 
> > I don't see any ack or retransmit code in udevsend.
> 
> Unix sockets are basically reliable.  Barring something really wierd going on, 
> if the server is present, a blocking sendto() (with valid parameters, of course) 
> will succeed eventually, which guarantees that the message was queued up in the 
> server's receive buffer.  If the server is not present, then you get an errno of 
> ECONNREFUSED, and you know to start the server.
> 
>  From the man page: "Unix sockets are always reliable and don't reorder datagrams."

OK, thanks for the explanations, I did not know about the "man unix" ;-)

-- Patrick Mansfield


-------------------------------------------------------
The SF.Net email is sponsored by EclipseCon 2004
Premiere Conference on Open Tools Development and Integration
See the breadth of Eclipse activity. February 3-5 in Anaheim, CA.
http://www.eclipsecon.org/osdn
_______________________________________________
Linux-hotplug-devel mailing list  http://linux-hotplug.sourceforge.net
Linux-hotplug-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-hotplug-devel

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] convert udevsend/udevd to DGRAM and single-threaded
  2004-02-06  6:08 [PATCH] convert udevsend/udevd to DGRAM and single-threaded Chris Friesen
                   ` (8 preceding siblings ...)
  2004-02-07  7:32 ` Patrick Mansfield
@ 2004-02-07 10:06 ` Kay Sievers
  9 siblings, 0 replies; 11+ messages in thread
From: Kay Sievers @ 2004-02-07 10:06 UTC (permalink / raw)
  To: linux-hotplug

[-- Attachment #1: Type: text/plain, Size: 678 bytes --]

On Fri, Feb 06, 2004 at 02:21:44PM -0800, Greg KH wrote:
> On Fri, Feb 06, 2004 at 12:27:55PM +0100, Kay Sievers wrote:
> > On Fri, Feb 06, 2004 at 01:08:24AM -0500, Chris Friesen wrote:
> > > 
> > > Kay, you said "unless we can get rid of _all_ the threads or at least
> > > getting faster, I don't want to change it."
> > > 
> > > Well how about we get rid of all the threads, *and* we get faster?
> > 
> > Yes, we are twice as fast now on my box :)
> 
> Very nice, thanks a lot Chris.

I tried to compile udevd with klibc. Here are the necessary changes to
compile it, but it doesn't work well now. There seems to be an issue with
the signal handling in klibc.
Any idea?

Kay

[-- Attachment #2: 01-udevd-klibc.patch --]
[-- Type: text/plain, Size: 1431 bytes --]

===== udevd.c 1.15 vs edited =====
--- 1.15/udevd.c	Fri Feb  6 13:02:10 2004
+++ edited/udevd.c	Sat Feb  7 10:49:54 2004
@@ -279,6 +279,7 @@
 	struct sockaddr_un saddr;
 	socklen_t addrlen;
 	int retval;
+	struct sigaction act;
 
 	init_logging("udevd");
 
@@ -288,8 +289,12 @@
 	signal(SIGCHLD, sig_handler);
 
 	/* we want these two to interrupt system calls */
-	siginterrupt(SIGALRM, 1);
-	siginterrupt(SIGCHLD, 1);
+	sigaction(SIGALRM, NULL, &act);
+	act.sa_flags &= ~SA_RESTART;
+	sigaction(SIGALRM, &act, NULL);
+	sigaction(SIGCHLD, NULL, &act);
+	act.sa_flags &= ~SA_RESTART;
+	sigaction(SIGCHLD, &act, NULL);
 
 	memset(&saddr, 0x00, sizeof(saddr));
 	saddr.sun_family = AF_LOCAL;
@@ -304,7 +309,7 @@
 	}
 
 	/* the bind takes care of ensuring only one copy running */
-	retval = bind(ssock, &saddr, addrlen);
+	retval = bind(ssock, (struct sockaddr *) &saddr, addrlen);
 	if (retval < 0) {
 		dbg("bind failed\n");
 		goto exit;
===== udevsend.c 1.18 vs edited =====
--- 1.18/udevsend.c	Fri Feb  6 11:54:08 2004
+++ edited/udevsend.c	Sat Feb  7 10:41:13 2004
@@ -174,7 +174,7 @@
 	/* If we can't send, try to start daemon and resend message */
 	loop = UDEVSEND_CONNECT_RETRY;
 	while (loop--) {
-		retval = sendto(sock, &message, size, 0, (struct sockaddr*)&saddr, addrlen);
+		retval = sendto(sock, &message, size, 0, (struct sockaddr *)&saddr, addrlen);
 		if (retval != -1) {
 			retval = 0;
 			goto close_and_exit;

^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2004-02-07 10:06 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2004-02-06  6:08 [PATCH] convert udevsend/udevd to DGRAM and single-threaded Chris Friesen
2004-02-06 11:27 ` Kay Sievers
2004-02-06 16:03 ` Robert Love
2004-02-06 16:58 ` Patrick Mansfield
2004-02-06 22:21 ` Greg KH
2004-02-06 22:22 ` Chris Friesen
2004-02-07  1:24 ` Patrick Mansfield
2004-02-07  2:04 ` Mike Waychison
2004-02-07  7:06 ` Chris Friesen
2004-02-07  7:32 ` Patrick Mansfield
2004-02-07 10:06 ` Kay Sievers

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).