From: Kay Sievers <kay.sievers@vrfy.org>
To: linux-hotplug@vger.kernel.org
Subject: udevd - throttle fork() if we get too many children in running state
Date: Sun, 26 Dec 2004 03:13:27 +0000
Message-ID: <1104030808.5805.34.camel@localhost.localdomain>
[-- Attachment #1: Type: text/plain, Size: 347 bytes --]
We count the number of "R" (running) processes in our session group and stop
forking if we reach a specified limit.
Sleeping processes are not counted. Running hotplug.d/ and dev.d/ scripts
are counted, as they belong to our session group.
This should help the "lots of disks" setups where the kernel may spit
out lots of events in parallel.
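For reference, per proc(5) a /proc/<pid>/stat line starts with the pid, the
command name in parentheses and the one-letter state, followed by ppid, pgrp
and session, e.g. "5805 (udevd) S 1 5805 5805 ...". A minimal standalone
sketch (not part of the patch, names are illustrative) that reads these
fields for the current process:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	char state;
	int ppid, pgrp, session;
	const char *pos;
	FILE *f;

	f = fopen("/proc/self/stat", "r");
	if (!f)
		return 1;
	if (!fgets(line, sizeof(line), f)) {
		fclose(f);
		return 1;
	}
	fclose(f);

	/* the comm field may contain spaces or ')', so search for the last ')' */
	pos = strrchr(line, ')');
	if (!pos)
		return 1;
	pos += 2;

	if (sscanf(pos, "%c %d %d %d", &state, &ppid, &pgrp, &session) != 4)
		return 1;

	/* 'R' means runnable, 'S' means sleeping */
	printf("state=%c ppid=%d pgrp=%d session=%d\n", state, ppid, pgrp, session);
	return 0;
}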
Thanks,
Kay
[-- Attachment #2: udevd-throttle-fork-01.patch --]
[-- Type: text/x-patch, Size: 4149 bytes --]
diff -Nru a/udevd.c b/udevd.c
--- a/udevd.c 2004-12-26 03:53:53 +01:00
+++ b/udevd.c 2004-12-26 03:53:53 +01:00
@@ -28,11 +28,13 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <ctype.h>
+#include <dirent.h>
+#include <fcntl.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
-#include <fcntl.h>
#include <sys/sysinfo.h>
#include <sys/stat.h>
@@ -45,6 +47,7 @@
/* global variables*/
static int udevsendsock;
+static int sid;
static int pipefds[2];
static long startup_time;
@@ -133,6 +136,8 @@
/* child */
close(udevsendsock);
logging_close();
+
+ setpriority(PRIO_PROCESS, 0, UDEV_PRIORITY);
execve(udev_bin, argv, msg->envp);
dbg("exec of child failed");
_exit(1);
@@ -140,9 +145,6 @@
case -1:
dbg("fork of child failed");
run_queue_delete(msg);
- /* note: we never managed to run, so we had no impact on
- * running_with_devpath(), so don't bother setting run_exec_q
- */
break;
default:
/* get SIGCHLD in main loop */
@@ -151,6 +153,60 @@
}
}
+static int running_processes_in_session(void)
+{
+ DIR *dir;
+ struct dirent *dent;
+ int running = 0;
+
+ dir = opendir("/proc");
+ if (!dir)
+ return -1;
+
+ /* read all processes in /proc */
+ for (dent = readdir(dir); dent != NULL; dent = readdir(dir)) {
+ int f;
+ char procdir[64];
+ char line[256];
+ const char *pos;
+ pid_t pid, ppid, pgrp, session;
+ char state;
+
+ if (!isdigit(dent->d_name[0]))
+ continue;
+
+ snprintf(procdir, sizeof(procdir), "/proc/%s/stat", dent->d_name);
+ procdir[sizeof(procdir)-1] = '\0';
+
+ f = open(procdir, O_RDONLY);
+ if (f == -1)
+ continue;
+
+ read(f, line, sizeof(line));
+ close(f);
+
+ /* skip ugly program name */
+ pos = strrchr(line, ')') + 2;
+
+ pid = strtol(line, NULL, 10);
+ if (sscanf(pos, "%c %d %d %d ", &state, &ppid, &pgrp, &session) != 4)
+ continue;
+
+ /* count only processes in our session */
+ if (session != sid)
+ continue;
+
+ /* count only running, no sleeping processes */
+ if (state != 'R')
+ continue;
+
+ running++;
+ }
+ closedir(dir);
+
+ return running;
+}
+
static int compare_devpath(const char *running, const char *waiting)
{
int i;
@@ -207,13 +263,26 @@
struct hotplug_msg *loop_msg;
struct hotplug_msg *tmp_msg;
struct hotplug_msg *msg;
+ int active;
+ int recheck = THROTTLE_RUNNING_CHILDS_COUNT;
list_for_each_entry_safe(loop_msg, tmp_msg, &exec_list, list) {
+ /* check running processes in our session and possibly throttle */
+ if (recheck >= THROTTLE_RUNNING_CHILDS_COUNT) {
+ active = running_processes_in_session();
+ if (active > THROTTLE_RUNNING_CHILDS_COUNT) {
+ dbg("skip exec, %d processes already in running state", active);
+ return;
+ }
+ recheck = 0;
+ }
+
msg = running_with_devpath(loop_msg);
if (!msg) {
/* move event to run list */
list_move_tail(&loop_msg->list, &running_list);
udev_run(loop_msg);
+ recheck++;
dbg("moved seq %llu to running list", loop_msg->seqnum);
} else {
dbg("delay seq %llu (%s), cause seq %llu (%s) is still running",
@@ -490,7 +559,10 @@
close(fd);
/* become session leader */
- setsid();
+ sid = setsid();
+
+ /* set a reasonable scheduling priority for the daemon */
+ setpriority(PRIO_PROCESS, 0, UDEVD_PRIORITY);
/* setup signal handler pipe */
retval = pipe(pipefds);
diff -Nru a/udevd.h b/udevd.h
--- a/udevd.h 2004-12-26 03:53:53 +01:00
+++ b/udevd.h 2004-12-26 03:53:53 +01:00
@@ -28,9 +28,19 @@
#define UDEVD_SOCK_PATH "udevd"
#define SEND_WAIT_MAX_SECONDS 3
#define SEND_WAIT_LOOP_PER_SECOND 10
+
+#define UDEVD_PRIORITY -5
+#define UDEV_PRIORITY -2
+
+/* duration of initialization phase with shorter timeout */
#define INIT_TIME_SEC 5
#define EVENT_INIT_TIMEOUT_SEC 2
+
+/* timeout to wait for missing events */
#define EVENT_TIMEOUT_SEC 10
+
+/* start to throttle forking if the maximum number of running children in our session is reached */
+#define THROTTLE_RUNNING_CHILDS_COUNT 10
/* environment buffer, should match the kernel's size in lib/kobject_uevent.h */
#define HOTPLUG_BUFFER_SIZE 1024
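For illustration only, here is a rough standalone sketch of the counting
logic used by the patch; the function and variable names are made up for the
example, and unlike the patch it explicitly NUL-terminates the read buffer
and checks the result of strrchr() before using it:

#include <ctype.h>
#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>

/* count processes of the given session that are currently in 'R' state */
static int count_running_in_session(pid_t session_id)
{
	DIR *dir;
	struct dirent *dent;
	int running = 0;

	dir = opendir("/proc");
	if (!dir)
		return -1;

	for (dent = readdir(dir); dent != NULL; dent = readdir(dir)) {
		char path[64];
		char line[256];
		const char *pos;
		char state;
		int ppid, pgrp, session;
		ssize_t len;
		int fd;

		/* only numeric entries in /proc are processes */
		if (!isdigit(dent->d_name[0]))
			continue;

		snprintf(path, sizeof(path), "/proc/%s/stat", dent->d_name);
		fd = open(path, O_RDONLY);
		if (fd == -1)
			continue;

		len = read(fd, line, sizeof(line) - 1);
		close(fd);
		if (len <= 0)
			continue;
		line[len] = '\0';	/* NUL-terminate; the patch above does not do this explicitly */

		/* skip "pid (comm) "; comm may contain spaces or ')' */
		pos = strrchr(line, ')');
		if (!pos)
			continue;
		pos += 2;

		if (sscanf(pos, "%c %d %d %d", &state, &ppid, &pgrp, &session) != 4)
			continue;

		/* count only runnable processes in the requested session */
		if (session != session_id)
			continue;
		if (state != 'R')
			continue;

		running++;
	}
	closedir(dir);

	return running;
}

int main(void)
{
	/* example: count runnable processes in our own session */
	printf("%d\n", count_running_in_session(getsid(0)));
	return 0;
}

It looks like the recheck counter in the exec-list loop exists so that /proc
is rescanned only after every THROTTLE_RUNNING_CHILDS_COUNT newly started
events, rather than once per queued event.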