git.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 0/8] fetch submodules in parallel
@ 2015-09-28 23:13 Stefan Beller
  2015-09-28 23:13 ` [PATCH 1/8] submodule.c: write "Fetching submodule <foo>" to stderr Stefan Beller
                   ` (7 more replies)
  0 siblings, 8 replies; 18+ messages in thread
From: Stefan Beller @ 2015-09-28 23:13 UTC (permalink / raw)
  To: git
  Cc: Stefan Beller, ramsay, jacob.keller, peff, gitster, jrnieder,
	johannes.schindelin, Jens.Lehmann, ericsunshine

Changes to v4: (diff below)
* Some functions wanted to be static (Thanks Ramsay!)
* The patch to factor out return code handling has been dropped as
  the return code handling is slightly different in finish_command and
  the parallel case.
* We can handle signals a bit more gracefully now.
* More documentation in run-command.h 
* I thought it is a good idea to introduce `sigchain_pop_common`.

Jonathan Nieder (1):
  submodule.c: write "Fetching submodule <foo>" to stderr

Stefan Beller (7):
  xread: poll on non blocking fds
  xread_nonblock: add functionality to read from fds without blocking
  strbuf: add strbuf_read_once to read without blocking
  sigchain: add command to pop all common signals
  run-command: add an asynchronous parallel child processor
  fetch_populated_submodules: use new parallel job processing
  submodules: allow parallel fetching, add tests and documentation

 Documentation/fetch-options.txt |   7 +
 builtin/fetch.c                 |   6 +-
 builtin/pull.c                  |   6 +
 git-compat-util.h               |   1 +
 run-command.c                   | 348 ++++++++++++++++++++++++++++++++++++++++
 run-command.h                   |  63 ++++++++
 sigchain.c                      |   9 ++
 sigchain.h                      |   1 +
 strbuf.c                        |  11 ++
 strbuf.h                        |   9 ++
 submodule.c                     | 127 +++++++++++----
 submodule.h                     |   2 +-
 t/t0061-run-command.sh          |  20 +++
 t/t5526-fetch-submodules.sh     |  70 +++++---
 test-run-command.c              |  24 +++
 wrapper.c                       |  35 +++-
 16 files changed, 675 insertions(+), 64 deletions(-)

diff --git a/run-command.c b/run-command.c
index 494e1f8..df84985 100644
--- a/run-command.c
+++ b/run-command.c
@@ -234,35 +234,6 @@ static inline void set_cloexec(int fd)
 		fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
 }
 
-static int determine_return_value(int wait_status,
-				  int *result,
-				  int *error_code,
-				  const char *argv0)
-{
-	if (WIFSIGNALED(wait_status)) {
-		*result = WTERMSIG(wait_status);
-		if (*result != SIGINT && *result != SIGQUIT)
-			error("%s died of signal %d", argv0, *result);
-		/*
-		 * This return value is chosen so that code & 0xff
-		 * mimics the exit code that a POSIX shell would report for
-		 * a program that died from this signal.
-		 */
-		*result += 128;
-	} else if (WIFEXITED(wait_status)) {
-		*result = WEXITSTATUS(wait_status);
-		/*
-		 * Convert special exit code when execvp failed.
-		 */
-		if (*result == 127) {
-			*result = -1;
-			*error_code = ENOENT;
-		}
-	} else
-		return -1;
-	return 0;
-}
-
 static int wait_or_whine(pid_t pid, const char *argv0)
 {
 	int status, code = -1;
@@ -275,12 +246,29 @@ static int wait_or_whine(pid_t pid, const char *argv0)
 	if (waiting < 0) {
 		failed_errno = errno;
 		error("waitpid for %s failed: %s", argv0, strerror(errno));
+	} else if (waiting != pid) {
+		error("waitpid is confused (%s)", argv0);
+	} else if (WIFSIGNALED(status)) {
+		code = WTERMSIG(status);
+		if (code != SIGINT && code != SIGQUIT)
+			error("%s died of signal %d", argv0, code);
+		/*
+		 * This return value is chosen so that code & 0xff
+		 * mimics the exit code that a POSIX shell would report for
+		 * a program that died from this signal.
+		 */
+		code += 128;
+	} else if (WIFEXITED(status)) {
+		code = WEXITSTATUS(status);
+		/*
+		 * Convert special exit code when execvp failed.
+		 */
+		if (code == 127) {
+			code = -1;
+			failed_errno = ENOENT;
+		}
 	} else {
-		if (waiting != pid || (determine_return_value(status,
-							      &code,
-							      &failed_errno,
-							      argv0) < 0))
-			error("waitpid is confused (%s)", argv0);
+		error("waitpid is confused (%s)", argv0);
 	}
 
 	clear_child_for_cleanup(pid);
@@ -888,46 +876,67 @@ struct parallel_processes {
 	 */
 	struct pollfd *pfd;
 
+	unsigned shutdown : 1;
+
 	int output_owner;
 	struct strbuf buffered_output; /* of finished children */
-};
+} parallel_processes_struct;
 
-void default_start_failure(void *data,
-			   struct child_process *cp,
-			   struct strbuf *err)
+static int default_start_failure(void *data,
+				 struct child_process *cp,
+				 struct strbuf *err)
 {
 	int i;
-	struct strbuf sb = STRBUF_INIT;
 
+	strbuf_addstr(err, "Starting a child failed:");
 	for (i = 0; cp->argv[i]; i++)
-		strbuf_addf(&sb, " %s", cp->argv[i]);
+		strbuf_addf(err, " %s", cp->argv[i]);
 
-	die_errno("Starting a child failed:%s", sb.buf);
+	return 0;
 }
 
-void default_return_value(void *data,
-			  struct child_process *cp,
-			  int result)
+static int default_return_value(void *data,
+				struct child_process *cp,
+				struct strbuf *err,
+				int result)
 {
 	int i;
-	struct strbuf sb = STRBUF_INIT;
 
 	if (!result)
-		return;
+		return 0;
 
+	strbuf_addf(err, "A child failed with return code %d:", result);
 	for (i = 0; cp->argv[i]; i++)
-		strbuf_addf(&sb, " %s", cp->argv[i]);
+		strbuf_addf(err, " %s", cp->argv[i]);
 
-	die_errno("A child failed with return code %d:%s", result, sb.buf);
+	return 0;
 }
 
-static void pp_init(struct parallel_processes *pp,
-					int n, void *data,
-					get_next_task_fn get_next_task,
-					start_failure_fn start_failure,
-					return_value_fn return_value)
+static void kill_children(struct parallel_processes *pp, int signo)
+{
+	int i, n = pp->max_processes;
+
+	for (i = 0; i < n; i++)
+		if (pp->children[i].in_use)
+			kill(pp->children[i].process.pid, signo);
+}
+
+static void handle_children_on_signal(int signo)
+{
+	struct parallel_processes *pp = &parallel_processes_struct;
+
+	kill_children(pp, signo);
+	sigchain_pop(signo);
+	raise(signo);
+}
+
+static struct parallel_processes *pp_init(int n, void *data,
+					  get_next_task_fn get_next_task,
+					  start_failure_fn start_failure,
+					  return_value_fn return_value)
 {
 	int i;
+	struct parallel_processes *pp = &parallel_processes_struct;
 
 	if (n < 1)
 		n = online_cpus();
@@ -952,6 +961,8 @@ static void pp_init(struct parallel_processes *pp,
 		pp->pfd[i].events = POLLIN;
 		pp->pfd[i].fd = -1;
 	}
+	sigchain_push_common(handle_children_on_signal);
+	return pp;
 }
 
 static void pp_cleanup(struct parallel_processes *pp)
@@ -964,6 +975,8 @@ static void pp_cleanup(struct parallel_processes *pp)
 	free(pp->children);
 	free(pp->pfd);
 	strbuf_release(&pp->buffered_output);
+
+	sigchain_pop_common();
 }
 
 static void set_nonblocking(int fd)
@@ -977,7 +990,12 @@ static void set_nonblocking(int fd)
 			"output will be degraded");
 }
 
-/* returns 1 if a process was started, 0 otherwise */
+/* returns
+ *  0 if a new task was started.
+ *  1 if no new job was started (get_next_task ran out of work, or a
+ *    non-critical problem with starting a new command)
+ * -1 no new job was started, user wishes to shut down early.
+ */
 static int pp_start_one(struct parallel_processes *pp)
 {
 	int i;
@@ -993,10 +1011,14 @@ static int pp_start_one(struct parallel_processes *pp)
 			       &pp->children[i].err))
 		return 1;
 
-	if (start_command(&pp->children[i].process))
-		pp->start_failure(pp->data,
-				  &pp->children[i].process,
-				  &pp->children[i].err);
+	if (start_command(&pp->children[i].process)) {
+		int code = pp->start_failure(pp->data,
+					     &pp->children[i].process,
+					     &pp->children[i].err);
+		strbuf_addbuf(&pp->buffered_output, &pp->children[i].err);
+		strbuf_reset(&pp->children[i].err);
+		return code ? -1 : 1;
+	}
 
 	set_nonblocking(pp->children[i].process.err);
 
@@ -1006,11 +1028,11 @@ static int pp_start_one(struct parallel_processes *pp)
 	return 0;
 }
 
-static void pp_buffer_stderr(struct parallel_processes *pp)
+static void pp_buffer_stderr(struct parallel_processes *pp, int output_timeout)
 {
 	int i;
 
-	while ((i = poll(pp->pfd, pp->max_processes, 100)) < 0) {
+	while ((i = poll(pp->pfd, pp->max_processes, output_timeout)) < 0) {
 		if (errno == EINTR)
 			continue;
 		pp_cleanup(pp);
@@ -1038,17 +1060,18 @@ static void pp_output(struct parallel_processes *pp)
 	}
 }
 
-static void pp_collect_finished(struct parallel_processes *pp)
+static int pp_collect_finished(struct parallel_processes *pp)
 {
 	int i = 0;
 	pid_t pid;
 	int wait_status, code;
 	int n = pp->max_processes;
+	int result = 0;
 
 	while (pp->nr_processes > 0) {
 		pid = waitpid(-1, &wait_status, WNOHANG);
 		if (pid == 0)
-			return;
+			return 0;
 
 		if (pid < 0)
 			die_errno("wait");
@@ -1064,12 +1087,38 @@ static void pp_collect_finished(struct parallel_processes *pp)
 				pp->children[i].process.err, 0) < 0)
 			die_errno("strbuf_read");
 
-		if (determine_return_value(wait_status, &code, &errno,
-					   pp->children[i].process.argv[0]) < 0)
-			error("waitpid is confused (%s)",
-			      pp->children[i].process.argv[0]);
+		if (WIFSIGNALED(wait_status)) {
+			code = WTERMSIG(wait_status);
+			if (!pp->shutdown &&
+			    code != SIGINT && code != SIGQUIT)
+				strbuf_addf(&pp->children[i].err,
+					    "%s died of signal %d",
+					    pp->children[i].process.argv[0],
+					    code);
+			/*
+			 * This return value is chosen so that code & 0xff
+			 * mimics the exit code that a POSIX shell would report for
+			 * a program that died from this signal.
+			 */
+			code += 128;
+		} else if (WIFEXITED(wait_status)) {
+			code = WEXITSTATUS(wait_status);
+			/*
+			 * Convert special exit code when execvp failed.
+			 */
+			if (code == 127) {
+				code = -1;
+				errno = ENOENT;
+			}
+		} else
+			strbuf_addf(&pp->children[i].err,
+				    "waitpid is confused (%s)",
+				    pp->children[i].process.argv[0]);
+
 
-		pp->return_value(pp->data, &pp->children[i].process, code);
+		if (pp->return_value(pp->data, &pp->children[i].process,
+				     &pp->children[i].err, code))
+			result = 1;
 
 		argv_array_clear(&pp->children[i].process.args);
 		argv_array_clear(&pp->children[i].process.env_array);
@@ -1103,6 +1152,7 @@ static void pp_collect_finished(struct parallel_processes *pp)
 			pp->output_owner = (pp->output_owner + i) % n;
 		}
 	}
+	return result;
 }
 
 int run_processes_parallel(int n, void *data,
@@ -1110,21 +1160,43 @@ int run_processes_parallel(int n, void *data,
 			   start_failure_fn start_failure,
 			   return_value_fn return_value)
 {
-	struct parallel_processes pp;
-	pp_init(&pp, n, data, get_next_task, start_failure, return_value);
+	int no_more_task = 0;
+	struct parallel_processes *pp;
 
+	pp = pp_init(n, data, get_next_task, start_failure, return_value);
 	while (1) {
-		while (pp.nr_processes < pp.max_processes &&
-		       !pp_start_one(&pp))
-			; /* nothing */
-		if (!pp.nr_processes)
+		int i;
+		int output_timeout = 100;
+		int spawn_cap = 4;
+
+		if (!no_more_task) {
+			for (i = 0; i < spawn_cap; i++) {
+				int code;
+				if (pp->nr_processes == pp->max_processes)
+					break;
+
+				code = pp_start_one(pp);
+				if (!code)
+					continue;
+				if (code < 0) {
+					pp->shutdown = 1;
+					kill_children(pp, SIGTERM);
+				}
+				no_more_task = 1;
+				break;
+			}
+		}
+		if (no_more_task && !pp->nr_processes)
 			break;
-		pp_buffer_stderr(&pp);
-		pp_output(&pp);
-		pp_collect_finished(&pp);
+		pp_buffer_stderr(pp, output_timeout);
+		pp_output(pp);
+		if (pp_collect_finished(pp)) {
+			kill_children(pp, SIGTERM);
+			pp->shutdown = 1;
+			no_more_task = 1;
+		}
 	}
 
-	pp_cleanup(&pp);
-
+	pp_cleanup(pp);
 	return 0;
 }
diff --git a/run-command.h b/run-command.h
index 3807fd1..1179cb0 100644
--- a/run-command.h
+++ b/run-command.h
@@ -132,13 +132,36 @@ typedef int (*get_next_task_fn)(void *data,
 				struct child_process *cp,
 				struct strbuf *err);
 
-typedef void (*start_failure_fn)(void *data,
-				 struct child_process *cp,
-				 struct strbuf *err);
-
-typedef void (*return_value_fn)(void *data,
+/**
+ * This callback is called whenever there are problems starting
+ * a new process.
+ *
+ * You must not write to stdout or stderr in this function. Add your
+ * message to the strbuf err instead, which will be printed without
+ * messing up the output of the other parallel processes.
+ *
+ * Return 0 to continue the parallel processing. To abort gracefully,
+ * return non zero.
+ */
+typedef int (*start_failure_fn)(void *data,
 				struct child_process *cp,
-				int result);
+				struct strbuf *err);
+
+/**
+ * This callback is called on every child process that finished
+ * processing.
+ *
+ * You must not write to stdout or stderr in this function. Add your
+ * message to the strbuf err instead, which will be printed without
+ * messing up the output of the other parallel processes.
+ *
+ * Return 0 to continue the parallel processing. To abort gracefully,
+ * return non zero.
+ */
+typedef int (*return_value_fn)(void *data,
+			       struct child_process *cp,
+			       struct strbuf *err,
+			       int result);
 
 /**
  * Runs up to n processes at the same time. Whenever a process can be
@@ -148,6 +171,10 @@ typedef void (*return_value_fn)(void *data,
  * The children started via this function run in parallel and their output
  * to stderr is buffered, while one of the children will directly output
  * to stderr.
+ *
+ * If start_failure_fn and return_value_fn are NULL, default handlers
+ * will be used. The default handlers will print an error message on
+ * error without issuing an emergency stop.
  */
 
 int run_processes_parallel(int n, void *data,
diff --git a/sigchain.c b/sigchain.c
index faa375d..9262307 100644
--- a/sigchain.c
+++ b/sigchain.c
@@ -50,3 +50,12 @@ void sigchain_push_common(sigchain_fun f)
 	sigchain_push(SIGQUIT, f);
 	sigchain_push(SIGPIPE, f);
 }
+
+void sigchain_pop_common(void)
+{
+	sigchain_pop(SIGINT);
+	sigchain_pop(SIGHUP);
+	sigchain_pop(SIGTERM);
+	sigchain_pop(SIGQUIT);
+	sigchain_pop(SIGPIPE);
+}
diff --git a/sigchain.h b/sigchain.h
index 618083b..138b20f 100644
--- a/sigchain.h
+++ b/sigchain.h
@@ -7,5 +7,6 @@ int sigchain_push(int sig, sigchain_fun f);
 int sigchain_pop(int sig);
 
 void sigchain_push_common(sigchain_fun f);
+void sigchain_pop_common(void);
 
 #endif /* SIGCHAIN_H */
diff --git a/submodule.c b/submodule.c
index fdaf3e4..7ab89f4 100644
--- a/submodule.c
+++ b/submodule.c
@@ -630,18 +630,25 @@ struct submodule_parallel_fetch {
 int get_next_submodule(void *data, struct child_process *cp,
 		       struct strbuf *err);
 
-void handle_submodule_fetch_start_err(void *data, struct child_process *cp, struct strbuf *err)
+static int fetch_start_failure(void *data, struct child_process *cp,
+			       struct strbuf *err)
 {
 	struct submodule_parallel_fetch *spf = data;
+
 	spf->result = 1;
+
+	return 0;
 }
 
-void handle_submodule_fetch_finish( void *data, struct child_process *cp, int retvalue)
+static int fetch_finish(void *data, struct child_process *cp,
+			struct strbuf *err, int retvalue)
 {
 	struct submodule_parallel_fetch *spf = data;
 
 	if (retvalue)
 		spf->result = 1;
+
+	return 0;
 }
 
 int fetch_populated_submodules(const struct argv_array *options,
@@ -671,8 +678,8 @@ int fetch_populated_submodules(const struct argv_array *options,
 	calculate_changed_submodule_paths();
 	run_processes_parallel(max_parallel_jobs, &spf,
 			       get_next_submodule,
-			       handle_submodule_fetch_start_err,
-			       handle_submodule_fetch_finish);
+			       fetch_start_failure,
+			       fetch_finish);
 
 	argv_array_clear(&spf.args);
 out:
diff --git a/test-run-command.c b/test-run-command.c
index 94c6eee..2555791 100644
--- a/test-run-command.c
+++ b/test-run-command.c
@@ -16,9 +16,9 @@
 #include <errno.h>
 
 static int number_callbacks;
-int parallel_next(void *data,
-		  struct child_process *cp,
-		  struct strbuf *err)
+static int parallel_next(void *data,
+			 struct child_process *cp,
+			 struct strbuf *err)
 {
 	struct child_process *d = data;
 	if (number_callbacks >= 4)


-- 
2.5.0.273.g6fa2560.dirty

^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [PATCH 1/8] submodule.c: write "Fetching submodule <foo>" to stderr
  2015-09-28 23:13 [PATCH 0/8] fetch submodules in parallel Stefan Beller
@ 2015-09-28 23:13 ` Stefan Beller
  2015-09-28 23:14 ` [PATCH 2/8] xread: poll on non blocking fds Stefan Beller
                   ` (6 subsequent siblings)
  7 siblings, 0 replies; 18+ messages in thread
From: Stefan Beller @ 2015-09-28 23:13 UTC (permalink / raw)
  To: git
  Cc: Jonathan Nieder, ramsay, jacob.keller, peff, gitster,
	johannes.schindelin, Jens.Lehmann, ericsunshine, Stefan Beller

From: Jonathan Nieder <jrnieder@gmail.com>

The "Pushing submodule <foo>" progress output correctly goes to
stderr, but "Fetching submodule <foo>" is going to stdout by
mistake.  Fix it to write to stderr.

Noticed while trying to implement a parallel submodule fetch.  When
this particular output line went to a different file descriptor, it
was buffered separately, resulting in wrongly interleaved output if
we copied it to the terminal naively.

Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Signed-off-by: Stefan Beller <sbeller@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
---
 submodule.c                 |  2 +-
 t/t5526-fetch-submodules.sh | 51 +++++++++++++++++++++++----------------------
 2 files changed, 27 insertions(+), 26 deletions(-)

diff --git a/submodule.c b/submodule.c
index 9fcc86f..1d64e57 100644
--- a/submodule.c
+++ b/submodule.c
@@ -694,7 +694,7 @@ int fetch_populated_submodules(const struct argv_array *options,
 			git_dir = submodule_git_dir.buf;
 		if (is_directory(git_dir)) {
 			if (!quiet)
-				printf("Fetching submodule %s%s\n", prefix, ce->name);
+				fprintf(stderr, "Fetching submodule %s%s\n", prefix, ce->name);
 			cp.dir = submodule_path.buf;
 			argv_array_push(&argv, default_argv);
 			argv_array_push(&argv, "--submodule-prefix");
diff --git a/t/t5526-fetch-submodules.sh b/t/t5526-fetch-submodules.sh
index a4532b0..17759b1 100755
--- a/t/t5526-fetch-submodules.sh
+++ b/t/t5526-fetch-submodules.sh
@@ -16,7 +16,8 @@ add_upstream_commit() {
 		git add subfile &&
 		git commit -m new subfile &&
 		head2=$(git rev-parse --short HEAD) &&
-		echo "From $pwd/submodule" > ../expect.err &&
+		echo "Fetching submodule submodule" > ../expect.err &&
+		echo "From $pwd/submodule" >> ../expect.err &&
 		echo "   $head1..$head2  master     -> origin/master" >> ../expect.err
 	) &&
 	(
@@ -27,6 +28,7 @@ add_upstream_commit() {
 		git add deepsubfile &&
 		git commit -m new deepsubfile &&
 		head2=$(git rev-parse --short HEAD) &&
+		echo "Fetching submodule submodule/subdir/deepsubmodule" >> ../expect.err &&
 		echo "From $pwd/deepsubmodule" >> ../expect.err &&
 		echo "   $head1..$head2  master     -> origin/master" >> ../expect.err
 	)
@@ -56,9 +58,7 @@ test_expect_success setup '
 	(
 		cd downstream &&
 		git submodule update --init --recursive
-	) &&
-	echo "Fetching submodule submodule" > expect.out &&
-	echo "Fetching submodule submodule/subdir/deepsubmodule" >> expect.out
+	)
 '
 
 test_expect_success "fetch --recurse-submodules recurses into submodules" '
@@ -67,7 +67,7 @@ test_expect_success "fetch --recurse-submodules recurses into submodules" '
 		cd downstream &&
 		git fetch --recurse-submodules >../actual.out 2>../actual.err
 	) &&
-	test_i18ncmp expect.out actual.out &&
+	test_must_be_empty actual.out &&
 	test_i18ncmp expect.err actual.err
 '
 
@@ -96,7 +96,7 @@ test_expect_success "using fetchRecurseSubmodules=true in .gitmodules recurses i
 		git config -f .gitmodules submodule.submodule.fetchRecurseSubmodules true &&
 		git fetch >../actual.out 2>../actual.err
 	) &&
-	test_i18ncmp expect.out actual.out &&
+	test_must_be_empty actual.out &&
 	test_i18ncmp expect.err actual.err
 '
 
@@ -127,7 +127,7 @@ test_expect_success "--recurse-submodules overrides fetchRecurseSubmodules setti
 		git config --unset -f .gitmodules submodule.submodule.fetchRecurseSubmodules &&
 		git config --unset submodule.submodule.fetchRecurseSubmodules
 	) &&
-	test_i18ncmp expect.out actual.out &&
+	test_must_be_empty actual.out &&
 	test_i18ncmp expect.err actual.err
 '
 
@@ -146,7 +146,7 @@ test_expect_success "--dry-run propagates to submodules" '
 		cd downstream &&
 		git fetch --recurse-submodules --dry-run >../actual.out 2>../actual.err
 	) &&
-	test_i18ncmp expect.out actual.out &&
+	test_must_be_empty actual.out &&
 	test_i18ncmp expect.err actual.err
 '
 
@@ -155,7 +155,7 @@ test_expect_success "Without --dry-run propagates to submodules" '
 		cd downstream &&
 		git fetch --recurse-submodules >../actual.out 2>../actual.err
 	) &&
-	test_i18ncmp expect.out actual.out &&
+	test_must_be_empty actual.out &&
 	test_i18ncmp expect.err actual.err
 '
 
@@ -166,7 +166,7 @@ test_expect_success "recurseSubmodules=true propagates into submodules" '
 		git config fetch.recurseSubmodules true
 		git fetch >../actual.out 2>../actual.err
 	) &&
-	test_i18ncmp expect.out actual.out &&
+	test_must_be_empty actual.out &&
 	test_i18ncmp expect.err actual.err
 '
 
@@ -180,7 +180,7 @@ test_expect_success "--recurse-submodules overrides config in submodule" '
 		) &&
 		git fetch --recurse-submodules >../actual.out 2>../actual.err
 	) &&
-	test_i18ncmp expect.out actual.out &&
+	test_must_be_empty actual.out &&
 	test_i18ncmp expect.err actual.err
 '
 
@@ -214,16 +214,15 @@ test_expect_success "Recursion stops when no new submodule commits are fetched"
 	git add submodule &&
 	git commit -m "new submodule" &&
 	head2=$(git rev-parse --short HEAD) &&
-	echo "Fetching submodule submodule" > expect.out.sub &&
 	echo "From $pwd/." > expect.err.sub &&
 	echo "   $head1..$head2  master     -> origin/master" >>expect.err.sub &&
-	head -2 expect.err >> expect.err.sub &&
+	head -3 expect.err >> expect.err.sub &&
 	(
 		cd downstream &&
 		git fetch >../actual.out 2>../actual.err
 	) &&
 	test_i18ncmp expect.err.sub actual.err &&
-	test_i18ncmp expect.out.sub actual.out
+	test_must_be_empty actual.out
 '
 
 test_expect_success "Recursion doesn't happen when new superproject commits don't change any submodules" '
@@ -269,7 +268,7 @@ test_expect_success "Recursion picks up config in submodule" '
 		)
 	) &&
 	test_i18ncmp expect.err.sub actual.err &&
-	test_i18ncmp expect.out actual.out
+	test_must_be_empty actual.out
 '
 
 test_expect_success "Recursion picks up all submodules when necessary" '
@@ -285,7 +284,8 @@ test_expect_success "Recursion picks up all submodules when necessary" '
 		git add subdir/deepsubmodule &&
 		git commit -m "new deepsubmodule"
 		head2=$(git rev-parse --short HEAD) &&
-		echo "From $pwd/submodule" > ../expect.err.sub &&
+		echo "Fetching submodule submodule" > ../expect.err.sub &&
+		echo "From $pwd/submodule" >> ../expect.err.sub &&
 		echo "   $head1..$head2  master     -> origin/master" >> ../expect.err.sub
 	) &&
 	head1=$(git rev-parse --short HEAD) &&
@@ -295,13 +295,13 @@ test_expect_success "Recursion picks up all submodules when necessary" '
 	echo "From $pwd/." > expect.err.2 &&
 	echo "   $head1..$head2  master     -> origin/master" >> expect.err.2 &&
 	cat expect.err.sub >> expect.err.2 &&
-	tail -2 expect.err >> expect.err.2 &&
+	tail -3 expect.err >> expect.err.2 &&
 	(
 		cd downstream &&
 		git fetch >../actual.out 2>../actual.err
 	) &&
 	test_i18ncmp expect.err.2 actual.err &&
-	test_i18ncmp expect.out actual.out
+	test_must_be_empty actual.out
 '
 
 test_expect_success "'--recurse-submodules=on-demand' doesn't recurse when no new commits are fetched in the superproject (and ignores config)" '
@@ -317,7 +317,8 @@ test_expect_success "'--recurse-submodules=on-demand' doesn't recurse when no ne
 		git add subdir/deepsubmodule &&
 		git commit -m "new deepsubmodule" &&
 		head2=$(git rev-parse --short HEAD) &&
-		echo "From $pwd/submodule" > ../expect.err.sub &&
+		echo Fetching submodule submodule > ../expect.err.sub &&
+		echo "From $pwd/submodule" >> ../expect.err.sub &&
 		echo "   $head1..$head2  master     -> origin/master" >> ../expect.err.sub
 	) &&
 	(
@@ -335,7 +336,7 @@ test_expect_success "'--recurse-submodules=on-demand' recurses as deep as necess
 	git add submodule &&
 	git commit -m "new submodule" &&
 	head2=$(git rev-parse --short HEAD) &&
-	tail -2 expect.err > expect.err.deepsub &&
+	tail -3 expect.err > expect.err.deepsub &&
 	echo "From $pwd/." > expect.err &&
 	echo "   $head1..$head2  master     -> origin/master" >>expect.err &&
 	cat expect.err.sub >> expect.err &&
@@ -354,7 +355,7 @@ test_expect_success "'--recurse-submodules=on-demand' recurses as deep as necess
 			git config --unset -f .gitmodules submodule.subdir/deepsubmodule.fetchRecursive
 		)
 	) &&
-	test_i18ncmp expect.out actual.out &&
+	test_must_be_empty actual.out &&
 	test_i18ncmp expect.err actual.err
 '
 
@@ -388,7 +389,7 @@ test_expect_success "'fetch.recurseSubmodules=on-demand' overrides global config
 	head2=$(git rev-parse --short HEAD) &&
 	echo "From $pwd/." > expect.err.2 &&
 	echo "   $head1..$head2  master     -> origin/master" >>expect.err.2 &&
-	head -2 expect.err >> expect.err.2 &&
+	head -3 expect.err >> expect.err.2 &&
 	(
 		cd downstream &&
 		git config fetch.recurseSubmodules on-demand &&
@@ -399,7 +400,7 @@ test_expect_success "'fetch.recurseSubmodules=on-demand' overrides global config
 		cd downstream &&
 		git config --unset fetch.recurseSubmodules
 	) &&
-	test_i18ncmp expect.out.sub actual.out &&
+	test_must_be_empty actual.out &&
 	test_i18ncmp expect.err.2 actual.err
 '
 
@@ -416,7 +417,7 @@ test_expect_success "'submodule.<sub>.fetchRecurseSubmodules=on-demand' override
 	head2=$(git rev-parse --short HEAD) &&
 	echo "From $pwd/." > expect.err.2 &&
 	echo "   $head1..$head2  master     -> origin/master" >>expect.err.2 &&
-	head -2 expect.err >> expect.err.2 &&
+	head -3 expect.err >> expect.err.2 &&
 	(
 		cd downstream &&
 		git config submodule.submodule.fetchRecurseSubmodules on-demand &&
@@ -427,7 +428,7 @@ test_expect_success "'submodule.<sub>.fetchRecurseSubmodules=on-demand' override
 		cd downstream &&
 		git config --unset submodule.submodule.fetchRecurseSubmodules
 	) &&
-	test_i18ncmp expect.out.sub actual.out &&
+	test_must_be_empty actual.out &&
 	test_i18ncmp expect.err.2 actual.err
 '
 
-- 
2.5.0.273.g6fa2560.dirty

^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [PATCH 2/8] xread: poll on non blocking fds
  2015-09-28 23:13 [PATCH 0/8] fetch submodules in parallel Stefan Beller
  2015-09-28 23:13 ` [PATCH 1/8] submodule.c: write "Fetching submodule <foo>" to stderr Stefan Beller
@ 2015-09-28 23:14 ` Stefan Beller
  2015-09-28 23:14 ` [PATCH 3/8] xread_nonblock: add functionality to read from fds without blocking Stefan Beller
                   ` (5 subsequent siblings)
  7 siblings, 0 replies; 18+ messages in thread
From: Stefan Beller @ 2015-09-28 23:14 UTC (permalink / raw)
  To: git
  Cc: Stefan Beller, ramsay, jacob.keller, peff, gitster, jrnieder,
	johannes.schindelin, Jens.Lehmann, ericsunshine

From the man page:
EAGAIN The file descriptor fd refers to a file other than a socket
       and has been marked nonblocking (O_NONBLOCK), and the read
       would block.

EAGAIN or EWOULDBLOCK
       The file descriptor fd refers to a socket and has been marked
       nonblocking (O_NONBLOCK), and the read would block.  POSIX.1-2001
       allows either error to be returned for this case, and does not
       require these constants to have the same value, so a portable
       application should check for both possibilities.

If we get an EAGAIN or EWOULDBLOCK the fd must have set O_NONBLOCK.
As the intent of xread is to read as much as possible either until the
fd is EOF or an actual error occurs, we can ease the feeder of the fd
by not spinning the whole time, but rather wait for it politely by not
busy waiting.

We should not care if the call to poll failed, as we're in an infinite
loop and can only get out with the correct read().

Signed-off-by: Stefan Beller <sbeller@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
---
 wrapper.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/wrapper.c b/wrapper.c
index ff49807..5517928 100644
--- a/wrapper.c
+++ b/wrapper.c
@@ -201,8 +201,17 @@ ssize_t xread(int fd, void *buf, size_t len)
 	    len = MAX_IO_SIZE;
 	while (1) {
 		nr = read(fd, buf, len);
-		if ((nr < 0) && (errno == EAGAIN || errno == EINTR))
-			continue;
+		if (nr < 0) {
+			if (errno == EINTR)
+				continue;
+			if (errno == EAGAIN || errno == EWOULDBLOCK) {
+				struct pollfd pfd;
+				pfd.events = POLLIN;
+				pfd.fd = fd;
+				/* We deliberately ignore the return value */
+				poll(&pfd, 1, -1);
+			}
+		}
 		return nr;
 	}
 }
-- 
2.5.0.273.g6fa2560.dirty

^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [PATCH 3/8] xread_nonblock: add functionality to read from fds without blocking
  2015-09-28 23:13 [PATCH 0/8] fetch submodules in parallel Stefan Beller
  2015-09-28 23:13 ` [PATCH 1/8] submodule.c: write "Fetching submodule <foo>" to stderr Stefan Beller
  2015-09-28 23:14 ` [PATCH 2/8] xread: poll on non blocking fds Stefan Beller
@ 2015-09-28 23:14 ` Stefan Beller
  2015-09-28 23:14 ` [PATCH 4/8] strbuf: add strbuf_read_once to read " Stefan Beller
                   ` (4 subsequent siblings)
  7 siblings, 0 replies; 18+ messages in thread
From: Stefan Beller @ 2015-09-28 23:14 UTC (permalink / raw)
  To: git
  Cc: Stefan Beller, ramsay, jacob.keller, peff, gitster, jrnieder,
	johannes.schindelin, Jens.Lehmann, ericsunshine

Provide a wrapper to read(), similar to xread(), that restarts on
EINTR but not EAGAIN (or EWOULDBLOCK). This enables the caller to
handle polling itself, possibly polling multiple sockets or performing
some other action.

Helped-by: Jacob Keller <jacob.keller@gmail.com>
Helped-by: Jeff King <peff@peff.net>
Helped-by: Junio C Hamano <gitster@pobox.com>
Signed-off-by: Stefan Beller <sbeller@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
---
 git-compat-util.h |  1 +
 wrapper.c         | 22 ++++++++++++++++++++++
 2 files changed, 23 insertions(+)

diff --git a/git-compat-util.h b/git-compat-util.h
index c6d391f..9ccea85 100644
--- a/git-compat-util.h
+++ b/git-compat-util.h
@@ -718,6 +718,7 @@ extern void *xcalloc(size_t nmemb, size_t size);
 extern void *xmmap(void *start, size_t length, int prot, int flags, int fd, off_t offset);
 extern void *xmmap_gently(void *start, size_t length, int prot, int flags, int fd, off_t offset);
 extern ssize_t xread(int fd, void *buf, size_t len);
+extern ssize_t xread_nonblock(int fd, void *buf, size_t len);
 extern ssize_t xwrite(int fd, const void *buf, size_t len);
 extern ssize_t xpread(int fd, void *buf, size_t len, off_t offset);
 extern int xdup(int fd);
diff --git a/wrapper.c b/wrapper.c
index 5517928..41a21e1 100644
--- a/wrapper.c
+++ b/wrapper.c
@@ -217,6 +217,28 @@ ssize_t xread(int fd, void *buf, size_t len)
 }
 
 /*
+ * xread_nonblock() is the same a read(), but it automatically restarts read()
+ * interrupted operations (EINTR). xread_nonblock() DOES NOT GUARANTEE that
+ * "len" bytes is read. EWOULDBLOCK is turned into EAGAIN.
+ */
+ssize_t xread_nonblock(int fd, void *buf, size_t len)
+{
+	ssize_t nr;
+	if (len > MAX_IO_SIZE)
+		len = MAX_IO_SIZE;
+	while (1) {
+		nr = read(fd, buf, len);
+		if (nr < 0) {
+			if (errno == EINTR)
+				continue;
+			if (errno == EWOULDBLOCK)
+				errno = EAGAIN;
+		}
+		return nr;
+	}
+}
+
+/*
  * xwrite() is the same a write(), but it automatically restarts write()
  * operations with a recoverable error (EAGAIN and EINTR). xwrite() DOES NOT
  * GUARANTEE that "len" bytes is written even if the operation is successful.
-- 
2.5.0.273.g6fa2560.dirty

^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [PATCH 4/8] strbuf: add strbuf_read_once to read without blocking
  2015-09-28 23:13 [PATCH 0/8] fetch submodules in parallel Stefan Beller
                   ` (2 preceding siblings ...)
  2015-09-28 23:14 ` [PATCH 3/8] xread_nonblock: add functionality to read from fds without blocking Stefan Beller
@ 2015-09-28 23:14 ` Stefan Beller
  2015-09-28 23:14 ` [PATCH 5/8] sigchain: add command to pop all common signals Stefan Beller
                   ` (3 subsequent siblings)
  7 siblings, 0 replies; 18+ messages in thread
From: Stefan Beller @ 2015-09-28 23:14 UTC (permalink / raw)
  To: git
  Cc: Stefan Beller, ramsay, jacob.keller, peff, gitster, jrnieder,
	johannes.schindelin, Jens.Lehmann, ericsunshine

The new call will read from a file descriptor into a strbuf once. The
underlying call xread_nonblock is meant to execute without blocking if
the file descriptor is set to O_NONBLOCK. It is a bug to call
strbuf_read_once on a file descriptor which would block.

Signed-off-by: Stefan Beller <sbeller@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
---
 strbuf.c | 11 +++++++++++
 strbuf.h |  9 +++++++++
 2 files changed, 20 insertions(+)

diff --git a/strbuf.c b/strbuf.c
index cce5eed..35e71b8 100644
--- a/strbuf.c
+++ b/strbuf.c
@@ -384,6 +384,17 @@ ssize_t strbuf_read(struct strbuf *sb, int fd, size_t hint)
 	return sb->len - oldlen;
 }
 
+ssize_t strbuf_read_once(struct strbuf *sb, int fd, size_t hint)
+{
+	ssize_t cnt;
+
+	strbuf_grow(sb, hint ? hint : 8192);
+	cnt = xread_nonblock(fd, sb->buf + sb->len, sb->alloc - sb->len - 1);
+	if (cnt > 0)
+		strbuf_setlen(sb, sb->len + cnt);
+	return cnt;
+}
+
 #define STRBUF_MAXLINK (2*PATH_MAX)
 
 int strbuf_readlink(struct strbuf *sb, const char *path, size_t hint)
diff --git a/strbuf.h b/strbuf.h
index aef2794..ea69665 100644
--- a/strbuf.h
+++ b/strbuf.h
@@ -367,6 +367,15 @@ extern size_t strbuf_fread(struct strbuf *, size_t, FILE *);
 extern ssize_t strbuf_read(struct strbuf *, int fd, size_t hint);
 
 /**
+ * Read from a file descriptor that is marked as O_NONBLOCK without
+ * blocking.  Returns the number of new bytes appended to the sb.
+ * Negative return value signals there was an error returned from
+ * underlying read(2), in which case the caller should check errno.
+ * e.g. errno == EAGAIN when the read may have blocked.
+ */
+extern ssize_t strbuf_read_once(struct strbuf *, int fd, size_t hint);
+
+/**
  * Read the contents of a file, specified by its path. The third argument
  * can be used to give a hint about the file size, to avoid reallocs.
  */
-- 
2.5.0.273.g6fa2560.dirty

^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [PATCH 5/8] sigchain: add command to pop all common signals
  2015-09-28 23:13 [PATCH 0/8] fetch submodules in parallel Stefan Beller
                   ` (3 preceding siblings ...)
  2015-09-28 23:14 ` [PATCH 4/8] strbuf: add strbuf_read_once to read " Stefan Beller
@ 2015-09-28 23:14 ` Stefan Beller
  2015-09-30  5:23   ` Junio C Hamano
  2015-09-28 23:14 ` [PATCH 6/8] run-command: add an asynchronous parallel child processor Stefan Beller
                   ` (2 subsequent siblings)
  7 siblings, 1 reply; 18+ messages in thread
From: Stefan Beller @ 2015-09-28 23:14 UTC (permalink / raw)
  To: git
  Cc: Stefan Beller, ramsay, jacob.keller, peff, gitster, jrnieder,
	johannes.schindelin, Jens.Lehmann, ericsunshine

The new method removes all common signal handlers that were installed
by sigchain_push.

CC: Jeff King <peff@peff.net>
Signed-off-by: Stefan Beller <sbeller@google.com>
---
 sigchain.c | 9 +++++++++
 sigchain.h | 1 +
 2 files changed, 10 insertions(+)

diff --git a/sigchain.c b/sigchain.c
index faa375d..9262307 100644
--- a/sigchain.c
+++ b/sigchain.c
@@ -50,3 +50,12 @@ void sigchain_push_common(sigchain_fun f)
 	sigchain_push(SIGQUIT, f);
 	sigchain_push(SIGPIPE, f);
 }
+
+void sigchain_pop_common(void)
+{
+	sigchain_pop(SIGINT);
+	sigchain_pop(SIGHUP);
+	sigchain_pop(SIGTERM);
+	sigchain_pop(SIGQUIT);
+	sigchain_pop(SIGPIPE);
+}
diff --git a/sigchain.h b/sigchain.h
index 618083b..138b20f 100644
--- a/sigchain.h
+++ b/sigchain.h
@@ -7,5 +7,6 @@ int sigchain_push(int sig, sigchain_fun f);
 int sigchain_pop(int sig);
 
 void sigchain_push_common(sigchain_fun f);
+void sigchain_pop_common(void);
 
 #endif /* SIGCHAIN_H */
-- 
2.5.0.273.g6fa2560.dirty

^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [PATCH 6/8] run-command: add an asynchronous parallel child processor
  2015-09-28 23:13 [PATCH 0/8] fetch submodules in parallel Stefan Beller
                   ` (4 preceding siblings ...)
  2015-09-28 23:14 ` [PATCH 5/8] sigchain: add command to pop all common signals Stefan Beller
@ 2015-09-28 23:14 ` Stefan Beller
  2015-09-30  3:12   ` Junio C Hamano
  2015-09-28 23:14 ` [PATCH 7/8] fetch_populated_submodules: use new parallel job processing Stefan Beller
  2015-09-28 23:14 ` [PATCH 8/8] submodules: allow parallel fetching, add tests and documentation Stefan Beller
  7 siblings, 1 reply; 18+ messages in thread
From: Stefan Beller @ 2015-09-28 23:14 UTC (permalink / raw)
  To: git
  Cc: Stefan Beller, ramsay, jacob.keller, peff, gitster, jrnieder,
	johannes.schindelin, Jens.Lehmann, ericsunshine

This allows running external commands in parallel with ordered output
on stderr.

If we run external commands in parallel we cannot pipe the output directly
to our stdout/err as it would mix up. So each process's output will
flow through a pipe, which we buffer. One subprocess can be directly
piped to our stdout/err for low-latency feedback to the user.

Example:
Let's assume we have 5 submodules A,B,C,D,E and each fetch takes a
different amount of time as the different submodules vary in size, then
the output of fetches in sequential order might look like this:

 time -->
 output: |---A---| |-B-| |-------C-------| |-D-| |-E-|

When we schedule these submodules into maximal two parallel processes,
a schedule and sample output over time may look like this:

process 1: |---A---| |-D-| |-E-|

process 2: |-B-| |-------C-------|

output:    |---A---|B|---C-------|DE

So A will be perceived as it would run normally in the single child
version. As B has finished by the time A is done, we can dump its whole
progress buffer on stderr, such that it looks like it finished in no
time. Once that is done, C is determined to be the visible child and
its progress will be reported in real time.

So this way of output is really good for human consumption, as it only
changes the timing, not the actual output.

For machine consumption the output needs to be prepared in the tasks,
by either having a prefix per line or per block to indicate whose task's
output is displayed, because the output order may not follow the
original sequential ordering:

 |----A----| |--B--| |-C-|

will be scheduled to be all parallel:

process 1: |----A----|
process 2: |--B--|
process 3: |-C-|
output:    |----A----|CB

This happens because C finished before B did, so it will be queued for
output before B.

Signed-off-by: Stefan Beller <sbeller@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
---
 run-command.c          | 348 +++++++++++++++++++++++++++++++++++++++++++++++++
 run-command.h          |  63 +++++++++
 t/t0061-run-command.sh |  20 +++
 test-run-command.c     |  24 ++++
 4 files changed, 455 insertions(+)

diff --git a/run-command.c b/run-command.c
index 28e1d55..df84985 100644
--- a/run-command.c
+++ b/run-command.c
@@ -3,6 +3,8 @@
 #include "exec_cmd.h"
 #include "sigchain.h"
 #include "argv-array.h"
+#include "thread-utils.h"
+#include "strbuf.h"
 
 void child_process_init(struct child_process *child)
 {
@@ -852,3 +854,349 @@ int capture_command(struct child_process *cmd, struct strbuf *buf, size_t hint)
 	close(cmd->out);
 	return finish_command(cmd);
 }
+
+struct parallel_processes {
+	void *data;
+
+	int max_processes;
+	int nr_processes;
+
+	get_next_task_fn get_next_task;
+	start_failure_fn start_failure;
+	return_value_fn return_value;
+
+	struct {
+		unsigned in_use : 1;
+		struct child_process process;
+		struct strbuf err;
+	} *children;
+	/*
+	 * The struct pollfd is logically part of *children,
+	 * but the system call expects it as its own array.
+	 */
+	struct pollfd *pfd;
+
+	unsigned shutdown : 1;
+
+	int output_owner;
+	struct strbuf buffered_output; /* of finished children */
+} parallel_processes_struct;
+
+static int default_start_failure(void *data,
+				 struct child_process *cp,
+				 struct strbuf *err)
+{
+	int i;
+
+	strbuf_addstr(err, "Starting a child failed:");
+	for (i = 0; cp->argv[i]; i++)
+		strbuf_addf(err, " %s", cp->argv[i]);
+
+	return 0;
+}
+
+static int default_return_value(void *data,
+				struct child_process *cp,
+				struct strbuf *err,
+				int result)
+{
+	int i;
+
+	if (!result)
+		return 0;
+
+	strbuf_addf(err, "A child failed with return code %d:", result);
+	for (i = 0; cp->argv[i]; i++)
+		strbuf_addf(err, " %s", cp->argv[i]);
+
+	return 0;
+}
+
+static void kill_children(struct parallel_processes *pp, int signo)
+{
+	int i, n = pp->max_processes;
+
+	for (i = 0; i < n; i++)
+		if (pp->children[i].in_use)
+			kill(pp->children[i].process.pid, signo);
+}
+
+static void handle_children_on_signal(int signo)
+{
+	struct parallel_processes *pp = &parallel_processes_struct;
+
+	kill_children(pp, signo);
+	sigchain_pop(signo);
+	raise(signo);
+}
+
+static struct parallel_processes *pp_init(int n, void *data,
+					  get_next_task_fn get_next_task,
+					  start_failure_fn start_failure,
+					  return_value_fn return_value)
+{
+	int i;
+	struct parallel_processes *pp = &parallel_processes_struct;
+
+	if (n < 1)
+		n = online_cpus();
+
+	pp->max_processes = n;
+	pp->data = data;
+	if (!get_next_task)
+		die("BUG: you need to specify a get_next_task function");
+	pp->get_next_task = get_next_task;
+
+	pp->start_failure = start_failure ? start_failure : default_start_failure;
+	pp->return_value = return_value ? return_value : default_return_value;
+
+	pp->nr_processes = 0;
+	pp->output_owner = 0;
+	pp->children = xcalloc(n, sizeof(*pp->children));
+	pp->pfd = xcalloc(n, sizeof(*pp->pfd));
+	strbuf_init(&pp->buffered_output, 0);
+
+	for (i = 0; i < n; i++) {
+		strbuf_init(&pp->children[i].err, 0);
+		pp->pfd[i].events = POLLIN;
+		pp->pfd[i].fd = -1;
+	}
+	sigchain_push_common(handle_children_on_signal);
+	return pp;
+}
+
+static void pp_cleanup(struct parallel_processes *pp)
+{
+	int i;
+
+	for (i = 0; i < pp->max_processes; i++)
+		strbuf_release(&pp->children[i].err);
+
+	free(pp->children);
+	free(pp->pfd);
+	strbuf_release(&pp->buffered_output);
+
+	sigchain_pop_common();
+}
+
+static void set_nonblocking(int fd)
+{
+	int flags = fcntl(fd, F_GETFL);
+	if (flags < 0)
+		warning("Could not get file status flags, "
+			"output will be degraded");
+	else if (fcntl(fd, F_SETFL, flags | O_NONBLOCK))
+		warning("Could not set file status flags, "
+			"output will be degraded");
+}
+
+/* returns
+ *  0 if a new task was started.
+ *  1 if no new job was started (get_next_task ran out of work, or a
+ *    non-critical problem with starting a new command)
+ * -1 no new job was started, user wishes to shutdown early.
+ */
+static int pp_start_one(struct parallel_processes *pp)
+{
+	int i;
+
+	for (i = 0; i < pp->max_processes; i++)
+		if (!pp->children[i].in_use)
+			break;
+	if (i == pp->max_processes)
+		die("BUG: bookkeeping is hard");
+
+	if (!pp->get_next_task(pp->data,
+			       &pp->children[i].process,
+			       &pp->children[i].err))
+		return 1;
+
+	if (start_command(&pp->children[i].process)) {
+		int code = pp->start_failure(pp->data,
+					     &pp->children[i].process,
+					     &pp->children[i].err);
+		strbuf_addbuf(&pp->buffered_output, &pp->children[i].err);
+		strbuf_reset(&pp->children[i].err);
+		return code ? -1 : 1;
+	}
+
+	set_nonblocking(pp->children[i].process.err);
+
+	pp->nr_processes++;
+	pp->children[i].in_use = 1;
+	pp->pfd[i].fd = pp->children[i].process.err;
+	return 0;
+}
+
+static void pp_buffer_stderr(struct parallel_processes *pp, int output_timeout)
+{
+	int i;
+
+	while ((i = poll(pp->pfd, pp->max_processes, output_timeout)) < 0) {
+		if (errno == EINTR)
+			continue;
+		pp_cleanup(pp);
+		die_errno("poll");
+	}
+
+	/* Buffer output from all pipes. */
+	for (i = 0; i < pp->max_processes; i++) {
+		if (pp->children[i].in_use &&
+		    pp->pfd[i].revents & POLLIN)
+			if (strbuf_read_once(&pp->children[i].err,
+					     pp->children[i].process.err, 0) < 0)
+				if (errno != EAGAIN)
+					die_errno("read");
+	}
+}
+
+static void pp_output(struct parallel_processes *pp)
+{
+	int i = pp->output_owner;
+	if (pp->children[i].in_use &&
+	    pp->children[i].err.len) {
+		fputs(pp->children[i].err.buf, stderr);
+		strbuf_reset(&pp->children[i].err);
+	}
+}
+
+static int pp_collect_finished(struct parallel_processes *pp)
+{
+	int i = 0;
+	pid_t pid;
+	int wait_status, code;
+	int n = pp->max_processes;
+	int result = 0;
+
+	while (pp->nr_processes > 0) {
+		pid = waitpid(-1, &wait_status, WNOHANG);
+		if (pid == 0)
+			return 0;
+
+		if (pid < 0)
+			die_errno("wait");
+
+		for (i = 0; i < pp->max_processes; i++)
+			if (pp->children[i].in_use &&
+			    pid == pp->children[i].process.pid)
+				break;
+		if (i == pp->max_processes)
+			die("BUG: found a child process we were not aware of");
+
+		if (strbuf_read(&pp->children[i].err,
+				pp->children[i].process.err, 0) < 0)
+			die_errno("strbuf_read");
+
+		if (WIFSIGNALED(wait_status)) {
+			code = WTERMSIG(wait_status);
+			if (!pp->shutdown &&
+			    code != SIGINT && code != SIGQUIT)
+				strbuf_addf(&pp->children[i].err,
+					    "%s died of signal %d",
+					    pp->children[i].process.argv[0],
+					    code);
+			/*
+			 * This return value is chosen so that code & 0xff
+			 * mimics the exit code that a POSIX shell would report for
+			 * a program that died from this signal.
+			 */
+			code += 128;
+		} else if (WIFEXITED(wait_status)) {
+			code = WEXITSTATUS(wait_status);
+			/*
+			 * Convert special exit code when execvp failed.
+			 */
+			if (code == 127) {
+				code = -1;
+				errno = ENOENT;
+			}
+		} else
+			strbuf_addf(&pp->children[i].err,
+				    "waitpid is confused (%s)",
+				    pp->children[i].process.argv[0]);
+
+
+		if (pp->return_value(pp->data, &pp->children[i].process,
+				     &pp->children[i].err, code))
+			result = 1;
+
+		argv_array_clear(&pp->children[i].process.args);
+		argv_array_clear(&pp->children[i].process.env_array);
+
+		pp->nr_processes--;
+		pp->children[i].in_use = 0;
+		pp->pfd[i].fd = -1;
+
+		if (i != pp->output_owner) {
+			strbuf_addbuf(&pp->buffered_output, &pp->children[i].err);
+			strbuf_reset(&pp->children[i].err);
+		} else {
+			fputs(pp->children[i].err.buf, stderr);
+			strbuf_reset(&pp->children[i].err);
+
+			/* Output all other finished child processes */
+			fputs(pp->buffered_output.buf, stderr);
+			strbuf_reset(&pp->buffered_output);
+
+			/*
+			 * Pick next process to output live.
+			 * NEEDSWORK:
+			 * For now we pick it randomly by doing a round
+			 * robin. Later we may want to pick the one with
+			 * the most output or the longest or shortest
+			 * running process time.
+			 */
+			for (i = 0; i < n; i++)
+				if (pp->children[(pp->output_owner + i) % n].in_use)
+					break;
+			pp->output_owner = (pp->output_owner + i) % n;
+		}
+	}
+	return result;
+}
+
+int run_processes_parallel(int n, void *data,
+			   get_next_task_fn get_next_task,
+			   start_failure_fn start_failure,
+			   return_value_fn return_value)
+{
+	int no_more_task = 0;
+	struct parallel_processes *pp;
+
+	pp = pp_init(n, data, get_next_task, start_failure, return_value);
+	while (1) {
+		int i;
+		int output_timeout = 100;
+		int spawn_cap = 4;
+
+		if (!no_more_task) {
+			for (i = 0; i < spawn_cap; i++) {
+				int code;
+				if (pp->nr_processes == pp->max_processes)
+					break;
+
+				code = pp_start_one(pp);
+				if (!code)
+					continue;
+				if (code < 0) {
+					pp->shutdown = 1;
+					kill_children(pp, SIGTERM);
+				}
+				no_more_task = 1;
+				break;
+			}
+		}
+		if (no_more_task && !pp->nr_processes)
+			break;
+		pp_buffer_stderr(pp, output_timeout);
+		pp_output(pp);
+		if (pp_collect_finished(pp)) {
+			kill_children(pp, SIGTERM);
+			pp->shutdown = 1;
+			no_more_task = 1;
+		}
+	}
+
+	pp_cleanup(pp);
+	return 0;
+}
diff --git a/run-command.h b/run-command.h
index 5b4425a..1179cb0 100644
--- a/run-command.h
+++ b/run-command.h
@@ -119,4 +119,67 @@ struct async {
 int start_async(struct async *async);
 int finish_async(struct async *async);
 
+/**
+ * This callback should initialize the child process and preload the
+ * error channel. The preloading of err is useful if you want to have a message
+ * printed directly before the output of the child process.
+ * You MUST set stdout_to_stderr.
+ *
+ * Return 1 if the next child is ready to run.
+ * Return 0 if there are no more tasks to be processed.
+ */
+typedef int (*get_next_task_fn)(void *data,
+				struct child_process *cp,
+				struct strbuf *err);
+
+/**
+ * This callback is called whenever there are problems starting
+ * a new process.
+ *
+ * You must not write to stdout or stderr in this function. Add your
+ * message to the strbuf err instead, which will be printed without
+ * messing up the output of the other parallel processes.
+ *
+ * Return 0 to continue the parallel processing. To abort gracefully,
+ * return non zero.
+ */
+typedef int (*start_failure_fn)(void *data,
+				struct child_process *cp,
+				struct strbuf *err);
+
+/**
+ * This callback is called on every child process that finished
+ * processing.
+ *
+ * You must not write to stdout or stderr in this function. Add your
+ * message to the strbuf err instead, which will be printed without
+ * messing up the output of the other parallel processes.
+ *
+ * Return 0 to continue the parallel processing. To abort gracefully,
+ * return non zero.
+ */
+typedef int (*return_value_fn)(void *data,
+			       struct child_process *cp,
+			       struct strbuf *err,
+			       int result);
+
+/**
+ * Runs up to n processes at the same time. Whenever a process can be
+ * started, the callback `get_next_task` is called to obtain the data
+ * fed to the child process.
+ *
+ * The children started via this function run in parallel and their output
+ * to stderr is buffered, while one of the children will directly output
+ * to stderr.
+ *
+ * If start_failure_fn and return_value_fn are NULL, default handlers
+ * will be used. The default handlers will print an error message on
+ * error without issuing an emergency stop.
+ */
+
+int run_processes_parallel(int n, void *data,
+			   get_next_task_fn,
+			   start_failure_fn,
+			   return_value_fn);
+
 #endif
diff --git a/t/t0061-run-command.sh b/t/t0061-run-command.sh
index 9acf628..49aa3db 100755
--- a/t/t0061-run-command.sh
+++ b/t/t0061-run-command.sh
@@ -47,4 +47,24 @@ test_expect_success POSIXPERM,SANITY 'unreadable directory in PATH' '
 	test_cmp expect actual
 '
 
+cat >expect <<-EOF
+preloaded output of a child
+Hello
+World
+preloaded output of a child
+Hello
+World
+preloaded output of a child
+Hello
+World
+preloaded output of a child
+Hello
+World
+EOF
+
+test_expect_success 'run_command runs in parallel' '
+	test-run-command run-command-parallel-4 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
+	test_cmp expect actual
+'
+
 test_done
diff --git a/test-run-command.c b/test-run-command.c
index 89c7de2..2555791 100644
--- a/test-run-command.c
+++ b/test-run-command.c
@@ -10,9 +10,29 @@
 
 #include "git-compat-util.h"
 #include "run-command.h"
+#include "argv-array.h"
+#include "strbuf.h"
 #include <string.h>
 #include <errno.h>
 
+static int number_callbacks;
+static int parallel_next(void *data,
+			 struct child_process *cp,
+			 struct strbuf *err)
+{
+	struct child_process *d = data;
+	if (number_callbacks >= 4)
+		return 0;
+
+	argv_array_pushv(&cp->args, d->argv);
+	cp->stdout_to_stderr = 1;
+	cp->no_stdin = 1;
+	cp->err = -1;
+	strbuf_addf(err, "preloaded output of a child\n");
+	number_callbacks++;
+	return 1;
+}
+
 int main(int argc, char **argv)
 {
 	struct child_process proc = CHILD_PROCESS_INIT;
@@ -30,6 +50,10 @@ int main(int argc, char **argv)
 	if (!strcmp(argv[1], "run-command"))
 		exit(run_command(&proc));
 
+	if (!strcmp(argv[1], "run-command-parallel-4"))
+		exit(run_processes_parallel(4, &proc, parallel_next,
+					 NULL, NULL));
+
 	fprintf(stderr, "check usage\n");
 	return 1;
 }
-- 
2.5.0.273.g6fa2560.dirty

^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [PATCH 7/8] fetch_populated_submodules: use new parallel job processing
  2015-09-28 23:13 [PATCH 0/8] fetch submodules in parallel Stefan Beller
                   ` (5 preceding siblings ...)
  2015-09-28 23:14 ` [PATCH 6/8] run-command: add an asynchronous parallel child processor Stefan Beller
@ 2015-09-28 23:14 ` Stefan Beller
  2015-09-28 23:14 ` [PATCH 8/8] submodules: allow parallel fetching, add tests and documentation Stefan Beller
  7 siblings, 0 replies; 18+ messages in thread
From: Stefan Beller @ 2015-09-28 23:14 UTC (permalink / raw)
  To: git
  Cc: Stefan Beller, ramsay, jacob.keller, peff, gitster, jrnieder,
	johannes.schindelin, Jens.Lehmann, ericsunshine

In a later patch we enable parallel processing of submodules; this
change only adds the possibility for it, so it should not change
any user-facing behavior.

Signed-off-by: Stefan Beller <sbeller@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
---
 submodule.c | 126 ++++++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 92 insertions(+), 34 deletions(-)

diff --git a/submodule.c b/submodule.c
index 1d64e57..8134fe7 100644
--- a/submodule.c
+++ b/submodule.c
@@ -12,6 +12,7 @@
 #include "sha1-array.h"
 #include "argv-array.h"
 #include "blob.h"
+#include "thread-utils.h"
 
 static int config_fetch_recurse_submodules = RECURSE_SUBMODULES_ON_DEMAND;
 static struct string_list changed_submodule_paths;
@@ -615,37 +616,89 @@ static void calculate_changed_submodule_paths(void)
 	initialized_fetch_ref_tips = 0;
 }
 
+struct submodule_parallel_fetch {
+	int count;
+	struct argv_array args;
+	const char *work_tree;
+	const char *prefix;
+	int command_line_option;
+	int quiet;
+	int result;
+};
+#define SPF_INIT {0, ARGV_ARRAY_INIT, NULL, NULL, 0, 0, 0}
+
+int get_next_submodule(void *data, struct child_process *cp,
+		       struct strbuf *err);
+
+static int fetch_start_failure(void *data, struct child_process *cp,
+			       struct strbuf *err)
+{
+	struct submodule_parallel_fetch *spf = data;
+
+	spf->result = 1;
+
+	return 0;
+}
+
+static int fetch_finish(void *data, struct child_process *cp,
+			struct strbuf *err, int retvalue)
+{
+	struct submodule_parallel_fetch *spf = data;
+
+	if (retvalue)
+		spf->result = 1;
+
+	return 0;
+}
+
 int fetch_populated_submodules(const struct argv_array *options,
 			       const char *prefix, int command_line_option,
 			       int quiet)
 {
-	int i, result = 0;
-	struct child_process cp = CHILD_PROCESS_INIT;
-	struct argv_array argv = ARGV_ARRAY_INIT;
-	const char *work_tree = get_git_work_tree();
-	if (!work_tree)
+	int i;
+	int max_parallel_jobs = 1;
+	struct submodule_parallel_fetch spf = SPF_INIT;
+
+	spf.work_tree = get_git_work_tree();
+	spf.command_line_option = command_line_option;
+	spf.quiet = quiet;
+	spf.prefix = prefix;
+
+	if (!spf.work_tree)
 		goto out;
 
 	if (read_cache() < 0)
 		die("index file corrupt");
 
-	argv_array_push(&argv, "fetch");
+	argv_array_push(&spf.args, "fetch");
 	for (i = 0; i < options->argc; i++)
-		argv_array_push(&argv, options->argv[i]);
-	argv_array_push(&argv, "--recurse-submodules-default");
+		argv_array_push(&spf.args, options->argv[i]);
+	argv_array_push(&spf.args, "--recurse-submodules-default");
 	/* default value, "--submodule-prefix" and its value are added later */
 
-	cp.env = local_repo_env;
-	cp.git_cmd = 1;
-	cp.no_stdin = 1;
-
 	calculate_changed_submodule_paths();
+	run_processes_parallel(max_parallel_jobs, &spf,
+			       get_next_submodule,
+			       fetch_start_failure,
+			       fetch_finish);
+
+	argv_array_clear(&spf.args);
+out:
+	string_list_clear(&changed_submodule_paths, 1);
+	return spf.result;
+}
 
-	for (i = 0; i < active_nr; i++) {
+int get_next_submodule(void *data, struct child_process *cp,
+		       struct strbuf *err)
+{
+	int ret = 0;
+	struct submodule_parallel_fetch *spf = data;
+
+	for ( ; spf->count < active_nr; spf->count++) {
 		struct strbuf submodule_path = STRBUF_INIT;
 		struct strbuf submodule_git_dir = STRBUF_INIT;
 		struct strbuf submodule_prefix = STRBUF_INIT;
-		const struct cache_entry *ce = active_cache[i];
+		const struct cache_entry *ce = active_cache[spf->count];
 		const char *git_dir, *default_argv;
 		const struct submodule *submodule;
 
@@ -657,7 +710,7 @@ int fetch_populated_submodules(const struct argv_array *options,
 			submodule = submodule_from_name(null_sha1, ce->name);
 
 		default_argv = "yes";
-		if (command_line_option == RECURSE_SUBMODULES_DEFAULT) {
+		if (spf->command_line_option == RECURSE_SUBMODULES_DEFAULT) {
 			if (submodule &&
 			    submodule->fetch_recurse !=
 						RECURSE_SUBMODULES_NONE) {
@@ -680,40 +733,45 @@ int fetch_populated_submodules(const struct argv_array *options,
 					default_argv = "on-demand";
 				}
 			}
-		} else if (command_line_option == RECURSE_SUBMODULES_ON_DEMAND) {
+		} else if (spf->command_line_option == RECURSE_SUBMODULES_ON_DEMAND) {
 			if (!unsorted_string_list_lookup(&changed_submodule_paths, ce->name))
 				continue;
 			default_argv = "on-demand";
 		}
 
-		strbuf_addf(&submodule_path, "%s/%s", work_tree, ce->name);
+		strbuf_addf(&submodule_path, "%s/%s", spf->work_tree, ce->name);
 		strbuf_addf(&submodule_git_dir, "%s/.git", submodule_path.buf);
-		strbuf_addf(&submodule_prefix, "%s%s/", prefix, ce->name);
+		strbuf_addf(&submodule_prefix, "%s%s/", spf->prefix, ce->name);
 		git_dir = read_gitfile(submodule_git_dir.buf);
 		if (!git_dir)
 			git_dir = submodule_git_dir.buf;
 		if (is_directory(git_dir)) {
-			if (!quiet)
-				fprintf(stderr, "Fetching submodule %s%s\n", prefix, ce->name);
-			cp.dir = submodule_path.buf;
-			argv_array_push(&argv, default_argv);
-			argv_array_push(&argv, "--submodule-prefix");
-			argv_array_push(&argv, submodule_prefix.buf);
-			cp.argv = argv.argv;
-			if (run_command(&cp))
-				result = 1;
-			argv_array_pop(&argv);
-			argv_array_pop(&argv);
-			argv_array_pop(&argv);
+			child_process_init(cp);
+			cp->dir = strbuf_detach(&submodule_path, NULL);
+			cp->env = local_repo_env;
+			cp->git_cmd = 1;
+			cp->no_stdin = 1;
+			cp->stdout_to_stderr = 1;
+			cp->err = -1;
+			if (!spf->quiet)
+				strbuf_addf(err, "Fetching submodule %s%s\n",
+					    spf->prefix, ce->name);
+			argv_array_init(&cp->args);
+			argv_array_pushv(&cp->args, spf->args.argv);
+			argv_array_push(&cp->args, default_argv);
+			argv_array_push(&cp->args, "--submodule-prefix");
+			argv_array_push(&cp->args, submodule_prefix.buf);
+			ret = 1;
 		}
 		strbuf_release(&submodule_path);
 		strbuf_release(&submodule_git_dir);
 		strbuf_release(&submodule_prefix);
+		if (ret) {
+			spf->count++;
+			return 1;
+		}
 	}
-	argv_array_clear(&argv);
-out:
-	string_list_clear(&changed_submodule_paths, 1);
-	return result;
+	return 0;
 }
 
 unsigned is_submodule_modified(const char *path, int ignore_untracked)
-- 
2.5.0.273.g6fa2560.dirty

^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [PATCH 8/8] submodules: allow parallel fetching, add tests and documentation
  2015-09-28 23:13 [PATCH 0/8] fetch submodules in parallel Stefan Beller
                   ` (6 preceding siblings ...)
  2015-09-28 23:14 ` [PATCH 7/8] fetch_populated_submodules: use new parallel job processing Stefan Beller
@ 2015-09-28 23:14 ` Stefan Beller
  7 siblings, 0 replies; 18+ messages in thread
From: Stefan Beller @ 2015-09-28 23:14 UTC (permalink / raw)
  To: git
  Cc: Stefan Beller, ramsay, jacob.keller, peff, gitster, jrnieder,
	johannes.schindelin, Jens.Lehmann, ericsunshine

This enables the work of the previous patches.

Signed-off-by: Stefan Beller <sbeller@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
---
 Documentation/fetch-options.txt |  7 +++++++
 builtin/fetch.c                 |  6 +++++-
 builtin/pull.c                  |  6 ++++++
 submodule.c                     |  3 +--
 submodule.h                     |  2 +-
 t/t5526-fetch-submodules.sh     | 19 +++++++++++++++++++
 6 files changed, 39 insertions(+), 4 deletions(-)

diff --git a/Documentation/fetch-options.txt b/Documentation/fetch-options.txt
index 45583d8..6b109f6 100644
--- a/Documentation/fetch-options.txt
+++ b/Documentation/fetch-options.txt
@@ -100,6 +100,13 @@ ifndef::git-pull[]
 	reference to a commit that isn't already in the local submodule
 	clone.
 
+-j::
+--jobs=<n>::
+	Number of parallel children to be used for fetching submodules.
+	Each will fetch from different submodules, such that fetching many
+	submodules will be faster. By default submodules will be fetched
+	one at a time.
+
 --no-recurse-submodules::
 	Disable recursive fetching of submodules (this has the same effect as
 	using the '--recurse-submodules=no' option).
diff --git a/builtin/fetch.c b/builtin/fetch.c
index ee1f1a9..f28eac6 100644
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -37,6 +37,7 @@ static int prune = -1; /* unspecified */
 static int all, append, dry_run, force, keep, multiple, update_head_ok, verbosity;
 static int progress = -1, recurse_submodules = RECURSE_SUBMODULES_DEFAULT;
 static int tags = TAGS_DEFAULT, unshallow, update_shallow;
+static int max_children = 1;
 static const char *depth;
 static const char *upload_pack;
 static struct strbuf default_rla = STRBUF_INIT;
@@ -99,6 +100,8 @@ static struct option builtin_fetch_options[] = {
 		    N_("fetch all tags and associated objects"), TAGS_SET),
 	OPT_SET_INT('n', NULL, &tags,
 		    N_("do not fetch all tags (--no-tags)"), TAGS_UNSET),
+	OPT_INTEGER('j', "jobs", &max_children,
+		    N_("number of submodules fetched in parallel")),
 	OPT_BOOL('p', "prune", &prune,
 		 N_("prune remote-tracking branches no longer on remote")),
 	{ OPTION_CALLBACK, 0, "recurse-submodules", NULL, N_("on-demand"),
@@ -1217,7 +1220,8 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
 		result = fetch_populated_submodules(&options,
 						    submodule_prefix,
 						    recurse_submodules,
-						    verbosity < 0);
+						    verbosity < 0,
+						    max_children);
 		argv_array_clear(&options);
 	}
 
diff --git a/builtin/pull.c b/builtin/pull.c
index 722a83c..f0af196 100644
--- a/builtin/pull.c
+++ b/builtin/pull.c
@@ -94,6 +94,7 @@ static int opt_force;
 static char *opt_tags;
 static char *opt_prune;
 static char *opt_recurse_submodules;
+static char *max_children;
 static int opt_dry_run;
 static char *opt_keep;
 static char *opt_depth;
@@ -177,6 +178,9 @@ static struct option pull_options[] = {
 		N_("on-demand"),
 		N_("control recursive fetching of submodules"),
 		PARSE_OPT_OPTARG),
+	OPT_PASSTHRU('j', "jobs", &max_children, N_("n"),
+		N_("number of submodules pulled in parallel"),
+		PARSE_OPT_OPTARG),
 	OPT_BOOL(0, "dry-run", &opt_dry_run,
 		N_("dry run")),
 	OPT_PASSTHRU('k', "keep", &opt_keep, NULL,
@@ -524,6 +528,8 @@ static int run_fetch(const char *repo, const char **refspecs)
 		argv_array_push(&args, opt_prune);
 	if (opt_recurse_submodules)
 		argv_array_push(&args, opt_recurse_submodules);
+	if (max_children)
+		argv_array_push(&args, max_children);
 	if (opt_dry_run)
 		argv_array_push(&args, "--dry-run");
 	if (opt_keep)
diff --git a/submodule.c b/submodule.c
index 8134fe7..7ab89f4 100644
--- a/submodule.c
+++ b/submodule.c
@@ -653,10 +653,9 @@ static int fetch_finish(void *data, struct child_process *cp,
 
 int fetch_populated_submodules(const struct argv_array *options,
 			       const char *prefix, int command_line_option,
-			       int quiet)
+			       int quiet, int max_parallel_jobs)
 {
 	int i;
-	int max_parallel_jobs = 1;
 	struct submodule_parallel_fetch spf = SPF_INIT;
 
 	spf.work_tree = get_git_work_tree();
diff --git a/submodule.h b/submodule.h
index 5507c3d..cbc0003 100644
--- a/submodule.h
+++ b/submodule.h
@@ -31,7 +31,7 @@ void set_config_fetch_recurse_submodules(int value);
 void check_for_new_submodule_commits(unsigned char new_sha1[20]);
 int fetch_populated_submodules(const struct argv_array *options,
 			       const char *prefix, int command_line_option,
-			       int quiet);
+			       int quiet, int max_parallel_jobs);
 unsigned is_submodule_modified(const char *path, int ignore_untracked);
 int submodule_uses_gitfile(const char *path);
 int ok_to_remove_submodule(const char *path);
diff --git a/t/t5526-fetch-submodules.sh b/t/t5526-fetch-submodules.sh
index 17759b1..1b4ce69 100755
--- a/t/t5526-fetch-submodules.sh
+++ b/t/t5526-fetch-submodules.sh
@@ -71,6 +71,16 @@ test_expect_success "fetch --recurse-submodules recurses into submodules" '
 	test_i18ncmp expect.err actual.err
 '
 
+test_expect_success "fetch --recurse-submodules -j2 has the same output behaviour" '
+	add_upstream_commit &&
+	(
+		cd downstream &&
+		git fetch --recurse-submodules -j2 2>../actual.err
+	) &&
+	test_must_be_empty actual.out &&
+	test_i18ncmp expect.err actual.err
+'
+
 test_expect_success "fetch alone only fetches superproject" '
 	add_upstream_commit &&
 	(
@@ -140,6 +150,15 @@ test_expect_success "--quiet propagates to submodules" '
 	! test -s actual.err
 '
 
+test_expect_success "--quiet propagates to parallel submodules" '
+	(
+		cd downstream &&
+		git fetch --recurse-submodules -j 2 --quiet  >../actual.out 2>../actual.err
+	) &&
+	! test -s actual.out &&
+	! test -s actual.err
+'
+
 test_expect_success "--dry-run propagates to submodules" '
 	add_upstream_commit &&
 	(
-- 
2.5.0.273.g6fa2560.dirty

^ permalink raw reply related	[flat|nested] 18+ messages in thread

* Re: [PATCH 6/8] run-command: add an asynchronous parallel child processor
  2015-09-28 23:14 ` [PATCH 6/8] run-command: add an asynchronous parallel child processor Stefan Beller
@ 2015-09-30  3:12   ` Junio C Hamano
  2015-09-30 18:28     ` Stefan Beller
  2015-09-30 18:48     ` Junio C Hamano
  0 siblings, 2 replies; 18+ messages in thread
From: Junio C Hamano @ 2015-09-30  3:12 UTC (permalink / raw)
  To: Stefan Beller
  Cc: git, ramsay, jacob.keller, peff, jrnieder, johannes.schindelin,
	Jens.Lehmann, ericsunshine

Stefan Beller <sbeller@google.com> writes:

> +	while (1) {
> +		int i;
> +		int output_timeout = 100;
> +		int spawn_cap = 4;
> +
> +		if (!no_more_task) {
> +			for (i = 0; i < spawn_cap; i++) {
> +				int code;
> +				if (pp->nr_processes == pp->max_processes)
> +					break;
> +
> +				code = pp_start_one(pp);
> +				if (!code)
> +					continue;
> +				if (code < 0) {
> +					pp->shutdown = 1;
> +					kill_children(pp, SIGTERM);
> +				}
> +				no_more_task = 1;
> +				break;
> +			}
> +		}
> +		if (no_more_task && !pp->nr_processes)
> +			break;

I may have comments on other parts of this patch, but I noticed this
a bit hard to read while reading the end result.

Losing the outer "if (!no_more_task)" and replacing the above with

	for (no_more_task = 0, i = 0;
             !no_more_task && i < spawn_cap;
             i++) {
        	... do things that may or may not set
                ... no_more_task
	}
	if (no_more_task && ...)
        	break;

would make it clear that regardless of spawn_cap, no_more_task is
honored.

Also I think that having the outer "if (!no_more_task)" and not
having "no_more_task = 0" after each iteration is buggy.  Even when
next_task() told start_one() that it does not have more tasks for
now, as long as there are running processes, it is entirely plausible
that next call to next_task() can return "now we have some more task
to do".

Although I think it would make it unsightly, if you want to have the
large indentation that protects the spawn_cap loop from getting
entered, the condition would be 

	if (!pp->shutdown) {
		for (... spawn_cap loop ...) {
                	...
		}
	}

That structure could make sense.  But even then I would probably
write it more like

	...
	int spawn_cap = 4;

	pp = pp_init(...);
        while (1) {
        	int no_more_task = 0;

                for (i = 0;
                     !no_more_task && !pp->shutdown && i < spawn_cap;
                     i++) {
			...
                        code = start_one();
                        ... set no_more_task to 1 as needed
                        ... set pp->shutdown to 1 as needed
		}
                if (no_more_task && !pp->nr_processes)
			break;
		buffer_stderr(...);
                output(...);
                collect(...);
	}

That is, you need to have two independent conditions that tell you
not to spawn any new task:

 (1) You called start_one() repeatedly and next_task() said "nothing
     more for now", so you know calling start_one() one more time
     without changing other conditions (like draining output from
     running processes and culling finished ones) will not help.

     Letting other parts of the application that uses this scheduler
     loop (i.e. drain output, cull finished process, etc.) may
     change the situation and you _do_ need to call start_one() when
     the next_task() merely said "nothing more for now".

     That is what no_more_task controls.

 (2) The application said "I want the system to be gracefully shut
     down".  next_task() may also have said "nothing more for now"
     and you may have set no_more_task in response to it, but unlike
     (1) above, draining and culling must be done only to shut the
     system down, the application does not want new processes to be
     added.  You do not want to enter the spawn_cap loop when it
     happens.

     That is what pp->shutdown controls.

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [PATCH 5/8] sigchain: add command to pop all common signals
  2015-09-28 23:14 ` [PATCH 5/8] sigchain: add command to pop all common signals Stefan Beller
@ 2015-09-30  5:23   ` Junio C Hamano
  0 siblings, 0 replies; 18+ messages in thread
From: Junio C Hamano @ 2015-09-30  5:23 UTC (permalink / raw)
  To: Stefan Beller
  Cc: git, ramsay, jacob.keller, peff, jrnieder, johannes.schindelin,
	Jens.Lehmann, ericsunshine

Stefan Beller <sbeller@google.com> writes:

> The new method removes all common signal handlers that were installed
> by sigchain_push.
>
> CC: Jeff King <peff@peff.net>
> Signed-off-by: Stefan Beller <sbeller@google.com>
> ---
>  sigchain.c | 9 +++++++++
>  sigchain.h | 1 +
>  2 files changed, 10 insertions(+)

Sounds like a good idea, as you need to clean them all up if you did
push_common() and ended up not receiving any signal.

This is merely pure aesthetics, but I somehow thought that ordering
them in the reverse as listed in push_common() might make more
sense, though.

Thanks.

>
> diff --git a/sigchain.c b/sigchain.c
> index faa375d..9262307 100644
> --- a/sigchain.c
> +++ b/sigchain.c
> @@ -50,3 +50,12 @@ void sigchain_push_common(sigchain_fun f)
>  	sigchain_push(SIGQUIT, f);
>  	sigchain_push(SIGPIPE, f);
>  }
> +
> +void sigchain_pop_common(void)
> +{
> +	sigchain_pop(SIGINT);
> +	sigchain_pop(SIGHUP);
> +	sigchain_pop(SIGTERM);
> +	sigchain_pop(SIGQUIT);
> +	sigchain_pop(SIGPIPE);
> +}
> diff --git a/sigchain.h b/sigchain.h
> index 618083b..138b20f 100644
> --- a/sigchain.h
> +++ b/sigchain.h
> @@ -7,5 +7,6 @@ int sigchain_push(int sig, sigchain_fun f);
>  int sigchain_pop(int sig);
>  
>  void sigchain_push_common(sigchain_fun f);
> +void sigchain_pop_common(void);
>  
>  #endif /* SIGCHAIN_H */

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [PATCH 6/8] run-command: add an asynchronous parallel child processor
  2015-09-30  3:12   ` Junio C Hamano
@ 2015-09-30 18:28     ` Stefan Beller
  2015-09-30 18:48     ` Junio C Hamano
  1 sibling, 0 replies; 18+ messages in thread
From: Stefan Beller @ 2015-09-30 18:28 UTC (permalink / raw)
  To: Junio C Hamano
  Cc: git@vger.kernel.org, Ramsay Jones, Jacob Keller, Jeff King,
	Jonathan Nieder, Johannes Schindelin, Jens Lehmann, Eric Sunshine

On Tue, Sep 29, 2015 at 8:12 PM, Junio C Hamano <gitster@pobox.com> wrote:
> Stefan Beller <sbeller@google.com> writes:
>
>> +     while (1) {
>> +             int i;
>> +             int output_timeout = 100;
>> +             int spawn_cap = 4;
>> +
>> +             if (!no_more_task) {
>> +                     for (i = 0; i < spawn_cap; i++) {
>> +                             int code;
>> +                             if (pp->nr_processes == pp->max_processes)
>> +                                     break;
>> +
>> +                             code = pp_start_one(pp);
>> +                             if (!code)
>> +                                     continue;
>> +                             if (code < 0) {
>> +                                     pp->shutdown = 1;
>> +                                     kill_children(pp, SIGTERM);
>> +                             }
>> +                             no_more_task = 1;
>> +                             break;
>> +                     }
>> +             }
>> +             if (no_more_task && !pp->nr_processes)
>> +                     break;
>
> I may have comments on other parts of this patch, but I noticed this
> a bit hard to read while reading the end result.
>
> Losing the outer "if (!no_more_task)" and replacing the above with
>
>         for (no_more_task = 0, i = 0;
>              !no_more_task && i < spawn_cap;
>              i++) {
>                 ... do things that may or may not set
>                 ... no_more_task
>         }
>         if (no_more_task && ...)
>                 break;
>
> would make it clear that regardless of spawn_cap, no_more_task is
> honored.
>
> Also I think that having the outer "if (!no_more_task)" and not
> having "no_more_task = 0" after each iteration is buggy.  Even when
> next_task() told start_one() that it does not have more tasks for
> now, as long as there are running processes, it is entirely plausible
> that next call to next_task() can return "now we have some more task
> to do".
>
> Although I think it would make it unsightly, if you want to have the
> large indentation that protects the spawn_cap loop from getting
> entered, the condition would be
>
>         if (!pp->shutdown) {
>                 for (... spawn_cap loop ...) {
>                         ...
>                 }
>         }
>
> That structure could make sense.  But even then I would probably
> write it more like
>
>         ...
>         int spawn_cap = 4;
>
>         pp = pp_init(...);
>         while (1) {
>                 int no_more_task = 0;
>
>                 for (i = 0;
>                      !no_more_task && !pp->shutdown && i < spawn_cap;
>                      i++) {
>                         ...
>                         code = start_one();
>                         ... set no_more_task to 1 as needed
>                         ... set pp->shutdown to 1 as needed
>                 }
>                 if (no_more_task && !pp->nr_processes)
>                         break;
>                 buffer_stderr(...);
>                 output(...);
>                 collect(...);
>         }

That is indeed better to read.
Though if we reset `no_more_task` each iteration of the loop by
having its declaration inside the loop, the condition for exiting the
loop needs to be:

    if ((no_more_task || pp->shutdown) && !pp->nr_processes)
        break;

for correctness.

When looking at that condition, I realized that I implicitly assumed
the workflow, where get_next_task returns 0 intermittently, to be a
second class citizen. I reworded the documentation as well there.

>
> That is, you need to have two independent conditions that tell you
> not to spawn any new task:
>
>  (1) You called start_one() repeatedly and next_task() said "nothing
>      more for now", so you know calling start_one() one more time
>      without changing other conditions (like draining output from
>      running processes and culling finished ones) will not help.
>
>      Letting other parts of the application that uses this scheduler
>      loop (i.e. drain output, cull finished process, etc.) may
>      change the situation and you _do_ need to call start_one() when
>      the next_task() merely said "nothing more for now".
>
>      That is what no_more_task controls.
>
>  (2) The application said "I want the system to be gracefully shut
>      down".  next_task() may also have said "nothing more for now"
>      and you may have set no_more_task in response to it, but unlike
>      (1) above, draining and culling must be done only to shut the
>      system down, the application does not want new processes to be
>      added.  You do not want to enter the spawn_cap loop when it
>      happens.
>
>      That is what pp->shutdown controls.
>

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [PATCH 6/8] run-command: add an asynchronous parallel child processor
  2015-09-30  3:12   ` Junio C Hamano
  2015-09-30 18:28     ` Stefan Beller
@ 2015-09-30 18:48     ` Junio C Hamano
  1 sibling, 0 replies; 18+ messages in thread
From: Junio C Hamano @ 2015-09-30 18:48 UTC (permalink / raw)
  To: Stefan Beller
  Cc: git, ramsay, jacob.keller, peff, jrnieder, johannes.schindelin,
	Jens.Lehmann, ericsunshine

Junio C Hamano <gitster@pobox.com> writes:

> I may have comments on other parts of this patch, but I noticed this
> a bit hard to read while reading the end result.
> ...

I finished reading the remainder.  Other than the above all look
sensible.

Will replace what had been queued.

Thanks.

^ permalink raw reply	[flat|nested] 18+ messages in thread

* [PATCH 2/8] xread: poll on non blocking fds
  2015-12-14 19:37 [PATCH 0/8] Rerolling sb/submodule-parallel-fetch for the time after 2.7 Stefan Beller
@ 2015-12-14 19:37 ` Stefan Beller
  2015-12-14 22:58   ` Eric Sunshine
  0 siblings, 1 reply; 18+ messages in thread
From: Stefan Beller @ 2015-12-14 19:37 UTC (permalink / raw)
  To: sbeller, git
  Cc: peff, gitster, jrnieder, johannes.schindelin, Jens.Lehmann,
	ericsunshine, j6t

From the man page:
EAGAIN The file descriptor fd refers to a file other than a socket
       and has been marked nonblocking (O_NONBLOCK), and the read
       would block.

EAGAIN or EWOULDBLOCK
       The file descriptor fd refers to a socket and has been marked
       nonblocking (O_NONBLOCK), and the read would block.  POSIX.1-2001
       allows either error to be returned for this case, and does not
       require these constants to have the same value, so a portable
       application should check for both possibilities.

If we get an EAGAIN or EWOULDBLOCK the fd must have set O_NONBLOCK.
As the intent of xread is to read as much as possible either until the
fd is EOF or an actual error occurs, we can ease the feeder of the fd
by not spinning the whole time, but rather wait for it politely by not
busy waiting.

We should not care if the call to poll failed, as we're in an infinite
loop and can only get out with the correct read().

Signed-off-by: Stefan Beller <sbeller@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
---
 wrapper.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/wrapper.c b/wrapper.c
index 6fcaa4d..4f720fe 100644
--- a/wrapper.c
+++ b/wrapper.c
@@ -236,8 +236,17 @@ ssize_t xread(int fd, void *buf, size_t len)
 	    len = MAX_IO_SIZE;
 	while (1) {
 		nr = read(fd, buf, len);
-		if ((nr < 0) && (errno == EAGAIN || errno == EINTR))
-			continue;
+		if (nr < 0) {
+			if (errno == EINTR)
+				continue;
+			if (errno == EAGAIN || errno == EWOULDBLOCK) {
+				struct pollfd pfd;
+				pfd.events = POLLIN;
+				pfd.fd = fd;
+				/* We deliberately ignore the return value */
+				poll(&pfd, 1, -1);
+			}
+		}
 		return nr;
 	}
 }
-- 
2.6.4.443.ge094245.dirty

^ permalink raw reply related	[flat|nested] 18+ messages in thread

* Re: [PATCH 2/8] xread: poll on non blocking fds
  2015-12-14 19:37 ` [PATCH 2/8] xread: poll on non blocking fds Stefan Beller
@ 2015-12-14 22:58   ` Eric Sunshine
  2015-12-14 23:07     ` Stefan Beller
  2015-12-14 23:11     ` Junio C Hamano
  0 siblings, 2 replies; 18+ messages in thread
From: Eric Sunshine @ 2015-12-14 22:58 UTC (permalink / raw)
  To: Stefan Beller
  Cc: Git List, Jeff King, Junio C Hamano, Jonathan Nieder,
	Johannes Schindelin, Jens Lehmann, Johannes Sixt

On Mon, Dec 14, 2015 at 2:37 PM, Stefan Beller <sbeller@google.com> wrote:
> From the man page:
> EAGAIN The file descriptor fd refers to a file other than a socket
>        and has been marked nonblocking (O_NONBLOCK), and the read
>        would block.
>
> EAGAIN or EWOULDBLOCK
>        The file descriptor fd refers to a socket and has been marked
>        nonblocking (O_NONBLOCK), and the read would block.  POSIX.1-2001
>        allows either error to be returned for this case, and does not
>        require these constants to have the same value, so a portable
>        application should check for both possibilities.
>
> If we get an EAGAIN or EWOULDBLOCK the fd must have set O_NONBLOCK.
> As the intent of xread is to read as much as possible either until the
> fd is EOF or an actual error occurs, we can ease the feeder of the fd
> by not spinning the whole time, but rather wait for it politely by not
> busy waiting.
>
> We should not care if the call to poll failed, as we're in an infinite
> loop and can only get out with the correct read().
>
> Signed-off-by: Stefan Beller <sbeller@google.com>
> Signed-off-by: Junio C Hamano <gitster@pobox.com>
> ---
> diff --git a/wrapper.c b/wrapper.c
> index 6fcaa4d..4f720fe 100644
> --- a/wrapper.c
> +++ b/wrapper.c
> @@ -236,8 +236,17 @@ ssize_t xread(int fd, void *buf, size_t len)
>             len = MAX_IO_SIZE;
>         while (1) {
>                 nr = read(fd, buf, len);
> -               if ((nr < 0) && (errno == EAGAIN || errno == EINTR))
> -                       continue;
> +               if (nr < 0) {
> +                       if (errno == EINTR)
> +                               continue;
> +                       if (errno == EAGAIN || errno == EWOULDBLOCK) {
> +                               struct pollfd pfd;
> +                               pfd.events = POLLIN;
> +                               pfd.fd = fd;
> +                               /* We deliberately ignore the return value */

This comment tells us what the code itself already says, but not why
the value is being ignored. The reader still has to consult the commit
message to learn that detail, which makes the value of the comment
questionable.

> +                               poll(&pfd, 1, -1);
> +                       }
> +               }
>                 return nr;
>         }
>  }
> --
> 2.6.4.443.ge094245.dirty
>

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [PATCH 2/8] xread: poll on non blocking fds
  2015-12-14 22:58   ` Eric Sunshine
@ 2015-12-14 23:07     ` Stefan Beller
  2015-12-14 23:11     ` Junio C Hamano
  1 sibling, 0 replies; 18+ messages in thread
From: Stefan Beller @ 2015-12-14 23:07 UTC (permalink / raw)
  To: Eric Sunshine
  Cc: Git List, Jeff King, Junio C Hamano, Jonathan Nieder,
	Johannes Schindelin, Jens Lehmann, Johannes Sixt

On Mon, Dec 14, 2015 at 2:58 PM, Eric Sunshine <ericsunshine@gmail.com> wrote:
>> +                               /* We deliberately ignore the return value */
>
> This comment tells us what the code itself already says, but not why
> the value is being ignored. The reader still has to consult the commit
> message to learn that detail, which makes the value of the comment
> questionable.

It at least tells you it's not because of sloppiness. ;)
I'll elaborate that comment a bit further in a resend though.

Thanks,
Stefan

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [PATCH 2/8] xread: poll on non blocking fds
  2015-12-14 22:58   ` Eric Sunshine
  2015-12-14 23:07     ` Stefan Beller
@ 2015-12-14 23:11     ` Junio C Hamano
  2015-12-14 23:14       ` Stefan Beller
  1 sibling, 1 reply; 18+ messages in thread
From: Junio C Hamano @ 2015-12-14 23:11 UTC (permalink / raw)
  To: Eric Sunshine
  Cc: Stefan Beller, Git List, Jeff King, Jonathan Nieder,
	Johannes Schindelin, Jens Lehmann, Johannes Sixt

Eric Sunshine <ericsunshine@gmail.com> writes:

> This comment tells us what the code itself already says, but not why
> the value is being ignored. The reader still has to consult the commit
> message to learn that detail, which makes the value of the comment
> questionable.

Let's do this for now, then.

-- >8 --
From: Stefan Beller <sbeller@google.com>
Date: Mon, 14 Dec 2015 11:37:12 -0800
Subject: [PATCH] xread: poll on non blocking fds

The man page of read(2) says:

  EAGAIN The file descriptor fd refers to a file other than a socket
	 and has been marked nonblocking (O_NONBLOCK), and the read
	 would block.

  EAGAIN or EWOULDBLOCK
	 The file descriptor fd refers to a socket and has been marked
	 nonblocking (O_NONBLOCK), and the read would block.  POSIX.1-2001
	 allows either error to be returned for this case, and does not
	 require these constants to have the same value, so a portable
	 application should check for both possibilities.

If we get an EAGAIN or EWOULDBLOCK the fd must have set O_NONBLOCK.
As the intent of xread is to read as much as possible either until the
fd is EOF or an actual error occurs, we can ease the feeder of the fd
by not spinning the whole time, but rather wait for it politely by not
busy waiting.

We should not care if the call to poll failed, as we're in an infinite
loop and can only get out with the correct read().

Signed-off-by: Stefan Beller <sbeller@google.com>
Acked-by: Johannes Sixt <j6t@kdbg.org>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
---
 wrapper.c | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/wrapper.c b/wrapper.c
index 6fcaa4d..1770efa 100644
--- a/wrapper.c
+++ b/wrapper.c
@@ -236,8 +236,24 @@ ssize_t xread(int fd, void *buf, size_t len)
 	    len = MAX_IO_SIZE;
 	while (1) {
 		nr = read(fd, buf, len);
-		if ((nr < 0) && (errno == EAGAIN || errno == EINTR))
-			continue;
+		if (nr < 0) {
+			if (errno == EINTR)
+				continue;
+			if (errno == EAGAIN || errno == EWOULDBLOCK) {
+				struct pollfd pfd;
+				pfd.events = POLLIN;
+				pfd.fd = fd;
+				/*
+				 * it is OK if this poll() failed; we
+				 * want to leave this infinite loop
+				 * only when read() returns with
+				 * success, or an expected failure,
+				 * which would be checked by the next
+				 * call to read(2).
+				 */
+				poll(&pfd, 1, -1);
+			}
+		}
 		return nr;
 	}
 }
-- 
2.7.0-rc0-109-gb762328

^ permalink raw reply related	[flat|nested] 18+ messages in thread

* Re: [PATCH 2/8] xread: poll on non blocking fds
  2015-12-14 23:11     ` Junio C Hamano
@ 2015-12-14 23:14       ` Stefan Beller
  0 siblings, 0 replies; 18+ messages in thread
From: Stefan Beller @ 2015-12-14 23:14 UTC (permalink / raw)
  To: Junio C Hamano
  Cc: Eric Sunshine, Git List, Jeff King, Jonathan Nieder,
	Johannes Schindelin, Jens Lehmann, Johannes Sixt

On Mon, Dec 14, 2015 at 3:11 PM, Junio C Hamano <gitster@pobox.com> wrote:
> Eric Sunshine <ericsunshine@gmail.com> writes:
>
>> This comment tells us what the code itself already says, but not why
>> the value is being ignored. The reader still has to consult the commit
>> message to learn that detail, which makes the value of the comment
>> questionable.
>
> Let's do this for now, then.

That looks good to me. I'll pick it up for the resend.

>
> -- >8 --
> From: Stefan Beller <sbeller@google.com>
> Date: Mon, 14 Dec 2015 11:37:12 -0800
> Subject: [PATCH] xread: poll on non blocking fds
>
> The man page of read(2) says:
>
>   EAGAIN The file descriptor fd refers to a file other than a socket
>          and has been marked nonblocking (O_NONBLOCK), and the read
>          would block.
>
>   EAGAIN or EWOULDBLOCK
>          The file descriptor fd refers to a socket and has been marked
>          nonblocking (O_NONBLOCK), and the read would block.  POSIX.1-2001
>          allows either error to be returned for this case, and does not
>          require these constants to have the same value, so a portable
>          application should check for both possibilities.
>
> If we get an EAGAIN or EWOULDBLOCK the fd must have set O_NONBLOCK.
> As the intent of xread is to read as much as possible either until the
> fd is EOF or an actual error occurs, we can ease the feeder of the fd
> by not spinning the whole time, but rather wait for it politely by not
> busy waiting.
>
> We should not care if the call to poll failed, as we're in an infinite
> loop and can only get out with the correct read().
>
> Signed-off-by: Stefan Beller <sbeller@google.com>
> Acked-by: Johannes Sixt <j6t@kdbg.org>
> Signed-off-by: Junio C Hamano <gitster@pobox.com>
> ---
>  wrapper.c | 20 ++++++++++++++++++--
>  1 file changed, 18 insertions(+), 2 deletions(-)
>
> diff --git a/wrapper.c b/wrapper.c
> index 6fcaa4d..1770efa 100644
> --- a/wrapper.c
> +++ b/wrapper.c
> @@ -236,8 +236,24 @@ ssize_t xread(int fd, void *buf, size_t len)
>             len = MAX_IO_SIZE;
>         while (1) {
>                 nr = read(fd, buf, len);
> -               if ((nr < 0) && (errno == EAGAIN || errno == EINTR))
> -                       continue;
> +               if (nr < 0) {
> +                       if (errno == EINTR)
> +                               continue;
> +                       if (errno == EAGAIN || errno == EWOULDBLOCK) {
> +                               struct pollfd pfd;
> +                               pfd.events = POLLIN;
> +                               pfd.fd = fd;
> +                               /*
> +                                * it is OK if this poll() failed; we
> +                                * want to leave this infinite loop
> +                                * only when read() returns with
> +                                * success, or an expected failure,
> +                                * which would be checked by the next
> +                                * call to read(2).
> +                                */
> +                               poll(&pfd, 1, -1);
> +                       }
> +               }
>                 return nr;
>         }
>  }
> --
> 2.7.0-rc0-109-gb762328
>

^ permalink raw reply	[flat|nested] 18+ messages in thread

end of thread, other threads:[~2015-12-14 23:14 UTC | newest]

Thread overview: 18+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-09-28 23:13 [PATCH 0/8] fetch submodules in parallel Stefan Beller
2015-09-28 23:13 ` [PATCH 1/8] submodule.c: write "Fetching submodule <foo>" to stderr Stefan Beller
2015-09-28 23:14 ` [PATCH 2/8] xread: poll on non blocking fds Stefan Beller
2015-09-28 23:14 ` [PATCH 3/8] xread_nonblock: add functionality to read from fds without blocking Stefan Beller
2015-09-28 23:14 ` [PATCH 4/8] strbuf: add strbuf_read_once to read " Stefan Beller
2015-09-28 23:14 ` [PATCH 5/8] sigchain: add command to pop all common signals Stefan Beller
2015-09-30  5:23   ` Junio C Hamano
2015-09-28 23:14 ` [PATCH 6/8] run-command: add an asynchronous parallel child processor Stefan Beller
2015-09-30  3:12   ` Junio C Hamano
2015-09-30 18:28     ` Stefan Beller
2015-09-30 18:48     ` Junio C Hamano
2015-09-28 23:14 ` [PATCH 7/8] fetch_populated_submodules: use new parallel job processing Stefan Beller
2015-09-28 23:14 ` [PATCH 8/8] submodules: allow parallel fetching, add tests and documentation Stefan Beller
  -- strict thread matches above, loose matches on Subject: below --
2015-12-14 19:37 [PATCH 0/8] Rerolling sb/submodule-parallel-fetch for the time after 2.7 Stefan Beller
2015-12-14 19:37 ` [PATCH 2/8] xread: poll on non blocking fds Stefan Beller
2015-12-14 22:58   ` Eric Sunshine
2015-12-14 23:07     ` Stefan Beller
2015-12-14 23:11     ` Junio C Hamano
2015-12-14 23:14       ` Stefan Beller

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).