2005-08-01 03:17:43 +08:00
|
|
|
#ifndef RUN_COMMAND_H
|
|
|
|
#define RUN_COMMAND_H
|
|
|
|
|
2018-11-03 16:48:38 +08:00
|
|
|
#include "thread-utils.h"
|
2010-03-06 23:40:42 +08:00
|
|
|
|
2020-07-29 04:23:39 +08:00
|
|
|
#include "strvec.h"
|
2014-05-15 16:33:26 +08:00
|
|
|
|
2019-11-18 05:04:55 +08:00
|
|
|
/**
|
|
|
|
* The run-command API offers a versatile tool to run sub-processes with
|
|
|
|
* redirected input and output as well as with a modified environment
|
|
|
|
* and an alternate current directory.
|
|
|
|
*
|
|
|
|
* A similar API offers the capability to run a function asynchronously,
|
|
|
|
* which is primarily used to capture the output that the function
|
|
|
|
* produces in the caller in order to process it.
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* This describes the arguments, redirections, and environment of a
|
|
|
|
* command to run in a sub-process.
|
|
|
|
*
|
|
|
|
* The caller:
|
|
|
|
*
|
|
|
|
* 1. allocates and clears (using child_process_init() or
|
|
|
|
* CHILD_PROCESS_INIT) a struct child_process variable;
|
|
|
|
* 2. initializes the members;
|
|
|
|
* 3. calls start_command();
|
|
|
|
* 4. processes the data;
|
|
|
|
* 5. closes file descriptors (if necessary; see below);
|
|
|
|
* 6. calls finish_command().
|
|
|
|
*
|
|
|
|
* Special forms of redirection are available by setting these members
|
|
|
|
* to 1:
|
|
|
|
*
|
|
|
|
* .no_stdin, .no_stdout, .no_stderr: The respective channel is
|
|
|
|
* redirected to /dev/null.
|
|
|
|
*
|
|
|
|
* .stdout_to_stderr: stdout of the child is redirected to its
|
|
|
|
* stderr. This happens after stderr is itself redirected.
|
|
|
|
* So stdout will follow stderr to wherever it is
|
|
|
|
* redirected.
|
|
|
|
*/
|
2007-03-10 16:28:00 +08:00
|
|
|
struct child_process {
|
2019-11-18 05:04:55 +08:00
|
|
|
|
|
|
|
/**
|
run-command API: remove "argv" member, always use "args"
Remove the "argv" member from the run-command API, ever since "args"
was added in c460c0ecdca (run-command: store an optional argv_array,
2014-05-15) being able to provide either "argv" or "args" has led to
some confusion and bugs.
If we hadn't gone in that direction and only had an "argv" our
problems wouldn't have been solved either, as noted in [1] (and in the
documentation amended here) it comes with inherent memory management
issues: The caller would have to hang on to the "argv" until the
run-command API was finished. If the "argv" was an argument to main()
this wasn't an issue, but if it it was manually constructed using the
API might be painful.
We also have a recent report[2] of a user of the API segfaulting,
which is a direct result of it being complex to use. This commit
addresses the root cause of that bug.
This change is larger than I'd like, but there's no easy way to avoid
it that wouldn't involve even more verbose intermediate steps. We use
the "argv" as the source of truth over the "args", so we need to
change all parts of run-command.[ch] itself, as well as the trace2
logging at the same time.
The resulting Windows-specific code in start_command() is a bit nasty,
as we're now assigning to a strvec's "v" member, instead of to our own
"argv". There was a suggestion of some alternate approaches in reply
to an earlier version of this commit[3], but let's leave larger a
larger and needless refactoring of this code for now.
1. http://lore.kernel.org/git/YT6BnnXeAWn8BycF@coredump.intra.peff.net
2. https://lore.kernel.org/git/20211120194048.12125-1-ematsumiya@suse.de/
3. https://lore.kernel.org/git/patch-5.5-ea1011f7473-20211122T153605Z-avarab@gmail.com/
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-11-26 06:52:22 +08:00
|
|
|
* The .args is a `struct strvec', use that API to manipulate
|
|
|
|
* it, e.g. strvec_pushv() to add an existing "const char **"
|
|
|
|
* vector.
|
2019-11-18 05:04:55 +08:00
|
|
|
*
|
run-command API: remove "argv" member, always use "args"
Remove the "argv" member from the run-command API, ever since "args"
was added in c460c0ecdca (run-command: store an optional argv_array,
2014-05-15) being able to provide either "argv" or "args" has led to
some confusion and bugs.
If we hadn't gone in that direction and only had an "argv" our
problems wouldn't have been solved either, as noted in [1] (and in the
documentation amended here) it comes with inherent memory management
issues: The caller would have to hang on to the "argv" until the
run-command API was finished. If the "argv" was an argument to main()
this wasn't an issue, but if it it was manually constructed using the
API might be painful.
We also have a recent report[2] of a user of the API segfaulting,
which is a direct result of it being complex to use. This commit
addresses the root cause of that bug.
This change is larger than I'd like, but there's no easy way to avoid
it that wouldn't involve even more verbose intermediate steps. We use
the "argv" as the source of truth over the "args", so we need to
change all parts of run-command.[ch] itself, as well as the trace2
logging at the same time.
The resulting Windows-specific code in start_command() is a bit nasty,
as we're now assigning to a strvec's "v" member, instead of to our own
"argv". There was a suggestion of some alternate approaches in reply
to an earlier version of this commit[3], but let's leave larger a
larger and needless refactoring of this code for now.
1. http://lore.kernel.org/git/YT6BnnXeAWn8BycF@coredump.intra.peff.net
2. https://lore.kernel.org/git/20211120194048.12125-1-ematsumiya@suse.de/
3. https://lore.kernel.org/git/patch-5.5-ea1011f7473-20211122T153605Z-avarab@gmail.com/
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-11-26 06:52:22 +08:00
|
|
|
* If the command to run is a git command, set the first
|
|
|
|
* element in the strvec to the command name without the
|
|
|
|
* 'git-' prefix and set .git_cmd = 1.
|
2019-11-18 05:04:55 +08:00
|
|
|
*
|
run-command API: remove "argv" member, always use "args"
Remove the "argv" member from the run-command API, ever since "args"
was added in c460c0ecdca (run-command: store an optional argv_array,
2014-05-15) being able to provide either "argv" or "args" has led to
some confusion and bugs.
If we hadn't gone in that direction and only had an "argv" our
problems wouldn't have been solved either, as noted in [1] (and in the
documentation amended here) it comes with inherent memory management
issues: The caller would have to hang on to the "argv" until the
run-command API was finished. If the "argv" was an argument to main()
this wasn't an issue, but if it it was manually constructed using the
API might be painful.
We also have a recent report[2] of a user of the API segfaulting,
which is a direct result of it being complex to use. This commit
addresses the root cause of that bug.
This change is larger than I'd like, but there's no easy way to avoid
it that wouldn't involve even more verbose intermediate steps. We use
the "argv" as the source of truth over the "args", so we need to
change all parts of run-command.[ch] itself, as well as the trace2
logging at the same time.
The resulting Windows-specific code in start_command() is a bit nasty,
as we're now assigning to a strvec's "v" member, instead of to our own
"argv". There was a suggestion of some alternate approaches in reply
to an earlier version of this commit[3], but let's leave larger a
larger and needless refactoring of this code for now.
1. http://lore.kernel.org/git/YT6BnnXeAWn8BycF@coredump.intra.peff.net
2. https://lore.kernel.org/git/20211120194048.12125-1-ematsumiya@suse.de/
3. https://lore.kernel.org/git/patch-5.5-ea1011f7473-20211122T153605Z-avarab@gmail.com/
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-11-26 06:52:22 +08:00
|
|
|
* The memory in .args will be cleaned up automatically during
|
|
|
|
* `finish_command` (or during `start_command` when it is unsuccessful).
|
2019-11-18 05:04:55 +08:00
|
|
|
*/
|
2020-07-29 04:25:12 +08:00
|
|
|
struct strvec args;
|
2021-11-26 06:52:24 +08:00
|
|
|
|
|
|
|
/**
|
2022-06-02 17:09:51 +08:00
|
|
|
* Like .args the .env is a `struct strvec'.
|
2021-11-26 06:52:24 +08:00
|
|
|
*
|
|
|
|
* To modify the environment of the sub-process, specify an array of
|
|
|
|
* environment settings. Each string in the array manipulates the
|
|
|
|
* environment.
|
|
|
|
*
|
|
|
|
* - If the string is of the form "VAR=value", i.e. it contains '='
|
|
|
|
* the variable is added to the child process's environment.
|
|
|
|
*
|
|
|
|
* - If the string does not contain '=', it names an environment
|
|
|
|
* variable that will be removed from the child process's environment.
|
|
|
|
*
|
2022-06-02 17:09:51 +08:00
|
|
|
* The memory in .env will be cleaned up automatically during
|
2021-11-26 06:52:24 +08:00
|
|
|
* `finish_command` (or during `start_command` when it is unsuccessful).
|
|
|
|
*/
|
2022-06-02 17:09:50 +08:00
|
|
|
struct strvec env;
|
2007-03-10 16:28:05 +08:00
|
|
|
pid_t pid;
|
2019-02-23 06:25:01 +08:00
|
|
|
|
|
|
|
int trace2_child_id;
|
|
|
|
uint64_t trace2_child_us_start;
|
|
|
|
const char *trace2_child_class;
|
|
|
|
const char *trace2_hook_name;
|
|
|
|
|
2008-02-22 06:42:56 +08:00
|
|
|
/*
|
|
|
|
* Using .in, .out, .err:
|
2019-11-18 05:04:55 +08:00
|
|
|
* - Specify 0 for no redirections. No new file descriptor is allocated.
|
|
|
|
* (child inherits stdin, stdout, stderr from parent).
|
2008-02-22 06:42:56 +08:00
|
|
|
* - Specify -1 to have a pipe allocated as follows:
|
|
|
|
* .in: returns the writable pipe end; parent writes to it,
|
|
|
|
* the readable pipe end becomes child's stdin
|
|
|
|
* .out, .err: returns the readable pipe end; parent reads from
|
|
|
|
* it, the writable pipe end becomes child's stdout/stderr
|
|
|
|
* The caller of start_command() must close the returned FDs
|
|
|
|
* after it has completed reading from/writing to it!
|
|
|
|
* - Specify > 0 to set a channel to a particular FD as follows:
|
|
|
|
* .in: a readable FD, becomes child's stdin
|
|
|
|
* .out: a writable FD, becomes child's stdout/stderr
|
2010-02-06 04:57:37 +08:00
|
|
|
* .err: a writable FD, becomes child's stderr
|
2008-02-22 06:42:56 +08:00
|
|
|
* The specified FD is closed by start_command(), even in case
|
|
|
|
* of errors!
|
|
|
|
*/
|
2007-03-10 16:28:08 +08:00
|
|
|
int in;
|
2007-03-13 02:37:45 +08:00
|
|
|
int out;
|
2007-10-20 03:47:58 +08:00
|
|
|
int err;
|
2019-11-18 05:04:55 +08:00
|
|
|
|
|
|
|
/**
|
|
|
|
* To specify a new initial working directory for the sub-process,
|
|
|
|
* specify it in the .dir member.
|
|
|
|
*/
|
2007-05-23 05:48:23 +08:00
|
|
|
const char *dir;
|
2019-11-18 05:04:55 +08:00
|
|
|
|
2007-03-10 16:28:00 +08:00
|
|
|
unsigned no_stdin:1;
|
2007-03-13 02:37:55 +08:00
|
|
|
unsigned no_stdout:1;
|
2007-11-11 15:29:37 +08:00
|
|
|
unsigned no_stderr:1;
|
2020-02-21 10:56:37 +08:00
|
|
|
unsigned git_cmd:1; /* if this is to be git sub-command */
|
2019-11-18 05:04:55 +08:00
|
|
|
|
|
|
|
/**
|
|
|
|
* If the program cannot be found, the functions return -1 and set
|
|
|
|
* errno to ENOENT. Normally, an error message is printed, but if
|
|
|
|
* .silent_exec_failure is set to 1, no message is printed for this
|
|
|
|
* special error condition.
|
|
|
|
*/
|
2009-07-05 03:26:42 +08:00
|
|
|
unsigned silent_exec_failure:1;
|
2019-11-18 05:04:55 +08:00
|
|
|
|
2021-01-23 05:03:33 +08:00
|
|
|
/**
|
|
|
|
* Run the command from argv[0] using a shell (but note that we may
|
|
|
|
* still optimize out the shell call if the command contains no
|
|
|
|
* metacharacters). Note that further arguments to the command in
|
|
|
|
* argv[1], etc, do not need to be shell-quoted.
|
|
|
|
*/
|
2009-12-30 18:53:16 +08:00
|
|
|
unsigned use_shell:1;
|
2021-01-23 05:03:33 +08:00
|
|
|
|
2021-09-09 17:47:06 +08:00
|
|
|
/**
|
|
|
|
* Release any open file handles to the object store before running
|
|
|
|
* the command; This is necessary e.g. when the spawned process may
|
|
|
|
* want to repack because that would delete `.pack` files (and on
|
|
|
|
* Windows, you cannot delete files that are still in use).
|
|
|
|
*/
|
|
|
|
unsigned close_object_store:1;
|
|
|
|
|
2021-01-23 05:03:33 +08:00
|
|
|
unsigned stdout_to_stderr:1;
|
run-command: optionally kill children on exit
When we spawn a helper process, it should generally be done
and finish_command called before we exit. However, if we
exit abnormally due to an early return or a signal, the
helper may continue to run in our absence.
In the best case, this may simply be wasted CPU cycles or a
few stray messages on a terminal. But it could also mean a
process that the user thought was aborted continues to run
to completion (e.g., a push's pack-objects helper will
complete the push, even though you killed the push process).
This patch provides infrastructure for run-command to keep
track of PIDs to be killed, and clean them on signal
reception or input, just as we do with tempfiles. PIDs can
be added in two ways:
1. If NO_PTHREADS is defined, async helper processes are
automatically marked. By definition this code must be
ready to die when the parent dies, since it may be
implemented as a thread of the parent process.
2. If the run-command caller specifies the "clean_on_exit"
option. This is not the default, as there are cases
where it is OK for the child to outlive us (e.g., when
spawning a pager).
PIDs are cleared from the kill-list automatically during
wait_or_whine, which is called from finish_command and
finish_async.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Clemens Buchacher <drizzd@aon.at>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2012-01-07 19:42:43 +08:00
|
|
|
unsigned clean_on_exit:1;
|
execv_dashed_external: wait for child on signal death
When you hit ^C to interrupt a git command going to a pager,
this usually leaves the pager running. But when a dashed
external is in use, the pager ends up in a funny state and
quits (but only after eating one more character from the
terminal!). This fixes it.
Explaining the reason will require a little background.
When git runs a pager, it's important for the git process to
hang around and wait for the pager to finish, even though it
has no more data to feed it. This is because git spawns the
pager as a child, and thus the git process is the session
leader on the terminal. After it dies, the pager will finish
its current read from the terminal (eating the one
character), and then get EIO trying to read again.
When you hit ^C, that sends SIGINT to git and to the pager,
and it's a similar situation. The pager ignores it, but the
git process needs to hang around until the pager is done. We
addressed that long ago in a3da882120 (pager: do
wait_for_pager on signal death, 2009-01-22).
But when you have a dashed external (or an alias pointing to
a builtin, which will re-exec git for the builtin), there's
an extra process in the mix. For instance, running:
$ git -c alias.l=log l
will end up with a process tree like:
git (parent)
\
git-log (child)
\
less (pager)
If you hit ^C, SIGINT goes to all of them. The pager ignores
it, and the child git process will end up in wait_for_pager().
But the parent git process will die, and the usual EIO
trouble happens.
So we really want the parent git process to wait_for_pager(),
but of course it doesn't know anything about the pager at
all, since it was started by the child. However, we can
have it wait on the git-log child, which in turn is waiting
on the pager. And that's what this patch does.
There are a few design decisions here worth explaining:
1. The new feature is attached to run-command's
clean_on_exit feature. Partly this is convenience,
since that feature already has a signal handler that
deals with child cleanup.
But it's also a meaningful connection. The main reason
that dashed externals use clean_on_exit is to bind the
two processes together. If somebody kills the parent
with a signal, we propagate that to the child (in this
instance with SIGINT, we do propagate but it doesn't
matter because the original signal went to the whole
process group). Likewise, we do not want the parent
to go away until the child has done so.
In a traditional Unix world, we'd probably accomplish
this binding by just having the parent execve() the
child directly. But since that doesn't work on Windows,
everything goes through run_command's more spawn-like
interface.
2. We do _not_ automatically waitpid() on any
clean_on_exit children. For dashed externals this makes
sense; we know that the parent is doing nothing but
waiting for the child to exit anyway. But with other
children, it's possible that the child, after getting
the signal, could be waiting on the parent to do
something (like closing a descriptor). If we were to
wait on such a child, we'd end up in a deadlock. So
this errs on the side of caution, and lets callers
enable the feature explicitly.
3. When we send children the cleanup signal, we send all
the signals first, before waiting on any children. This
is to avoid the case where one child might be waiting
on another one to exit, causing a deadlock. We inform
all of them that it's time to die before reaping any.
In practice, there is only ever one dashed external run
from a given process, so this doesn't matter much now.
But it future-proofs us if other callers start using
the wait_after_clean mechanism.
There's no automated test here, because it would end up racy
and unportable. But it's easy to reproduce the situation by
running the log command given above and hitting ^C.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-01-07 09:22:23 +08:00
|
|
|
unsigned wait_after_clean:1;
|
2016-10-17 07:20:28 +08:00
|
|
|
void (*clean_on_exit_handler)(struct child_process *process);
|
2007-03-10 16:28:00 +08:00
|
|
|
};
|
|
|
|
|
2021-07-01 18:51:25 +08:00
|
|
|
#define CHILD_PROCESS_INIT { \
|
|
|
|
.args = STRVEC_INIT, \
|
2022-06-02 17:09:50 +08:00
|
|
|
.env = STRVEC_INIT, \
|
2021-07-01 18:51:25 +08:00
|
|
|
}
|
2019-11-18 05:04:55 +08:00
|
|
|
|
|
|
|
/**
|
2022-10-30 19:55:06 +08:00
|
|
|
* The functions: start_command, finish_command, run_command do the following:
|
2019-11-18 05:04:55 +08:00
|
|
|
*
|
|
|
|
* - If a system call failed, errno is set and -1 is returned. A diagnostic
|
|
|
|
* is printed.
|
|
|
|
*
|
|
|
|
* - If the program was not found, then -1 is returned and errno is set to
|
|
|
|
* ENOENT; a diagnostic is printed only if .silent_exec_failure is 0.
|
|
|
|
*
|
|
|
|
* - Otherwise, the program is run. If it terminates regularly, its exit
|
|
|
|
* code is returned. No diagnostic is printed, even if the exit code is
|
|
|
|
* non-zero.
|
|
|
|
*
|
|
|
|
* - If the program terminated due to a signal, then the return value is the
|
|
|
|
* signal number + 128, ie. the same value that a POSIX shell's $? would
|
|
|
|
* report. A diagnostic is printed.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Initialize a struct child_process variable.
|
|
|
|
*/
|
2014-08-20 03:10:48 +08:00
|
|
|
void child_process_init(struct child_process *);
|
2019-11-18 05:04:55 +08:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Release the memory associated with the struct child_process.
|
|
|
|
* Most users of the run-command API don't need to call this
|
|
|
|
* function explicitly because `start_command` invokes it on
|
|
|
|
* failure and `finish_command` calls it automatically already.
|
|
|
|
*/
|
2015-10-24 20:11:27 +08:00
|
|
|
void child_process_clear(struct child_process *);
|
2019-11-18 05:04:55 +08:00
|
|
|
|
2019-04-29 16:28:14 +08:00
|
|
|
int is_executable(const char *name);
|
2014-08-20 03:09:35 +08:00
|
|
|
|
2021-09-14 01:39:01 +08:00
|
|
|
/**
|
|
|
|
* Check if the command exists on $PATH. This emulates the path search that
|
|
|
|
* execvp would perform, without actually executing the command so it
|
|
|
|
* can be used before fork() to prepare to run a command using
|
|
|
|
* execve() or after execvp() to diagnose why it failed.
|
|
|
|
*
|
|
|
|
* The caller should ensure that command contains no directory separators.
|
|
|
|
*
|
|
|
|
* Returns 1 if it is found in $PATH or 0 if the command could not be found.
|
|
|
|
*/
|
|
|
|
int exists_in_PATH(const char *command);
|
|
|
|
|
2019-11-18 05:04:55 +08:00
|
|
|
/**
|
|
|
|
* Start a sub-process. Takes a pointer to a `struct child_process`
|
|
|
|
* that specifies the details and returns pipe FDs (if requested).
|
|
|
|
* See below for details.
|
|
|
|
*/
|
2007-03-10 16:28:05 +08:00
|
|
|
int start_command(struct child_process *);
|
2019-11-18 05:04:55 +08:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Wait for the completion of a sub-process that was started with
|
|
|
|
* start_command().
|
|
|
|
*/
|
2007-03-10 16:28:05 +08:00
|
|
|
int finish_command(struct child_process *);
|
2019-11-18 05:04:55 +08:00
|
|
|
|
pager: don't use unsafe functions in signal handlers
Since the commit a3da8821208d (pager: do wait_for_pager on signal
death), we call wait_for_pager() in the pager's signal handler. The
recent bug report revealed that this causes a deadlock in glibc at
aborting "git log" [*1*]. When this happens, git process is left
unterminated, and it can't be killed by SIGTERM but only by SIGKILL.
The problem is that wait_for_pager() function does more than waiting
for pager process's termination, but it does cleanups and printing
errors. Unfortunately, the functions that may be used in a signal
handler are very limited [*2*]. Particularly, malloc(), free() and the
variants can't be used in a signal handler because they take a mutex
internally in glibc. This was the cause of the deadlock above. Other
than the direct calls of malloc/free, many functions calling
malloc/free can't be used. strerror() is such one, either.
Also the usage of fflush() and printf() in a signal handler is bad,
although it seems working so far. In a safer side, we should avoid
them, too.
This patch tries to reduce the calls of such functions in signal
handlers. wait_for_signal() takes a flag and avoids the unsafe
calls. Also, finish_command_in_signal() is introduced for the
same reason. There the free() calls are removed, and only waits for
the children without whining at errors.
[*1*] https://bugzilla.opensuse.org/show_bug.cgi?id=942297
[*2*] http://pubs.opengroup.org/onlinepubs/9699919799/functions/V2_chap02.html#tag_15_04_03
Signed-off-by: Takashi Iwai <tiwai@suse.de>
Reviewed-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-09-04 17:35:57 +08:00
|
|
|
int finish_command_in_signal(struct child_process *);
|
2019-11-18 05:04:55 +08:00
|
|
|
|
|
|
|
/**
|
|
|
|
* A convenience function that encapsulates a sequence of
|
|
|
|
* start_command() followed by finish_command(). Takes a pointer
|
|
|
|
* to a `struct child_process` that specifies the details.
|
|
|
|
*/
|
2007-03-10 16:28:00 +08:00
|
|
|
int run_command(struct child_process *);
|
|
|
|
|
auto-gc: extract a reusable helper from "git fetch"
Back in 1991006c (fetch: convert argv_gc_auto to struct argv_array,
2014-08-16), we taught "git fetch --quiet" to pass the "--quiet"
option down to "gc --auto". This issue, however, is not limited to
"fetch":
$ git grep -e 'gc.*--auto' \*.c
finds hits in "am", "commit", "merge", and "rebase" and these
commands do not pass "--quiet" down to "gc --auto" when they
themselves are told to be quiet.
As a preparatory step, let's introduce a helper function
run_auto_gc(), that the caller can pass a boolean "quiet",
and redo the fix to "git fetch" using the helper.
Signed-off-by: Junio C Hamano <gitster@pobox.com>
Reviewed-by: Taylor Blau <me@ttaylorr.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-05-07 04:18:29 +08:00
|
|
|
/*
|
|
|
|
* Trigger an auto-gc
|
|
|
|
*/
|
2020-09-18 02:11:44 +08:00
|
|
|
int run_auto_maintenance(int quiet);
|
auto-gc: extract a reusable helper from "git fetch"
Back in 1991006c (fetch: convert argv_gc_auto to struct argv_array,
2014-08-16), we taught "git fetch --quiet" to pass the "--quiet"
option down to "gc --auto". This issue, however, is not limited to
"fetch":
$ git grep -e 'gc.*--auto' \*.c
finds hits in "am", "commit", "merge", and "rebase" and these
commands do not pass "--quiet" down to "gc --auto" when they
themselves are told to be quiet.
As a preparatory step, let's introduce a helper function
run_auto_gc(), that the caller can pass a boolean "quiet",
and redo the fix to "git fetch" using the helper.
Signed-off-by: Junio C Hamano <gitster@pobox.com>
Reviewed-by: Taylor Blau <me@ttaylorr.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-05-07 04:18:29 +08:00
|
|
|
|
run-command: introduce capture_command helper
Something as simple as reading the stdout from a command
turns out to be rather hard to do right. Doing:
cmd.out = -1;
run_command(&cmd);
strbuf_read(&buf, cmd.out, 0);
can result in deadlock if the child process produces a large
amount of output. What happens is:
1. The parent spawns the child with its stdout connected
to a pipe, of which the parent is the sole reader.
2. The parent calls wait(), blocking until the child exits.
3. The child writes to stdout. If it writes more data than
the OS pipe buffer can hold, the write() call will
block.
This is a deadlock; the parent is waiting for the child to
exit, and the child is waiting for the parent to call
read().
So we might try instead:
start_command(&cmd);
strbuf_read(&buf, cmd.out, 0);
finish_command(&cmd);
But that is not quite right either. We are examining cmd.out
and running finish_command whether start_command succeeded
or not, which is wrong. Moreover, these snippets do not do
any error handling. If our read() fails, we must make sure
to still call finish_command (to reap the child process).
And both snippets failed to close the cmd.out descriptor,
which they must do (provided start_command succeeded).
Let's introduce a run-command helper that can make this a
bit simpler for callers to get right.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-23 11:53:43 +08:00
|
|
|
/**
|
2016-06-18 07:38:47 +08:00
|
|
|
* Execute the given command, sending "in" to its stdin, and capturing its
|
|
|
|
* stdout and stderr in the "out" and "err" strbufs. Any of the three may
|
|
|
|
* be NULL to skip processing.
|
|
|
|
*
|
run-command: introduce capture_command helper
Something as simple as reading the stdout from a command
turns out to be rather hard to do right. Doing:
cmd.out = -1;
run_command(&cmd);
strbuf_read(&buf, cmd.out, 0);
can result in deadlock if the child process produces a large
amount of output. What happens is:
1. The parent spawns the child with its stdout connected
to a pipe, of which the parent is the sole reader.
2. The parent calls wait(), blocking until the child exits.
3. The child writes to stdout. If it writes more data than
the OS pipe buffer can hold, the write() call will
block.
This is a deadlock; the parent is waiting for the child to
exit, and the child is waiting for the parent to call
read().
So we might try instead:
start_command(&cmd);
strbuf_read(&buf, cmd.out, 0);
finish_command(&cmd);
But that is not quite right either. We are examining cmd.out
and running finish_command whether start_command succeeded
or not, which is wrong. Moreover, these snippets do not do
any error handling. If our read() fails, we must make sure
to still call finish_command (to reap the child process).
And both snippets failed to close the cmd.out descriptor,
which they must do (provided start_command succeeded).
Let's introduce a run-command helper that can make this a
bit simpler for callers to get right.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-23 11:53:43 +08:00
|
|
|
* Returns -1 if starting the command fails or reading fails, and otherwise
|
2016-06-18 07:38:47 +08:00
|
|
|
* returns the exit code of the command. Any output collected in the
|
|
|
|
* buffers is kept even if the command returns a non-zero exit. The hint fields
|
|
|
|
* gives starting sizes for the strbuf allocations.
|
run-command: introduce capture_command helper
Something as simple as reading the stdout from a command
turns out to be rather hard to do right. Doing:
cmd.out = -1;
run_command(&cmd);
strbuf_read(&buf, cmd.out, 0);
can result in deadlock if the child process produces a large
amount of output. What happens is:
1. The parent spawns the child with its stdout connected
to a pipe, of which the parent is the sole reader.
2. The parent calls wait(), blocking until the child exits.
3. The child writes to stdout. If it writes more data than
the OS pipe buffer can hold, the write() call will
block.
This is a deadlock; the parent is waiting for the child to
exit, and the child is waiting for the parent to call
read().
So we might try instead:
start_command(&cmd);
strbuf_read(&buf, cmd.out, 0);
finish_command(&cmd);
But that is not quite right either. We are examining cmd.out
and running finish_command whether start_command succeeded
or not, which is wrong. Moreover, these snippets do not do
any error handling. If our read() fails, we must make sure
to still call finish_command (to reap the child process).
And both snippets failed to close the cmd.out descriptor,
which they must do (provided start_command succeeded).
Let's introduce a run-command helper that can make this a
bit simpler for callers to get right.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-23 11:53:43 +08:00
|
|
|
*
|
|
|
|
* The fields of "cmd" should be set up as they would for a normal run_command
|
2016-06-18 07:38:47 +08:00
|
|
|
* invocation. But note that there is no need to set the in, out, or err
|
|
|
|
* fields; pipe_command handles that automatically.
|
|
|
|
*/
|
|
|
|
int pipe_command(struct child_process *cmd,
|
|
|
|
const char *in, size_t in_len,
|
|
|
|
struct strbuf *out, size_t out_hint,
|
|
|
|
struct strbuf *err, size_t err_hint);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Convenience wrapper around pipe_command for the common case
|
|
|
|
* of capturing only stdout.
|
run-command: introduce capture_command helper
Something as simple as reading the stdout from a command
turns out to be rather hard to do right. Doing:
cmd.out = -1;
run_command(&cmd);
strbuf_read(&buf, cmd.out, 0);
can result in deadlock if the child process produces a large
amount of output. What happens is:
1. The parent spawns the child with its stdout connected
to a pipe, of which the parent is the sole reader.
2. The parent calls wait(), blocking until the child exits.
3. The child writes to stdout. If it writes more data than
the OS pipe buffer can hold, the write() call will
block.
This is a deadlock; the parent is waiting for the child to
exit, and the child is waiting for the parent to call
read().
So we might try instead:
start_command(&cmd);
strbuf_read(&buf, cmd.out, 0);
finish_command(&cmd);
But that is not quite right either. We are examining cmd.out
and running finish_command whether start_command succeeded
or not, which is wrong. Moreover, these snippets do not do
any error handling. If our read() fails, we must make sure
to still call finish_command (to reap the child process).
And both snippets failed to close the cmd.out descriptor,
which they must do (provided start_command succeeded).
Let's introduce a run-command helper that can make this a
bit simpler for callers to get right.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-23 11:53:43 +08:00
|
|
|
*/
|
2016-06-18 07:38:47 +08:00
|
|
|
static inline int capture_command(struct child_process *cmd,
|
|
|
|
struct strbuf *out,
|
|
|
|
size_t hint)
|
|
|
|
{
|
|
|
|
return pipe_command(cmd, NULL, 0, out, hint, NULL, 0);
|
|
|
|
}
|
run-command: introduce capture_command helper
Something as simple as reading the stdout from a command
turns out to be rather hard to do right. Doing:
cmd.out = -1;
run_command(&cmd);
strbuf_read(&buf, cmd.out, 0);
can result in deadlock if the child process produces a large
amount of output. What happens is:
1. The parent spawns the child with its stdout connected
to a pipe, of which the parent is the sole reader.
2. The parent calls wait(), blocking until the child exits.
3. The child writes to stdout. If it writes more data than
the OS pipe buffer can hold, the write() call will
block.
This is a deadlock; the parent is waiting for the child to
exit, and the child is waiting for the parent to call
read().
So we might try instead:
start_command(&cmd);
strbuf_read(&buf, cmd.out, 0);
finish_command(&cmd);
But that is not quite right either. We are examining cmd.out
and running finish_command whether start_command succeeded
or not, which is wrong. Moreover, these snippets do not do
any error handling. If our read() fails, we must make sure
to still call finish_command (to reap the child process).
And both snippets failed to close the cmd.out descriptor,
which they must do (provided start_command succeeded).
Let's introduce a run-command helper that can make this a
bit simpler for callers to get right.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-23 11:53:43 +08:00
|
|
|
|
2007-10-20 03:48:00 +08:00
|
|
|
/*
|
|
|
|
* The purpose of the following functions is to feed a pipe by running
|
|
|
|
* a function asynchronously and providing output that the caller reads.
|
|
|
|
*
|
|
|
|
* It is expected that no synchronization and mutual exclusion between
|
|
|
|
* the caller and the feed function is necessary so that the function
|
|
|
|
* can run in a thread without interfering with the caller.
|
2019-11-18 05:04:55 +08:00
|
|
|
*
|
|
|
|
* The caller:
|
|
|
|
*
|
|
|
|
* 1. allocates and clears (memset(&asy, 0, sizeof(asy));) a
|
|
|
|
* struct async variable;
|
|
|
|
* 2. initializes .proc and .data;
|
|
|
|
* 3. calls start_async();
|
|
|
|
* 4. processes communicates with proc through .in and .out;
|
|
|
|
* 5. closes .in and .out;
|
|
|
|
* 6. calls finish_async().
|
|
|
|
*
|
|
|
|
* There are serious restrictions on what the asynchronous function can do
|
|
|
|
* because this facility is implemented by a thread in the same address
|
|
|
|
* space on most platforms (when pthreads is available), but by a pipe to
|
|
|
|
* a forked process otherwise:
|
|
|
|
*
|
|
|
|
* - It cannot change the program's state (global variables, environment,
|
|
|
|
* etc.) in a way that the caller notices; in other words, .in and .out
|
|
|
|
* are the only communication channels to the caller.
|
|
|
|
*
|
|
|
|
* - It must not change the program's state that the caller of the
|
|
|
|
* facility also uses.
|
|
|
|
*
|
2007-10-20 03:48:00 +08:00
|
|
|
*/
|
|
|
|
struct async {
|
2019-11-18 05:04:55 +08:00
|
|
|
|
|
|
|
/**
|
|
|
|
* The function pointer in .proc has the following signature:
|
|
|
|
*
|
|
|
|
* int proc(int in, int out, void *data);
|
|
|
|
*
|
|
|
|
* - in, out specifies a set of file descriptors to which the function
|
|
|
|
* must read/write the data that it needs/produces. The function
|
|
|
|
* *must* close these descriptors before it returns. A descriptor
|
|
|
|
* may be -1 if the caller did not configure a descriptor for that
|
|
|
|
* direction.
|
|
|
|
*
|
|
|
|
* - data is the value that the caller has specified in the .data member
|
|
|
|
* of struct async.
|
|
|
|
*
|
|
|
|
* - The return value of the function is 0 on success and non-zero
|
|
|
|
* on failure. If the function indicates failure, finish_async() will
|
|
|
|
* report failure as well.
|
|
|
|
*
|
2007-10-20 03:48:00 +08:00
|
|
|
*/
|
2010-02-06 04:57:38 +08:00
|
|
|
int (*proc)(int in, int out, void *data);
|
2019-11-18 05:04:55 +08:00
|
|
|
|
2007-10-20 03:48:00 +08:00
|
|
|
void *data;
|
2019-11-18 05:04:55 +08:00
|
|
|
|
|
|
|
/**
|
|
|
|
* The members .in, .out are used to provide a set of fd's for
|
|
|
|
* communication between the caller and the callee as follows:
|
|
|
|
*
|
|
|
|
* - Specify 0 to have no file descriptor passed. The callee will
|
|
|
|
* receive -1 in the corresponding argument.
|
|
|
|
*
|
|
|
|
* - Specify < 0 to have a pipe allocated; start_async() replaces
|
|
|
|
* with the pipe FD in the following way:
|
|
|
|
*
|
|
|
|
* .in: Returns the writable pipe end into which the caller
|
|
|
|
* writes; the readable end of the pipe becomes the function's
|
|
|
|
* in argument.
|
|
|
|
*
|
|
|
|
* .out: Returns the readable pipe end from which the caller
|
|
|
|
* reads; the writable end of the pipe becomes the function's
|
|
|
|
* out argument.
|
|
|
|
*
|
|
|
|
* The caller of start_async() must close the returned FDs after it
|
|
|
|
* has completed reading from/writing from them.
|
|
|
|
*
|
|
|
|
* - Specify a file descriptor > 0 to be used by the function:
|
|
|
|
*
|
|
|
|
* .in: The FD must be readable; it becomes the function's in.
|
|
|
|
* .out: The FD must be writable; it becomes the function's out.
|
|
|
|
*
|
|
|
|
* The specified FD is closed by start_async(), even if it fails to
|
|
|
|
* run the function.
|
|
|
|
*/
|
2010-02-06 04:57:38 +08:00
|
|
|
int in; /* caller writes here and closes it */
|
2007-10-20 03:48:00 +08:00
|
|
|
int out; /* caller reads from here and closes it */
|
2010-03-10 04:00:36 +08:00
|
|
|
#ifdef NO_PTHREADS
|
2007-10-20 03:48:00 +08:00
|
|
|
pid_t pid;
|
2007-12-09 05:19:14 +08:00
|
|
|
#else
|
2010-03-06 23:40:42 +08:00
|
|
|
pthread_t tid;
|
2010-02-06 04:57:38 +08:00
|
|
|
int proc_in;
|
|
|
|
int proc_out;
|
2007-12-09 05:19:14 +08:00
|
|
|
#endif
|
run-command: teach async threads to ignore SIGPIPE
Async processes can be implemented as separate forked
processes, or as threads (depending on the NO_PTHREADS
setting). In the latter case, if an async thread gets
SIGPIPE, it takes down the whole process. This is obviously
bad if the main process was not otherwise going to die, but
even if we were going to die, it means the main process does
not have a chance to report a useful error message.
There's also the small matter that forked async processes
will not take the main process down on a signal, meaning git
will behave differently depending on the NO_PTHREADS
setting.
This patch fixes it by adding a new flag to "struct async"
to block SIGPIPE just in the async thread. In theory, this
should always be on (which makes async threads behave more
like async processes), but we would first want to make sure
that each async process we spawn is careful about checking
return codes from write() and would not spew endlessly into
a dead pipe. So let's start with it as optional, and we can
enable it for specific sites in future patches.
The natural name for this option would be "ignore_sigpipe",
since that's what it does for the threaded case. But since
that name might imply that we are ignoring it in all cases
(including the separate-process one), let's call it
"isolate_sigpipe". What we are really asking for is
isolation. I.e., not to have our main process taken down by
signals spawned by the async process. How that is
implemented is up to the run-command code.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2016-04-20 06:49:41 +08:00
|
|
|
int isolate_sigpipe;
|
2007-10-20 03:48:00 +08:00
|
|
|
};
|
|
|
|
|
2019-11-18 05:04:55 +08:00
|
|
|
/**
|
|
|
|
* Run a function asynchronously. Takes a pointer to a `struct
|
|
|
|
* async` that specifies the details and returns a set of pipe FDs
|
|
|
|
* for communication with the function. See below for details.
|
|
|
|
*/
|
2007-10-20 03:48:00 +08:00
|
|
|
int start_async(struct async *async);
|
2019-11-18 05:04:55 +08:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Wait for the completion of an asynchronous function that was
|
|
|
|
* started with start_async().
|
|
|
|
*/
|
2007-10-20 03:48:00 +08:00
|
|
|
int finish_async(struct async *async);
|
2019-11-18 05:04:55 +08:00
|
|
|
|
2015-09-02 04:22:43 +08:00
|
|
|
int in_async(void);
|
2018-11-03 16:48:39 +08:00
|
|
|
int async_with_fork(void);
|
2016-10-17 07:20:27 +08:00
|
|
|
void check_pipe(int err);
|
2007-10-20 03:48:00 +08:00
|
|
|
|
run-command: add an asynchronous parallel child processor
This allows to run external commands in parallel with ordered output
on stderr.
If we run external commands in parallel we cannot pipe the output directly
to the our stdout/err as it would mix up. So each process's output will
flow through a pipe, which we buffer. One subprocess can be directly
piped to out stdout/err for a low latency feedback to the user.
Example:
Let's assume we have 5 submodules A,B,C,D,E and each fetch takes a
different amount of time as the different submodules vary in size, then
the output of fetches in sequential order might look like this:
time -->
output: |---A---| |-B-| |-------C-------| |-D-| |-E-|
When we schedule these submodules into maximal two parallel processes,
a schedule and sample output over time may look like this:
process 1: |---A---| |-D-| |-E-|
process 2: |-B-| |-------C-------|
output: |---A---|B|---C-------|DE
So A will be perceived as it would run normally in the single child
version. As B has finished by the time A is done, we can dump its whole
progress buffer on stderr, such that it looks like it finished in no
time. Once that is done, C is determined to be the visible child and
its progress will be reported in real time.
So this way of output is really good for human consumption, as it only
changes the timing, not the actual output.
For machine consumption the output needs to be prepared in the tasks,
by either having a prefix per line or per block to indicate whose tasks
output is displayed, because the output order may not follow the
original sequential ordering:
|----A----| |--B--| |-C-|
will be scheduled to be all parallel:
process 1: |----A----|
process 2: |--B--|
process 3: |-C-|
output: |----A----|CB
This happens because C finished before B did, so it will be queued for
output before B.
To detect when a child has finished executing, we check interleaved
with other actions (such as checking the liveliness of children or
starting new processes) whether the stderr pipe still exists. Once a
child closed its stderr stream, we assume it is terminating very soon,
and use `finish_command()` from the single external process execution
interface to collect the exit status.
By maintaining the strong assumption of stderr being open until the
very end of a child process, we can avoid other hassle such as an
implementation using `waitpid(-1)`, which is not implemented in Windows.
Signed-off-by: Stefan Beller <sbeller@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-12-16 08:04:10 +08:00
|
|
|
/**
|
|
|
|
* This callback should initialize the child process and preload the
|
|
|
|
* error channel if desired. The preloading of is useful if you want to
|
|
|
|
* have a message printed directly before the output of the child process.
|
|
|
|
* pp_cb is the callback cookie as passed to run_processes_parallel.
|
|
|
|
* You can store a child process specific callback cookie in pp_task_cb.
|
|
|
|
*
|
run-command: add an "ungroup" option to run_process_parallel()
Extend the parallel execution API added in c553c72eed6 (run-command:
add an asynchronous parallel child processor, 2015-12-15) to support a
mode where the stdout and stderr of the processes isn't captured and
output in a deterministic order, instead we'll leave it to the kernel
and stdio to sort it out.
This gives the API same functionality as GNU parallel's --ungroup
option. As we'll see in a subsequent commit the main reason to want
this is to support stdout and stderr being connected to the TTY in the
case of jobs=1, demonstrated here with GNU parallel:
$ parallel --ungroup 'test -t {} && echo TTY || echo NTTY' ::: 1 2
TTY
TTY
$ parallel 'test -t {} && echo TTY || echo NTTY' ::: 1 2
NTTY
NTTY
Another is as GNU parallel's documentation notes a potential for
optimization. As demonstrated in next commit our results with "git
hook run" will be similar, but generally speaking this shows that if
you want to run processes in parallel where the exact order isn't
important this can be a lot faster:
$ hyperfine -r 3 -L o ,--ungroup 'parallel {o} seq ::: 10000000 >/dev/null '
Benchmark 1: parallel seq ::: 10000000 >/dev/null
Time (mean ± σ): 220.2 ms ± 9.3 ms [User: 124.9 ms, System: 96.1 ms]
Range (min … max): 212.3 ms … 230.5 ms 3 runs
Benchmark 2: parallel --ungroup seq ::: 10000000 >/dev/null
Time (mean ± σ): 154.7 ms ± 0.9 ms [User: 136.2 ms, System: 25.1 ms]
Range (min … max): 153.9 ms … 155.7 ms 3 runs
Summary
'parallel --ungroup seq ::: 10000000 >/dev/null ' ran
1.42 ± 0.06 times faster than 'parallel seq ::: 10000000 >/dev/null '
A large part of the juggling in the API is to make the API safer for
its maintenance and consumers alike.
For the maintenance of the API we e.g. avoid malloc()-ing the
"pp->pfd", ensuring that SANITIZE=address and other similar tools will
catch any unexpected misuse.
For API consumers we take pains to never pass the non-NULL "out"
buffer to an API user that provided the "ungroup" option. The
resulting code in t/helper/test-run-command.c isn't typical of such a
user, i.e. they'd typically use one mode or the other, and would know
whether they'd provided "ungroup" or not.
We could also avoid the strbuf_init() for "buffered_output" by having
"struct parallel_processes" use a static PARALLEL_PROCESSES_INIT
initializer, but let's leave that cleanup for later.
Using a global "run_processes_parallel_ungroup" variable to enable
this option is rather nasty, but is being done here to produce as
minimal of a change as possible for a subsequent regression fix. This
change is extracted from a larger initial version[1] which ends up
with a better end-state for the API, but in doing so needed to modify
all existing callers of the API. Let's defer that for now, and
narrowly focus on what we need for fixing the regression in the
subsequent commit.
It's safe to do this with a global variable because:
A) hook.c is the only user of it that sets it to non-zero, and before
we'll get any other API users we'll refactor away this method of
passing in the option, i.e. re-roll [1].
B) Even if hook.c wasn't the only user we don't have callers of this
API that concurrently invoke this parallel process starting API
itself in parallel.
As noted above "A" && "B" are rather nasty, and we don't want to live
with those caveats long-term, but for now they should be an acceptable
compromise.
1. https://lore.kernel.org/git/cover-v2-0.8-00000000000-20220518T195858Z-avarab@gmail.com/
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2022-06-07 16:48:19 +08:00
|
|
|
* See run_processes_parallel() below for a discussion of the "struct
|
|
|
|
* strbuf *out" parameter.
|
|
|
|
*
|
run-command: add an asynchronous parallel child processor
This allows to run external commands in parallel with ordered output
on stderr.
If we run external commands in parallel we cannot pipe the output directly
to the our stdout/err as it would mix up. So each process's output will
flow through a pipe, which we buffer. One subprocess can be directly
piped to out stdout/err for a low latency feedback to the user.
Example:
Let's assume we have 5 submodules A,B,C,D,E and each fetch takes a
different amount of time as the different submodules vary in size, then
the output of fetches in sequential order might look like this:
time -->
output: |---A---| |-B-| |-------C-------| |-D-| |-E-|
When we schedule these submodules into maximal two parallel processes,
a schedule and sample output over time may look like this:
process 1: |---A---| |-D-| |-E-|
process 2: |-B-| |-------C-------|
output: |---A---|B|---C-------|DE
So A will be perceived as it would run normally in the single child
version. As B has finished by the time A is done, we can dump its whole
progress buffer on stderr, such that it looks like it finished in no
time. Once that is done, C is determined to be the visible child and
its progress will be reported in real time.
So this way of output is really good for human consumption, as it only
changes the timing, not the actual output.
For machine consumption the output needs to be prepared in the tasks,
by either having a prefix per line or per block to indicate whose tasks
output is displayed, because the output order may not follow the
original sequential ordering:
|----A----| |--B--| |-C-|
will be scheduled to be all parallel:
process 1: |----A----|
process 2: |--B--|
process 3: |-C-|
output: |----A----|CB
This happens because C finished before B did, so it will be queued for
output before B.
To detect when a child has finished executing, we check interleaved
with other actions (such as checking the liveliness of children or
starting new processes) whether the stderr pipe still exists. Once a
child closed its stderr stream, we assume it is terminating very soon,
and use `finish_command()` from the single external process execution
interface to collect the exit status.
By maintaining the strong assumption of stderr being open until the
very end of a child process, we can avoid other hassle such as an
implementation using `waitpid(-1)`, which is not implemented in Windows.
Signed-off-by: Stefan Beller <sbeller@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-12-16 08:04:10 +08:00
|
|
|
* Even after returning 0 to indicate that there are no more processes,
|
|
|
|
* this function will be called again until there are no more running
|
|
|
|
* child processes.
|
|
|
|
*
|
|
|
|
* Return 1 if the next child is ready to run.
|
|
|
|
* Return 0 if there are currently no more tasks to be processed.
|
|
|
|
* To send a signal to other child processes for abortion,
|
|
|
|
* return the negative signal number.
|
|
|
|
*/
|
|
|
|
typedef int (*get_next_task_fn)(struct child_process *cp,
|
2016-03-01 10:07:16 +08:00
|
|
|
struct strbuf *out,
|
run-command: add an asynchronous parallel child processor
This allows to run external commands in parallel with ordered output
on stderr.
If we run external commands in parallel we cannot pipe the output directly
to the our stdout/err as it would mix up. So each process's output will
flow through a pipe, which we buffer. One subprocess can be directly
piped to out stdout/err for a low latency feedback to the user.
Example:
Let's assume we have 5 submodules A,B,C,D,E and each fetch takes a
different amount of time as the different submodules vary in size, then
the output of fetches in sequential order might look like this:
time -->
output: |---A---| |-B-| |-------C-------| |-D-| |-E-|
When we schedule these submodules into maximal two parallel processes,
a schedule and sample output over time may look like this:
process 1: |---A---| |-D-| |-E-|
process 2: |-B-| |-------C-------|
output: |---A---|B|---C-------|DE
So A will be perceived as it would run normally in the single child
version. As B has finished by the time A is done, we can dump its whole
progress buffer on stderr, such that it looks like it finished in no
time. Once that is done, C is determined to be the visible child and
its progress will be reported in real time.
So this way of output is really good for human consumption, as it only
changes the timing, not the actual output.
For machine consumption the output needs to be prepared in the tasks,
by either having a prefix per line or per block to indicate whose tasks
output is displayed, because the output order may not follow the
original sequential ordering:
|----A----| |--B--| |-C-|
will be scheduled to be all parallel:
process 1: |----A----|
process 2: |--B--|
process 3: |-C-|
output: |----A----|CB
This happens because C finished before B did, so it will be queued for
output before B.
To detect when a child has finished executing, we check interleaved
with other actions (such as checking the liveliness of children or
starting new processes) whether the stderr pipe still exists. Once a
child closed its stderr stream, we assume it is terminating very soon,
and use `finish_command()` from the single external process execution
interface to collect the exit status.
By maintaining the strong assumption of stderr being open until the
very end of a child process, we can avoid other hassle such as an
implementation using `waitpid(-1)`, which is not implemented in Windows.
Signed-off-by: Stefan Beller <sbeller@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-12-16 08:04:10 +08:00
|
|
|
void *pp_cb,
|
|
|
|
void **pp_task_cb);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* This callback is called whenever there are problems starting
|
|
|
|
* a new process.
|
|
|
|
*
|
run-command: add an "ungroup" option to run_process_parallel()
Extend the parallel execution API added in c553c72eed6 (run-command:
add an asynchronous parallel child processor, 2015-12-15) to support a
mode where the stdout and stderr of the processes isn't captured and
output in a deterministic order, instead we'll leave it to the kernel
and stdio to sort it out.
This gives the API same functionality as GNU parallel's --ungroup
option. As we'll see in a subsequent commit the main reason to want
this is to support stdout and stderr being connected to the TTY in the
case of jobs=1, demonstrated here with GNU parallel:
$ parallel --ungroup 'test -t {} && echo TTY || echo NTTY' ::: 1 2
TTY
TTY
$ parallel 'test -t {} && echo TTY || echo NTTY' ::: 1 2
NTTY
NTTY
Another is as GNU parallel's documentation notes a potential for
optimization. As demonstrated in next commit our results with "git
hook run" will be similar, but generally speaking this shows that if
you want to run processes in parallel where the exact order isn't
important this can be a lot faster:
$ hyperfine -r 3 -L o ,--ungroup 'parallel {o} seq ::: 10000000 >/dev/null '
Benchmark 1: parallel seq ::: 10000000 >/dev/null
Time (mean ± σ): 220.2 ms ± 9.3 ms [User: 124.9 ms, System: 96.1 ms]
Range (min … max): 212.3 ms … 230.5 ms 3 runs
Benchmark 2: parallel --ungroup seq ::: 10000000 >/dev/null
Time (mean ± σ): 154.7 ms ± 0.9 ms [User: 136.2 ms, System: 25.1 ms]
Range (min … max): 153.9 ms … 155.7 ms 3 runs
Summary
'parallel --ungroup seq ::: 10000000 >/dev/null ' ran
1.42 ± 0.06 times faster than 'parallel seq ::: 10000000 >/dev/null '
A large part of the juggling in the API is to make the API safer for
its maintenance and consumers alike.
For the maintenance of the API we e.g. avoid malloc()-ing the
"pp->pfd", ensuring that SANITIZE=address and other similar tools will
catch any unexpected misuse.
For API consumers we take pains to never pass the non-NULL "out"
buffer to an API user that provided the "ungroup" option. The
resulting code in t/helper/test-run-command.c isn't typical of such a
user, i.e. they'd typically use one mode or the other, and would know
whether they'd provided "ungroup" or not.
We could also avoid the strbuf_init() for "buffered_output" by having
"struct parallel_processes" use a static PARALLEL_PROCESSES_INIT
initializer, but let's leave that cleanup for later.
Using a global "run_processes_parallel_ungroup" variable to enable
this option is rather nasty, but is being done here to produce as
minimal of a change as possible for a subsequent regression fix. This
change is extracted from a larger initial version[1] which ends up
with a better end-state for the API, but in doing so needed to modify
all existing callers of the API. Let's defer that for now, and
narrowly focus on what we need for fixing the regression in the
subsequent commit.
It's safe to do this with a global variable because:
A) hook.c is the only user of it that sets it to non-zero, and before
we'll get any other API users we'll refactor away this method of
passing in the option, i.e. re-roll [1].
B) Even if hook.c wasn't the only user we don't have callers of this
API that concurrently invoke this parallel process starting API
itself in parallel.
As noted above "A" && "B" are rather nasty, and we don't want to live
with those caveats long-term, but for now they should be an acceptable
compromise.
1. https://lore.kernel.org/git/cover-v2-0.8-00000000000-20220518T195858Z-avarab@gmail.com/
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2022-06-07 16:48:19 +08:00
|
|
|
* See run_processes_parallel() below for a discussion of the "struct
|
|
|
|
* strbuf *out" parameter.
|
run-command: add an asynchronous parallel child processor
This allows to run external commands in parallel with ordered output
on stderr.
If we run external commands in parallel we cannot pipe the output directly
to the our stdout/err as it would mix up. So each process's output will
flow through a pipe, which we buffer. One subprocess can be directly
piped to out stdout/err for a low latency feedback to the user.
Example:
Let's assume we have 5 submodules A,B,C,D,E and each fetch takes a
different amount of time as the different submodules vary in size, then
the output of fetches in sequential order might look like this:
time -->
output: |---A---| |-B-| |-------C-------| |-D-| |-E-|
When we schedule these submodules into maximal two parallel processes,
a schedule and sample output over time may look like this:
process 1: |---A---| |-D-| |-E-|
process 2: |-B-| |-------C-------|
output: |---A---|B|---C-------|DE
So A will be perceived as it would run normally in the single child
version. As B has finished by the time A is done, we can dump its whole
progress buffer on stderr, such that it looks like it finished in no
time. Once that is done, C is determined to be the visible child and
its progress will be reported in real time.
So this way of output is really good for human consumption, as it only
changes the timing, not the actual output.
For machine consumption the output needs to be prepared in the tasks,
by either having a prefix per line or per block to indicate whose tasks
output is displayed, because the output order may not follow the
original sequential ordering:
|----A----| |--B--| |-C-|
will be scheduled to be all parallel:
process 1: |----A----|
process 2: |--B--|
process 3: |-C-|
output: |----A----|CB
This happens because C finished before B did, so it will be queued for
output before B.
To detect when a child has finished executing, we check interleaved
with other actions (such as checking the liveliness of children or
starting new processes) whether the stderr pipe still exists. Once a
child closed its stderr stream, we assume it is terminating very soon,
and use `finish_command()` from the single external process execution
interface to collect the exit status.
By maintaining the strong assumption of stderr being open until the
very end of a child process, we can avoid other hassle such as an
implementation using `waitpid(-1)`, which is not implemented in Windows.
Signed-off-by: Stefan Beller <sbeller@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-12-16 08:04:10 +08:00
|
|
|
*
|
|
|
|
* pp_cb is the callback cookie as passed into run_processes_parallel,
|
|
|
|
* pp_task_cb is the callback cookie as passed into get_next_task_fn.
|
|
|
|
*
|
|
|
|
* Return 0 to continue the parallel processing. To abort return non zero.
|
|
|
|
* To send a signal to other child processes for abortion, return
|
|
|
|
* the negative signal number.
|
|
|
|
*/
|
2016-03-01 10:07:16 +08:00
|
|
|
typedef int (*start_failure_fn)(struct strbuf *out,
|
run-command: add an asynchronous parallel child processor
This allows to run external commands in parallel with ordered output
on stderr.
If we run external commands in parallel we cannot pipe the output directly
to the our stdout/err as it would mix up. So each process's output will
flow through a pipe, which we buffer. One subprocess can be directly
piped to out stdout/err for a low latency feedback to the user.
Example:
Let's assume we have 5 submodules A,B,C,D,E and each fetch takes a
different amount of time as the different submodules vary in size, then
the output of fetches in sequential order might look like this:
time -->
output: |---A---| |-B-| |-------C-------| |-D-| |-E-|
When we schedule these submodules into maximal two parallel processes,
a schedule and sample output over time may look like this:
process 1: |---A---| |-D-| |-E-|
process 2: |-B-| |-------C-------|
output: |---A---|B|---C-------|DE
So A will be perceived as it would run normally in the single child
version. As B has finished by the time A is done, we can dump its whole
progress buffer on stderr, such that it looks like it finished in no
time. Once that is done, C is determined to be the visible child and
its progress will be reported in real time.
So this way of output is really good for human consumption, as it only
changes the timing, not the actual output.
For machine consumption the output needs to be prepared in the tasks,
by either having a prefix per line or per block to indicate whose tasks
output is displayed, because the output order may not follow the
original sequential ordering:
|----A----| |--B--| |-C-|
will be scheduled to be all parallel:
process 1: |----A----|
process 2: |--B--|
process 3: |-C-|
output: |----A----|CB
This happens because C finished before B did, so it will be queued for
output before B.
To detect when a child has finished executing, we check interleaved
with other actions (such as checking the liveliness of children or
starting new processes) whether the stderr pipe still exists. Once a
child closed its stderr stream, we assume it is terminating very soon,
and use `finish_command()` from the single external process execution
interface to collect the exit status.
By maintaining the strong assumption of stderr being open until the
very end of a child process, we can avoid other hassle such as an
implementation using `waitpid(-1)`, which is not implemented in Windows.
Signed-off-by: Stefan Beller <sbeller@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-12-16 08:04:10 +08:00
|
|
|
void *pp_cb,
|
|
|
|
void *pp_task_cb);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* This callback is called on every child process that finished processing.
|
|
|
|
*
|
run-command: add an "ungroup" option to run_process_parallel()
Extend the parallel execution API added in c553c72eed6 (run-command:
add an asynchronous parallel child processor, 2015-12-15) to support a
mode where the stdout and stderr of the processes isn't captured and
output in a deterministic order, instead we'll leave it to the kernel
and stdio to sort it out.
This gives the API same functionality as GNU parallel's --ungroup
option. As we'll see in a subsequent commit the main reason to want
this is to support stdout and stderr being connected to the TTY in the
case of jobs=1, demonstrated here with GNU parallel:
$ parallel --ungroup 'test -t {} && echo TTY || echo NTTY' ::: 1 2
TTY
TTY
$ parallel 'test -t {} && echo TTY || echo NTTY' ::: 1 2
NTTY
NTTY
Another is as GNU parallel's documentation notes a potential for
optimization. As demonstrated in next commit our results with "git
hook run" will be similar, but generally speaking this shows that if
you want to run processes in parallel where the exact order isn't
important this can be a lot faster:
$ hyperfine -r 3 -L o ,--ungroup 'parallel {o} seq ::: 10000000 >/dev/null '
Benchmark 1: parallel seq ::: 10000000 >/dev/null
Time (mean ± σ): 220.2 ms ± 9.3 ms [User: 124.9 ms, System: 96.1 ms]
Range (min … max): 212.3 ms … 230.5 ms 3 runs
Benchmark 2: parallel --ungroup seq ::: 10000000 >/dev/null
Time (mean ± σ): 154.7 ms ± 0.9 ms [User: 136.2 ms, System: 25.1 ms]
Range (min … max): 153.9 ms … 155.7 ms 3 runs
Summary
'parallel --ungroup seq ::: 10000000 >/dev/null ' ran
1.42 ± 0.06 times faster than 'parallel seq ::: 10000000 >/dev/null '
A large part of the juggling in the API is to make the API safer for
its maintenance and consumers alike.
For the maintenance of the API we e.g. avoid malloc()-ing the
"pp->pfd", ensuring that SANITIZE=address and other similar tools will
catch any unexpected misuse.
For API consumers we take pains to never pass the non-NULL "out"
buffer to an API user that provided the "ungroup" option. The
resulting code in t/helper/test-run-command.c isn't typical of such a
user, i.e. they'd typically use one mode or the other, and would know
whether they'd provided "ungroup" or not.
We could also avoid the strbuf_init() for "buffered_output" by having
"struct parallel_processes" use a static PARALLEL_PROCESSES_INIT
initializer, but let's leave that cleanup for later.
Using a global "run_processes_parallel_ungroup" variable to enable
this option is rather nasty, but is being done here to produce as
minimal of a change as possible for a subsequent regression fix. This
change is extracted from a larger initial version[1] which ends up
with a better end-state for the API, but in doing so needed to modify
all existing callers of the API. Let's defer that for now, and
narrowly focus on what we need for fixing the regression in the
subsequent commit.
It's safe to do this with a global variable because:
A) hook.c is the only user of it that sets it to non-zero, and before
we'll get any other API users we'll refactor away this method of
passing in the option, i.e. re-roll [1].
B) Even if hook.c wasn't the only user we don't have callers of this
API that concurrently invoke this parallel process starting API
itself in parallel.
As noted above "A" && "B" are rather nasty, and we don't want to live
with those caveats long-term, but for now they should be an acceptable
compromise.
1. https://lore.kernel.org/git/cover-v2-0.8-00000000000-20220518T195858Z-avarab@gmail.com/
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2022-06-07 16:48:19 +08:00
|
|
|
* See run_processes_parallel() below for a discussion of the "struct
|
|
|
|
* strbuf *out" parameter.
|
run-command: add an asynchronous parallel child processor
This allows to run external commands in parallel with ordered output
on stderr.
If we run external commands in parallel we cannot pipe the output directly
to the our stdout/err as it would mix up. So each process's output will
flow through a pipe, which we buffer. One subprocess can be directly
piped to out stdout/err for a low latency feedback to the user.
Example:
Let's assume we have 5 submodules A,B,C,D,E and each fetch takes a
different amount of time as the different submodules vary in size, then
the output of fetches in sequential order might look like this:
time -->
output: |---A---| |-B-| |-------C-------| |-D-| |-E-|
When we schedule these submodules into maximal two parallel processes,
a schedule and sample output over time may look like this:
process 1: |---A---| |-D-| |-E-|
process 2: |-B-| |-------C-------|
output: |---A---|B|---C-------|DE
So A will be perceived as it would run normally in the single child
version. As B has finished by the time A is done, we can dump its whole
progress buffer on stderr, such that it looks like it finished in no
time. Once that is done, C is determined to be the visible child and
its progress will be reported in real time.
So this way of output is really good for human consumption, as it only
changes the timing, not the actual output.
For machine consumption the output needs to be prepared in the tasks,
by either having a prefix per line or per block to indicate whose tasks
output is displayed, because the output order may not follow the
original sequential ordering:
|----A----| |--B--| |-C-|
will be scheduled to be all parallel:
process 1: |----A----|
process 2: |--B--|
process 3: |-C-|
output: |----A----|CB
This happens because C finished before B did, so it will be queued for
output before B.
To detect when a child has finished executing, we check interleaved
with other actions (such as checking the liveliness of children or
starting new processes) whether the stderr pipe still exists. Once a
child closed its stderr stream, we assume it is terminating very soon,
and use `finish_command()` from the single external process execution
interface to collect the exit status.
By maintaining the strong assumption of stderr being open until the
very end of a child process, we can avoid other hassle such as an
implementation using `waitpid(-1)`, which is not implemented in Windows.
Signed-off-by: Stefan Beller <sbeller@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-12-16 08:04:10 +08:00
|
|
|
*
|
|
|
|
* pp_cb is the callback cookie as passed into run_processes_parallel,
|
|
|
|
* pp_task_cb is the callback cookie as passed into get_next_task_fn.
|
|
|
|
*
|
|
|
|
* Return 0 to continue the parallel processing. To abort return non zero.
|
|
|
|
* To send a signal to other child processes for abortion, return
|
|
|
|
* the negative signal number.
|
|
|
|
*/
|
|
|
|
typedef int (*task_finished_fn)(int result,
|
2016-03-01 10:07:16 +08:00
|
|
|
struct strbuf *out,
|
run-command: add an asynchronous parallel child processor
This allows to run external commands in parallel with ordered output
on stderr.
If we run external commands in parallel we cannot pipe the output directly
to the our stdout/err as it would mix up. So each process's output will
flow through a pipe, which we buffer. One subprocess can be directly
piped to out stdout/err for a low latency feedback to the user.
Example:
Let's assume we have 5 submodules A,B,C,D,E and each fetch takes a
different amount of time as the different submodules vary in size, then
the output of fetches in sequential order might look like this:
time -->
output: |---A---| |-B-| |-------C-------| |-D-| |-E-|
When we schedule these submodules into maximal two parallel processes,
a schedule and sample output over time may look like this:
process 1: |---A---| |-D-| |-E-|
process 2: |-B-| |-------C-------|
output: |---A---|B|---C-------|DE
So A will be perceived as it would run normally in the single child
version. As B has finished by the time A is done, we can dump its whole
progress buffer on stderr, such that it looks like it finished in no
time. Once that is done, C is determined to be the visible child and
its progress will be reported in real time.
So this way of output is really good for human consumption, as it only
changes the timing, not the actual output.
For machine consumption the output needs to be prepared in the tasks,
by either having a prefix per line or per block to indicate whose tasks
output is displayed, because the output order may not follow the
original sequential ordering:
|----A----| |--B--| |-C-|
will be scheduled to be all parallel:
process 1: |----A----|
process 2: |--B--|
process 3: |-C-|
output: |----A----|CB
This happens because C finished before B did, so it will be queued for
output before B.
To detect when a child has finished executing, we check interleaved
with other actions (such as checking the liveliness of children or
starting new processes) whether the stderr pipe still exists. Once a
child closed its stderr stream, we assume it is terminating very soon,
and use `finish_command()` from the single external process execution
interface to collect the exit status.
By maintaining the strong assumption of stderr being open until the
very end of a child process, we can avoid other hassle such as an
implementation using `waitpid(-1)`, which is not implemented in Windows.
Signed-off-by: Stefan Beller <sbeller@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-12-16 08:04:10 +08:00
|
|
|
void *pp_cb,
|
|
|
|
void *pp_task_cb);
|
|
|
|
|
|
|
|
/**
|
2022-10-13 05:02:26 +08:00
|
|
|
* Option used by run_processes_parallel(), { 0 }-initialized means no
|
|
|
|
* options.
|
|
|
|
*/
|
|
|
|
struct run_process_parallel_opts
|
|
|
|
{
|
|
|
|
/**
|
|
|
|
* tr2_category & tr2_label: sets the trace2 category and label for
|
|
|
|
* logging. These must either be unset, or both of them must be set.
|
|
|
|
*/
|
|
|
|
const char *tr2_category;
|
|
|
|
const char *tr2_label;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* processes: see 'processes' in run_processes_parallel() below.
|
|
|
|
*/
|
|
|
|
size_t processes;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ungroup: see 'ungroup' in run_processes_parallel() below.
|
|
|
|
*/
|
|
|
|
unsigned int ungroup:1;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* get_next_task: See get_next_task_fn() above. This must be
|
|
|
|
* specified.
|
|
|
|
*/
|
|
|
|
get_next_task_fn get_next_task;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* start_failure: See start_failure_fn() above. This can be
|
|
|
|
* NULL to omit any special handling.
|
|
|
|
*/
|
|
|
|
start_failure_fn start_failure;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* task_finished: See task_finished_fn() above. This can be
|
|
|
|
* NULL to omit any special handling.
|
|
|
|
*/
|
|
|
|
task_finished_fn task_finished;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* data: user data, will be passed as "pp_cb" to the callback
|
|
|
|
* parameters.
|
|
|
|
*/
|
|
|
|
void *data;
|
|
|
|
};
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Options are passed via the "struct run_process_parallel_opts" above.
|
|
|
|
*
|
|
|
|
* Runs N 'processes' at the same time. Whenever a process can be
|
|
|
|
* started, the callback opts.get_next_task is called to obtain the data
|
run-command: add an asynchronous parallel child processor
This allows to run external commands in parallel with ordered output
on stderr.
If we run external commands in parallel we cannot pipe the output directly
to the our stdout/err as it would mix up. So each process's output will
flow through a pipe, which we buffer. One subprocess can be directly
piped to out stdout/err for a low latency feedback to the user.
Example:
Let's assume we have 5 submodules A,B,C,D,E and each fetch takes a
different amount of time as the different submodules vary in size, then
the output of fetches in sequential order might look like this:
time -->
output: |---A---| |-B-| |-------C-------| |-D-| |-E-|
When we schedule these submodules into maximal two parallel processes,
a schedule and sample output over time may look like this:
process 1: |---A---| |-D-| |-E-|
process 2: |-B-| |-------C-------|
output: |---A---|B|---C-------|DE
So A will be perceived as it would run normally in the single child
version. As B has finished by the time A is done, we can dump its whole
progress buffer on stderr, such that it looks like it finished in no
time. Once that is done, C is determined to be the visible child and
its progress will be reported in real time.
So this way of output is really good for human consumption, as it only
changes the timing, not the actual output.
For machine consumption the output needs to be prepared in the tasks,
by either having a prefix per line or per block to indicate whose tasks
output is displayed, because the output order may not follow the
original sequential ordering:
|----A----| |--B--| |-C-|
will be scheduled to be all parallel:
process 1: |----A----|
process 2: |--B--|
process 3: |-C-|
output: |----A----|CB
This happens because C finished before B did, so it will be queued for
output before B.
To detect when a child has finished executing, we check interleaved
with other actions (such as checking the liveliness of children or
starting new processes) whether the stderr pipe still exists. Once a
child closed its stderr stream, we assume it is terminating very soon,
and use `finish_command()` from the single external process execution
interface to collect the exit status.
By maintaining the strong assumption of stderr being open until the
very end of a child process, we can avoid other hassle such as an
implementation using `waitpid(-1)`, which is not implemented in Windows.
Signed-off-by: Stefan Beller <sbeller@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-12-16 08:04:10 +08:00
|
|
|
* required to start another child process.
|
|
|
|
*
|
|
|
|
* The children started via this function run in parallel. Their output
|
|
|
|
* (both stdout and stderr) is routed to stderr in a manner that output
|
run-command: add an "ungroup" option to run_process_parallel()
Extend the parallel execution API added in c553c72eed6 (run-command:
add an asynchronous parallel child processor, 2015-12-15) to support a
mode where the stdout and stderr of the processes isn't captured and
output in a deterministic order, instead we'll leave it to the kernel
and stdio to sort it out.
This gives the API same functionality as GNU parallel's --ungroup
option. As we'll see in a subsequent commit the main reason to want
this is to support stdout and stderr being connected to the TTY in the
case of jobs=1, demonstrated here with GNU parallel:
$ parallel --ungroup 'test -t {} && echo TTY || echo NTTY' ::: 1 2
TTY
TTY
$ parallel 'test -t {} && echo TTY || echo NTTY' ::: 1 2
NTTY
NTTY
Another is as GNU parallel's documentation notes a potential for
optimization. As demonstrated in next commit our results with "git
hook run" will be similar, but generally speaking this shows that if
you want to run processes in parallel where the exact order isn't
important this can be a lot faster:
$ hyperfine -r 3 -L o ,--ungroup 'parallel {o} seq ::: 10000000 >/dev/null '
Benchmark 1: parallel seq ::: 10000000 >/dev/null
Time (mean ± σ): 220.2 ms ± 9.3 ms [User: 124.9 ms, System: 96.1 ms]
Range (min … max): 212.3 ms … 230.5 ms 3 runs
Benchmark 2: parallel --ungroup seq ::: 10000000 >/dev/null
Time (mean ± σ): 154.7 ms ± 0.9 ms [User: 136.2 ms, System: 25.1 ms]
Range (min … max): 153.9 ms … 155.7 ms 3 runs
Summary
'parallel --ungroup seq ::: 10000000 >/dev/null ' ran
1.42 ± 0.06 times faster than 'parallel seq ::: 10000000 >/dev/null '
A large part of the juggling in the API is to make the API safer for
its maintenance and consumers alike.
For the maintenance of the API we e.g. avoid malloc()-ing the
"pp->pfd", ensuring that SANITIZE=address and other similar tools will
catch any unexpected misuse.
For API consumers we take pains to never pass the non-NULL "out"
buffer to an API user that provided the "ungroup" option. The
resulting code in t/helper/test-run-command.c isn't typical of such a
user, i.e. they'd typically use one mode or the other, and would know
whether they'd provided "ungroup" or not.
We could also avoid the strbuf_init() for "buffered_output" by having
"struct parallel_processes" use a static PARALLEL_PROCESSES_INIT
initializer, but let's leave that cleanup for later.
Using a global "run_processes_parallel_ungroup" variable to enable
this option is rather nasty, but is being done here to produce as
minimal of a change as possible for a subsequent regression fix. This
change is extracted from a larger initial version[1] which ends up
with a better end-state for the API, but in doing so needed to modify
all existing callers of the API. Let's defer that for now, and
narrowly focus on what we need for fixing the regression in the
subsequent commit.
It's safe to do this with a global variable because:
A) hook.c is the only user of it that sets it to non-zero, and before
we'll get any other API users we'll refactor away this method of
passing in the option, i.e. re-roll [1].
B) Even if hook.c wasn't the only user we don't have callers of this
API that concurrently invoke this parallel process starting API
itself in parallel.
As noted above "A" && "B" are rather nasty, and we don't want to live
with those caveats long-term, but for now they should be an acceptable
compromise.
1. https://lore.kernel.org/git/cover-v2-0.8-00000000000-20220518T195858Z-avarab@gmail.com/
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2022-06-07 16:48:19 +08:00
|
|
|
* from different tasks does not interleave (but see "ungroup" below).
|
run-command: add an asynchronous parallel child processor
This allows to run external commands in parallel with ordered output
on stderr.
If we run external commands in parallel we cannot pipe the output directly
to the our stdout/err as it would mix up. So each process's output will
flow through a pipe, which we buffer. One subprocess can be directly
piped to out stdout/err for a low latency feedback to the user.
Example:
Let's assume we have 5 submodules A,B,C,D,E and each fetch takes a
different amount of time as the different submodules vary in size, then
the output of fetches in sequential order might look like this:
time -->
output: |---A---| |-B-| |-------C-------| |-D-| |-E-|
When we schedule these submodules into maximal two parallel processes,
a schedule and sample output over time may look like this:
process 1: |---A---| |-D-| |-E-|
process 2: |-B-| |-------C-------|
output: |---A---|B|---C-------|DE
So A will be perceived as it would run normally in the single child
version. As B has finished by the time A is done, we can dump its whole
progress buffer on stderr, such that it looks like it finished in no
time. Once that is done, C is determined to be the visible child and
its progress will be reported in real time.
So this way of output is really good for human consumption, as it only
changes the timing, not the actual output.
For machine consumption the output needs to be prepared in the tasks,
by either having a prefix per line or per block to indicate whose tasks
output is displayed, because the output order may not follow the
original sequential ordering:
|----A----| |--B--| |-C-|
will be scheduled to be all parallel:
process 1: |----A----|
process 2: |--B--|
process 3: |-C-|
output: |----A----|CB
This happens because C finished before B did, so it will be queued for
output before B.
To detect when a child has finished executing, we check interleaved
with other actions (such as checking the liveliness of children or
starting new processes) whether the stderr pipe still exists. Once a
child closed its stderr stream, we assume it is terminating very soon,
and use `finish_command()` from the single external process execution
interface to collect the exit status.
By maintaining the strong assumption of stderr being open until the
very end of a child process, we can avoid other hassle such as an
implementation using `waitpid(-1)`, which is not implemented in Windows.
Signed-off-by: Stefan Beller <sbeller@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-12-16 08:04:10 +08:00
|
|
|
*
|
run-command: add an "ungroup" option to run_process_parallel()
Extend the parallel execution API added in c553c72eed6 (run-command:
add an asynchronous parallel child processor, 2015-12-15) to support a
mode where the stdout and stderr of the processes isn't captured and
output in a deterministic order, instead we'll leave it to the kernel
and stdio to sort it out.
This gives the API same functionality as GNU parallel's --ungroup
option. As we'll see in a subsequent commit the main reason to want
this is to support stdout and stderr being connected to the TTY in the
case of jobs=1, demonstrated here with GNU parallel:
$ parallel --ungroup 'test -t {} && echo TTY || echo NTTY' ::: 1 2
TTY
TTY
$ parallel 'test -t {} && echo TTY || echo NTTY' ::: 1 2
NTTY
NTTY
Another is as GNU parallel's documentation notes a potential for
optimization. As demonstrated in next commit our results with "git
hook run" will be similar, but generally speaking this shows that if
you want to run processes in parallel where the exact order isn't
important this can be a lot faster:
$ hyperfine -r 3 -L o ,--ungroup 'parallel {o} seq ::: 10000000 >/dev/null '
Benchmark 1: parallel seq ::: 10000000 >/dev/null
Time (mean ± σ): 220.2 ms ± 9.3 ms [User: 124.9 ms, System: 96.1 ms]
Range (min … max): 212.3 ms … 230.5 ms 3 runs
Benchmark 2: parallel --ungroup seq ::: 10000000 >/dev/null
Time (mean ± σ): 154.7 ms ± 0.9 ms [User: 136.2 ms, System: 25.1 ms]
Range (min … max): 153.9 ms … 155.7 ms 3 runs
Summary
'parallel --ungroup seq ::: 10000000 >/dev/null ' ran
1.42 ± 0.06 times faster than 'parallel seq ::: 10000000 >/dev/null '
A large part of the juggling in the API is to make the API safer for
its maintenance and consumers alike.
For the maintenance of the API we e.g. avoid malloc()-ing the
"pp->pfd", ensuring that SANITIZE=address and other similar tools will
catch any unexpected misuse.
For API consumers we take pains to never pass the non-NULL "out"
buffer to an API user that provided the "ungroup" option. The
resulting code in t/helper/test-run-command.c isn't typical of such a
user, i.e. they'd typically use one mode or the other, and would know
whether they'd provided "ungroup" or not.
We could also avoid the strbuf_init() for "buffered_output" by having
"struct parallel_processes" use a static PARALLEL_PROCESSES_INIT
initializer, but let's leave that cleanup for later.
Using a global "run_processes_parallel_ungroup" variable to enable
this option is rather nasty, but is being done here to produce as
minimal of a change as possible for a subsequent regression fix. This
change is extracted from a larger initial version[1] which ends up
with a better end-state for the API, but in doing so needed to modify
all existing callers of the API. Let's defer that for now, and
narrowly focus on what we need for fixing the regression in the
subsequent commit.
It's safe to do this with a global variable because:
A) hook.c is the only user of it that sets it to non-zero, and before
we'll get any other API users we'll refactor away this method of
passing in the option, i.e. re-roll [1].
B) Even if hook.c wasn't the only user we don't have callers of this
API that concurrently invoke this parallel process starting API
itself in parallel.
As noted above "A" && "B" are rather nasty, and we don't want to live
with those caveats long-term, but for now they should be an acceptable
compromise.
1. https://lore.kernel.org/git/cover-v2-0.8-00000000000-20220518T195858Z-avarab@gmail.com/
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2022-06-07 16:48:19 +08:00
|
|
|
* If the "ungroup" option isn't specified, the API will set the
|
|
|
|
* "stdout_to_stderr" parameter in "struct child_process" and provide
|
|
|
|
* the callbacks with a "struct strbuf *out" parameter to write output
|
|
|
|
* to. In this case the callbacks must not write to stdout or
|
|
|
|
* stderr as such output will mess up the output of the other parallel
|
|
|
|
* processes. If "ungroup" option is specified callbacks will get a
|
|
|
|
* NULL "struct strbuf *out" parameter, and are responsible for
|
|
|
|
* emitting their own output, including dealing with any race
|
|
|
|
* conditions due to writing in parallel to stdout and stderr.
|
run-command: add an asynchronous parallel child processor
This allows to run external commands in parallel with ordered output
on stderr.
If we run external commands in parallel we cannot pipe the output directly
to the our stdout/err as it would mix up. So each process's output will
flow through a pipe, which we buffer. One subprocess can be directly
piped to out stdout/err for a low latency feedback to the user.
Example:
Let's assume we have 5 submodules A,B,C,D,E and each fetch takes a
different amount of time as the different submodules vary in size, then
the output of fetches in sequential order might look like this:
time -->
output: |---A---| |-B-| |-------C-------| |-D-| |-E-|
When we schedule these submodules into maximal two parallel processes,
a schedule and sample output over time may look like this:
process 1: |---A---| |-D-| |-E-|
process 2: |-B-| |-------C-------|
output: |---A---|B|---C-------|DE
So A will be perceived as it would run normally in the single child
version. As B has finished by the time A is done, we can dump its whole
progress buffer on stderr, such that it looks like it finished in no
time. Once that is done, C is determined to be the visible child and
its progress will be reported in real time.
So this way of output is really good for human consumption, as it only
changes the timing, not the actual output.
For machine consumption the output needs to be prepared in the tasks,
by either having a prefix per line or per block to indicate whose tasks
output is displayed, because the output order may not follow the
original sequential ordering:
|----A----| |--B--| |-C-|
will be scheduled to be all parallel:
process 1: |----A----|
process 2: |--B--|
process 3: |-C-|
output: |----A----|CB
This happens because C finished before B did, so it will be queued for
output before B.
To detect when a child has finished executing, we check interleaved
with other actions (such as checking the liveliness of children or
starting new processes) whether the stderr pipe still exists. Once a
child closed its stderr stream, we assume it is terminating very soon,
and use `finish_command()` from the single external process execution
interface to collect the exit status.
By maintaining the strong assumption of stderr being open until the
very end of a child process, we can avoid other hassle such as an
implementation using `waitpid(-1)`, which is not implemented in Windows.
Signed-off-by: Stefan Beller <sbeller@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-12-16 08:04:10 +08:00
|
|
|
*/
|
2022-10-13 05:02:26 +08:00
|
|
|
void run_processes_parallel(const struct run_process_parallel_opts *opts);
|
run-command: add an asynchronous parallel child processor
This allows to run external commands in parallel with ordered output
on stderr.
If we run external commands in parallel we cannot pipe the output directly
to the our stdout/err as it would mix up. So each process's output will
flow through a pipe, which we buffer. One subprocess can be directly
piped to out stdout/err for a low latency feedback to the user.
Example:
Let's assume we have 5 submodules A,B,C,D,E and each fetch takes a
different amount of time as the different submodules vary in size, then
the output of fetches in sequential order might look like this:
time -->
output: |---A---| |-B-| |-------C-------| |-D-| |-E-|
When we schedule these submodules into maximal two parallel processes,
a schedule and sample output over time may look like this:
process 1: |---A---| |-D-| |-E-|
process 2: |-B-| |-------C-------|
output: |---A---|B|---C-------|DE
So A will be perceived as it would run normally in the single child
version. As B has finished by the time A is done, we can dump its whole
progress buffer on stderr, such that it looks like it finished in no
time. Once that is done, C is determined to be the visible child and
its progress will be reported in real time.
So this way of output is really good for human consumption, as it only
changes the timing, not the actual output.
For machine consumption the output needs to be prepared in the tasks,
by either having a prefix per line or per block to indicate whose tasks
output is displayed, because the output order may not follow the
original sequential ordering:
|----A----| |--B--| |-C-|
will be scheduled to be all parallel:
process 1: |----A----|
process 2: |--B--|
process 3: |-C-|
output: |----A----|CB
This happens because C finished before B did, so it will be queued for
output before B.
To detect when a child has finished executing, we check interleaved
with other actions (such as checking the liveliness of children or
starting new processes) whether the stderr pipe still exists. Once a
child closed its stderr stream, we assume it is terminating very soon,
and use `finish_command()` from the single external process execution
interface to collect the exit status.
By maintaining the strong assumption of stderr being open until the
very end of a child process, we can avoid other hassle such as an
implementation using `waitpid(-1)`, which is not implemented in Windows.
Signed-off-by: Stefan Beller <sbeller@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-12-16 08:04:10 +08:00
|
|
|
|
2021-06-18 01:13:25 +08:00
|
|
|
/**
|
2022-06-02 17:09:51 +08:00
|
|
|
* Convenience function which prepares env for a command to be run in a
|
|
|
|
* new repo. This adds all GIT_* environment variables to env with the
|
2021-06-18 01:13:25 +08:00
|
|
|
* exception of GIT_CONFIG_PARAMETERS and GIT_CONFIG_COUNT (which cause the
|
|
|
|
* corresponding environment variables to be unset in the subprocess) and adds
|
|
|
|
* an environment variable pointing to new_git_dir. See local_repo_env in
|
2023-05-16 14:33:57 +08:00
|
|
|
* environment.h for more information.
|
2021-06-18 01:13:25 +08:00
|
|
|
*/
|
2022-06-02 17:09:51 +08:00
|
|
|
void prepare_other_repo_env(struct strvec *env, const char *new_git_dir);
|
2021-06-18 01:13:25 +08:00
|
|
|
|
2021-09-20 23:36:17 +08:00
|
|
|
/**
|
|
|
|
* Possible return values for start_bg_command().
|
|
|
|
*/
|
|
|
|
enum start_bg_result {
|
|
|
|
/* child process is "ready" */
|
|
|
|
SBGR_READY = 0,
|
|
|
|
|
|
|
|
/* child process could not be started */
|
|
|
|
SBGR_ERROR,
|
|
|
|
|
|
|
|
/* callback error when testing for "ready" */
|
|
|
|
SBGR_CB_ERROR,
|
|
|
|
|
|
|
|
/* timeout expired waiting for child to become "ready" */
|
|
|
|
SBGR_TIMEOUT,
|
|
|
|
|
|
|
|
/* child process exited or was signalled before becomming "ready" */
|
|
|
|
SBGR_DIED,
|
|
|
|
};
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Callback used by start_bg_command() to ask whether the
|
|
|
|
* child process is ready or needs more time to become "ready".
|
|
|
|
*
|
|
|
|
* The callback will receive the cmd and cb_data arguments given to
|
|
|
|
* start_bg_command().
|
|
|
|
*
|
|
|
|
* Returns 1 is child needs more time (subject to the requested timeout).
|
|
|
|
* Returns 0 if child is "ready".
|
|
|
|
* Returns -1 on any error and cause start_bg_command() to also error out.
|
|
|
|
*/
|
|
|
|
typedef int(start_bg_wait_cb)(const struct child_process *cmd, void *cb_data);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Start a command in the background. Wait long enough for the child
|
|
|
|
* to become "ready" (as defined by the provided callback). Capture
|
|
|
|
* immediate errors (like failure to start) and any immediate exit
|
|
|
|
* status (such as a shutdown/signal before the child became "ready")
|
|
|
|
* and return this like start_command().
|
|
|
|
*
|
|
|
|
* We run a custom wait loop using the provided callback to wait for
|
|
|
|
* the child to start and become "ready". This is limited by the given
|
|
|
|
* timeout value.
|
|
|
|
*
|
|
|
|
* If the child does successfully start and become "ready", we orphan
|
|
|
|
* it into the background.
|
|
|
|
*
|
|
|
|
* The caller must not call finish_command().
|
|
|
|
*
|
|
|
|
* The opaque cb_data argument will be forwarded to the callback for
|
|
|
|
* any instance data that it might require. This may be NULL.
|
|
|
|
*/
|
|
|
|
enum start_bg_result start_bg_command(struct child_process *cmd,
|
|
|
|
start_bg_wait_cb *wait_cb,
|
|
|
|
void *cb_data,
|
|
|
|
unsigned int timeout_sec);
|
|
|
|
|
2023-05-16 14:33:49 +08:00
|
|
|
int sane_execvp(const char *file, char *const argv[]);
|
|
|
|
|
2005-08-01 03:17:43 +08:00
|
|
|
#endif
|