# Library of functions shared by all CI scripts

if test true = "$GITHUB_ACTIONS"
then
	begin_group () {
		need_to_end_group=t
		echo "::group::$1" >&2
		set -x
	}

	end_group () {
		test -n "$need_to_end_group" || return 0
		set +x
		need_to_end_group=
		echo '::endgroup::' >&2
	}
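
	# Everything printed between the "::group::<title>" and "::endgroup::"
	# markers above is rendered as a single collapsible section in the
	# GitHub Actions job log.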
elif test true = "$GITLAB_CI"
then
	begin_group () {
		need_to_end_group=t
		printf "\e[0Ksection_start:$(date +%s):$(echo "$1" | tr ' ' _)[collapsed=true]\r\e[0K$1\n"
		trap "end_group '$1'" EXIT
		set -x
	}

	end_group () {
		test -n "$need_to_end_group" || return 0
		set +x
		need_to_end_group=
		printf "\e[0Ksection_end:$(date +%s):$(echo "$1" | tr ' ' _)\r\e[0K\n"
		trap - EXIT
	}
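
	# The section_start/section_end markers printed above are what GitLab
	# CI uses to render collapsible sections in the job log; the section
	# name must not contain spaces (hence the 'tr'), and [collapsed=true]
	# makes the section start out folded.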
else
	begin_group () { :; }
	end_group () { :; }

	set -x
fi

group () {
	group="$1"
	shift
	begin_group "$group"

	# work around `dash` not supporting `set -o pipefail`
	(
		"$@" 2>&1
		echo $? >exit.status
	) |
	sed 's/^\(\([^ ]*\):\([0-9]*\):\([0-9]*:\) \)\(error\|warning\): /::\5 file=\2,line=\3::\1/'
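	# The 'sed' above rewrites compiler diagnostics of the form
	# "file.c:<line>:<col>: error: ..." into "::error file=...,line=...::"
	# workflow commands so that GitHub can show them as inline annotations.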
	res=$(cat exit.status)
	rm exit.status

	end_group "$group"
	return $res
}
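
# Illustrative usage (hypothetical invocations, not part of this library):
# CI scripts typically wrap individual build and test steps with the helper
# above, e.g.
#
#	group Build make
#	group "Run tests" make test
#
# so that each step becomes its own collapsible section in the CI log.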

begin_group "CI setup"
trap "end_group 'CI setup'" EXIT

# Set 'exit on error' for all CI scripts to let the caller know that
# something went wrong.
#
# We already enabled tracing executed commands earlier. This helps by showing
# how environment variables are set and dependencies are installed.
set -e

skip_branch_tip_with_tag () {
	# Sometimes, a branch is pushed at the same time the tag that points
	# at the same commit as the tip of the branch is pushed, and building
	# both at the same time is a waste.
	#
	# When the build is triggered by a push to a tag, $CI_BRANCH will
	# have that tagname, e.g. v2.14.0.  Let's see if $CI_BRANCH is
	# exactly at a tag, and if so, whether that tag differs from
	# $CI_BRANCH.  That way, we can tell if we are building the tip of a
	# branch that is tagged, and we can skip the build because we won't
	# be skipping a build of a tag.
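	#
	# For example, if branch 'maint' is pushed while its tip is also
	# tagged v2.14.0, 'git describe --exact-match maint' prints
	# "v2.14.0", which differs from "maint", so we exit early below.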

	if TAG=$(git describe --exact-match "$CI_BRANCH" 2>/dev/null) &&
		test "$TAG" != "$CI_BRANCH"
	then
		echo "$(tput setaf 2)Tip of $CI_BRANCH is exactly at $TAG$(tput sgr0)"
		exit 0
	fi
}

# Check whether we can use the path passed via the first argument as a Git
# repository.
is_usable_git_repository () {
	# We require Git in our PATH, otherwise we cannot access repositories
	# at all.
	if ! command -v git >/dev/null
	then
		return 1
	fi

	# And the target directory needs to be a proper Git repository.
	if ! git -C "$1" rev-parse 2>/dev/null
	then
		return 1
	fi
}

# Save some info about the current commit's tree, so we can skip the build
# job if we encounter the same tree again and can provide a useful info
# message.
save_good_tree () {
	if ! is_usable_git_repository .
	then
		return
	fi
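
	# Each record is a single line of the form
	# "<tree-oid> <commit-oid> <job-number> <job-id>"; skip_good_tree()
	# below greps for the tree and reads back the remaining fields.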
	echo "$(git rev-parse $CI_COMMIT^{tree}) $CI_COMMIT $CI_JOB_NUMBER $CI_JOB_ID" >>"$good_trees_file"

	# limit the file size
	tail -1000 "$good_trees_file" >"$good_trees_file".tmp
	mv "$good_trees_file".tmp "$good_trees_file"
}

# Skip the build job if the same tree has already been built and tested
# successfully before (e.g. because the branch got rebased, changing only
# the commit messages).
skip_good_tree () {
	if test true = "$GITHUB_ACTIONS"
	then
		return
	fi

	if ! is_usable_git_repository .
	then
		return
	fi

	if ! good_tree_info="$(grep "^$(git rev-parse $CI_COMMIT^{tree}) " "$good_trees_file")"
	then
		# Haven't seen this tree yet, or no cached good trees file yet.
		# Continue the build job.
		return
	fi

	echo "$good_tree_info" | {
		read tree prev_good_commit prev_good_job_number prev_good_job_id

		if test "$CI_JOB_ID" = "$prev_good_job_id"
		then
			cat <<-EOF
			$(tput setaf 2)Skipping build job for commit $CI_COMMIT.$(tput sgr0)
			This commit has already been built and tested successfully by this build job.
			To force a re-build delete the branch's cache and then hit 'Restart job'.
			EOF
		else
			cat <<-EOF
			$(tput setaf 2)Skipping build job for commit $CI_COMMIT.$(tput sgr0)
			This commit's tree has already been built and tested successfully in build job $prev_good_job_number for commit $prev_good_commit.
			The log of that build job is available at $SYSTEM_TASKDEFINITIONSURI$SYSTEM_TEAMPROJECT/_build/results?buildId=$prev_good_job_id
			To force a re-build delete the branch's cache and then hit 'Restart job'.
			EOF
		fi
	}

	exit 0
}

check_unignored_build_artifacts () {
	if ! is_usable_git_repository .
	then
		return
	fi
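
	# 'git ls-files --other --exclude-standard' lists untracked files that
	# are not ignored; if a build or test step left any such stray files
	# behind, report them and fail.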
	! git ls-files --other --exclude-standard --error-unmatch \
		-- ':/*' 2>/dev/null ||
	{
		echo "$(tput setaf 1)error: found unignored build artifacts$(tput sgr0)"
		false
	}
}

handle_failed_tests () {
	return 1
}

create_failed_test_artifacts () {
	mkdir -p t/failed-test-artifacts
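
	# Each test script leaves its numeric exit code in
	# t/test-results/<name>.exit; a non-zero value marks a failed test
	# whose output and trash directory we collect below.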
	for test_exit in t/test-results/*.exit
	do
		test 0 != "$(cat "$test_exit")" || continue

		test_name="${test_exit%.exit}"
		test_name="${test_name##*/}"
		printf "\\e[33m\\e[1m=== Failed test: ${test_name} ===\\e[m\\n"
		echo "The full logs are in the 'print test failures' step below."
		echo "See also the 'failed-tests-*' artifacts attached to this run."
		cat "t/test-results/$test_name.markup"

		trash_dir="t/trash directory.$test_name"
		cp "t/test-results/$test_name.out" t/failed-test-artifacts/
		tar czf t/failed-test-artifacts/"$test_name".trash.tar.gz "$trash_dir"
	done
}

# GitHub Actions doesn't set TERM, which is required by tput.
export TERM=${TERM:-dumb}

# Clear MAKEFLAGS that may come from the outside world.
export MAKEFLAGS=

if test -n "$SYSTEM_COLLECTIONURI" || test -n "$SYSTEM_TASKDEFINITIONSURI"
then
	CI_TYPE=azure-pipelines
	# We are running in Azure Pipelines
	CI_BRANCH="$BUILD_SOURCEBRANCH"
	CI_COMMIT="$BUILD_SOURCEVERSION"
	CI_JOB_ID="$BUILD_BUILDID"
	CI_JOB_NUMBER="$BUILD_BUILDNUMBER"
	CI_OS_NAME="$(echo "$AGENT_OS" | tr A-Z a-z)"
	test darwin != "$CI_OS_NAME" || CI_OS_NAME=osx
	CI_REPO_SLUG="$(expr "$BUILD_REPOSITORY_URI" : '.*/\([^/]*/[^/]*\)$')"
	CC="${CC:-gcc}"

	# use a subdirectory of the cache dir (because the file share is shared
	# among *all* phases)
	cache_dir="$HOME/test-cache/$SYSTEM_PHASENAME"

	GIT_TEST_OPTS="--write-junit-xml"
	JOBS=10
elif test true = "$GITHUB_ACTIONS"
then
	CI_TYPE=github-actions
	CI_BRANCH="$GITHUB_REF"
	CI_COMMIT="$GITHUB_SHA"
	CI_OS_NAME="$(echo "$RUNNER_OS" | tr A-Z a-z)"
	test macos != "$CI_OS_NAME" || CI_OS_NAME=osx
	CI_REPO_SLUG="$GITHUB_REPOSITORY"
	CI_JOB_ID="$GITHUB_RUN_ID"
	CC="${CC_PACKAGE:-${CC:-gcc}}"
	DONT_SKIP_TAGS=t
	handle_failed_tests () {
		echo "FAILED_TEST_ARTIFACTS=t/failed-test-artifacts" >>$GITHUB_ENV
		create_failed_test_artifacts
		return 1
	}

	cache_dir="$HOME/none"

	GIT_TEST_OPTS="--github-workflow-markup"
	JOBS=10
elif test true = "$GITLAB_CI"
then
	CI_TYPE=gitlab-ci
	CI_BRANCH="$CI_COMMIT_REF_NAME"
	CI_COMMIT="$CI_COMMIT_SHA"
	case "$CI_JOB_IMAGE" in
	macos-*)
		# GitLab CI has Python installed via multiple package managers,
		# most notably via asdf and Homebrew. Ensure that our builds
		# pick up the Homebrew one by prepending it to our PATH as the
		# asdf one breaks tests.
		export PATH="$(brew --prefix)/bin:$PATH"

		CI_OS_NAME=osx
		;;
	alpine:*|fedora:*|ubuntu:*)
		CI_OS_NAME=linux;;
	*)
		echo "Could not identify OS image" >&2
		env >&2
		exit 1
		;;
	esac
	CI_REPO_SLUG="$CI_PROJECT_PATH"
	CI_JOB_ID="$CI_JOB_ID"
	CC="${CC_PACKAGE:-${CC:-gcc}}"
	DONT_SKIP_TAGS=t
	handle_failed_tests () {
		create_failed_test_artifacts
		return 1
	}

	cache_dir="$HOME/none"

	distro=$(echo "$CI_JOB_IMAGE" | tr : -)
	JOBS=$(nproc)
else
	echo "Could not identify CI type" >&2
	env >&2
	exit 1
fi

MAKEFLAGS="$MAKEFLAGS --jobs=$JOBS"
GIT_PROVE_OPTS="--timer --jobs $JOBS"
|
2023-11-09 16:05:42 +08:00
|
|
|
|
|
|
|
GIT_TEST_OPTS="$GIT_TEST_OPTS --verbose-log -x"
|
|
|
|
case "$CI_OS_NAME" in
|
|
|
|
windows|windows_nt)
|
|
|
|
GIT_TEST_OPTS="$GIT_TEST_OPTS --no-chain-lint --no-bin-wrappers"
|
|
|
|
;;
|
|
|
|
esac
|
|
|
|
|
|
|
|
export GIT_TEST_OPTS
|
|
|
|
export GIT_PROVE_OPTS
|
|
|
|
|
2018-01-30 01:17:11 +08:00
|
|
|
good_trees_file="$cache_dir/good-trees"
|
|
|
|
|
|
|
|
mkdir -p "$cache_dir"
|
2017-12-31 18:12:04 +08:00
|
|
|
|
2020-10-08 23:29:35 +08:00
|
|
|
test -n "${DONT_SKIP_TAGS-}" ||
|
2017-09-10 22:44:29 +08:00
|
|
|
skip_branch_tip_with_tag
|
travis-ci: record and skip successfully built trees
Travis CI dutifully builds and tests each new branch tip, even if its
tree has previously been successfully built and tested. This happens
often enough in contributors' workflows, when a work-in-progress
branch is rebased changing e.g. only commit messages or the order or
number of commits while leaving the resulting code intact, and is then
pushed to a Travis CI-enabled GitHub fork.
This is wasting Travis CI's resources and is sometimes scary-annoying
when the new tip commit with a tree identical to the previous,
successfully tested one is suddenly reported in red, because one of
the OSX build jobs happened to exceed the time limit yet again.
So extend our Travis CI build scripts to skip building commits whose
trees have previously been successfully built and tested. Use the
Travis CI cache feature to keep a record of the object names of trees
that tested successfully, in a plain and simple flat text file, one
line per tree object name. Append the current tree's object name at
the end of every successful build job to this file, along with a bit
of additional info about the build job (commit object name, Travis CI
job number and id). Limit the size of this file to 1000 records, to
prevent it from growing too large for git/git's forever living
integration branches. Check, using a simple grep invocation, in each
build job whether the current commit's tree is already in there, and
skip the build if it is. Include a message in the skipped build job's
trace log, containing the URL to the build job successfully testing
that tree for the first time and instructions on how to force a
re-build. Catch the case when a build job, which successfully built
and tested a particular tree for the first time, is restarted and omit
the URL of the previous build job's trace log, as in this case it's
the same build job and the trace log has just been overwritten.
Note: this won't kick in if two identical trees are on two different
branches, because Travis CI caches are not shared between build jobs
of different branches.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Reviewed-by: Lars Schneider <larsxschneider@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-12-31 18:12:05 +08:00
|
|
|
skip_good_tree
|
travis-ci: fix running P4 and Git LFS tests in Linux build jobs
Linux build jobs on Travis CI skip the P4 and Git LFS tests since
commit 657343a60 (travis-ci: move Travis CI code into dedicated
scripts, 2017-09-10), claiming there are no P4 or Git LFS installed.
The reason is that P4 and Git LFS binaries are not installed to a
directory in the default $PATH, but their directories are prepended to
$PATH. This worked just fine before said commit, because $PATH was
set in a scriptlet embedded in our '.travis.yml', thus its new value
was visible during the rest of the build job. However, after these
embedded scriptlets were moved into dedicated scripts executed in
separate shell processes, any variable set in one of those scripts is
only visible in that single script but not in any of the others. In
this case, 'ci/install-dependencies.sh' downloads P4 and Git LFS and
modifies $PATH, but to no effect, because 'ci/run-tests.sh' only sees
Travis CI's default $PATH.
Move adjusting $PATH to 'ci/lib-travisci.sh', which is sourced in all
other 'ci/' scripts, so all those scripts will see the updated $PATH
value.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-11-01 19:55:35 +08:00
|
|
|
|
travis-ci: introduce a $jobname variable for 'ci/*' scripts
A couple of 'ci/*' scripts are shared between different build jobs:
'ci/lib-travisci.sh', being a common library, is sourced from almost
every script, while 'ci/install-dependencies.sh', 'ci/run-build.sh'
and 'ci/run-tests.sh' are shared between the "regular" GCC and Clang
Linux and OSX build jobs, and the latter two scripts are used in the
GETTEXT_POISON Linux build job as well.
Our builds could benefit from these shared scripts being able to
easily tell which build job they are taking part in. Now, it's
already quite easy to tell apart Linux vs OSX and GCC vs Clang build
jobs, but it gets trickier with all the additional Linux-based build
jobs included explicitly in the build matrix.
Unfortunately, Travis CI doesn't provide much help in this regard.
The closest we've got is the $TRAVIS_JOB_NUMBER variable, the value of
which is two dot-separated integers, where the second integer
indicates a particular build job. While it would be possible to use
that second number to identify the build job in our shared scripts, it
doesn't seem like a good idea to rely on that:
- Though the build job numbering sequence seems to be stable so far,
Travis CI's documentation doesn't explicitly states that it is
indeed stable and will remain so in the future. And even if it
were stable,
- if we were to remove or insert a build job in the middle, then the
job numbers of all subsequent build jobs would change accordingly.
So roll our own means of simple build job identification and introduce
the $jobname environment variable in our builds, setting it in the
environments of the explicitly included jobs in '.travis.yml', while
constructing one in 'ci/lib-travisci.sh' as the combination of the OS
and compiler name for the GCC and Clang Linux and OSX build jobs. Use
$jobname instead of $TRAVIS_OS_NAME in scripts taking different
actions based on the OS and build job (when installing P4 and Git LFS
dependencies and including them in $PATH). The following two patches
will also rely on $jobname.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-12-12 07:34:44 +08:00
|
|
|
if test -z "$jobname"
|
|
|
|
then
|
ci/lib.sh: encapsulate Travis-specific things
The upcoming patches will allow building git.git via Azure Pipelines
(i.e. Azure DevOps' Continuous Integration), where variable names and
URLs look a bit different than in Travis CI.
Also, the configurations of the available agents are different. For
example, Travis' and Azure Pipelines' macOS agents are set up
differently, so that on Travis, we have to install the git-lfs and
gettext Homebrew packages, and on Azure Pipelines we do not need to.
Likewise, Azure Pipelines' Ubuntu agents already have asciidoctor
installed.
Finally, on Azure Pipelines the natural way is not to base64-encode tar
files of the trash directories of failed tests, but to publish build
artifacts instead. Therefore, that code to log those base64-encoded tar
files is guarded to be Travis-specific.
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2019-01-28 07:26:51 +08:00
|
|
|
jobname="$CI_OS_NAME-$CC"
|
travis-ci: introduce a $jobname variable for 'ci/*' scripts
A couple of 'ci/*' scripts are shared between different build jobs:
'ci/lib-travisci.sh', being a common library, is sourced from almost
every script, while 'ci/install-dependencies.sh', 'ci/run-build.sh'
and 'ci/run-tests.sh' are shared between the "regular" GCC and Clang
Linux and OSX build jobs, and the latter two scripts are used in the
GETTEXT_POISON Linux build job as well.
Our builds could benefit from these shared scripts being able to
easily tell which build job they are taking part in. Now, it's
already quite easy to tell apart Linux vs OSX and GCC vs Clang build
jobs, but it gets trickier with all the additional Linux-based build
jobs included explicitly in the build matrix.
Unfortunately, Travis CI doesn't provide much help in this regard.
The closest we've got is the $TRAVIS_JOB_NUMBER variable, the value of
which is two dot-separated integers, where the second integer
indicates a particular build job. While it would be possible to use
that second number to identify the build job in our shared scripts, it
doesn't seem like a good idea to rely on that:
- Though the build job numbering sequence seems to be stable so far,
Travis CI's documentation doesn't explicitly states that it is
indeed stable and will remain so in the future. And even if it
were stable,
- if we were to remove or insert a build job in the middle, then the
job numbers of all subsequent build jobs would change accordingly.
So roll our own means of simple build job identification and introduce
the $jobname environment variable in our builds, setting it in the
environments of the explicitly included jobs in '.travis.yml', while
constructing one in 'ci/lib-travisci.sh' as the combination of the OS
and compiler name for the GCC and Clang Linux and OSX build jobs. Use
$jobname instead of $TRAVIS_OS_NAME in scripts taking different
actions based on the OS and build job (when installing P4 and Git LFS
dependencies and including them in $PATH). The following two patches
will also rely on $jobname.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-12-12 07:34:44 +08:00
|
|
|
fi
|
|
|
|
|
travis-ci: move setting environment variables to 'ci/lib-travisci.sh'
Our '.travis.yml's 'env.global' section sets a bunch of environment
variables for all build jobs, though none of them actually affects all
build jobs. It's convenient for us, and in most cases it works just
fine, because irrelevant environment variables are simply ignored.
However, $GIT_SKIP_TESTS is an exception: it tells the test harness to
skip the two test scripts that are prone to occasional failures on
OSX, but as it's set for all build jobs those tests are not run in any
of the build jobs that are capable to run them reliably, either.
Therefore $GIT_SKIP_TESTS should only be set in the OSX build jobs,
but those build jobs are included in the build matrix implicitly (i.e.
by combining the matrix keys 'os' and 'compiler'), and there is no way
to set an environment variable only for a subset of those implicit
build jobs. (Unless we were to add new scriptlets to '.travis.yml',
which is exactly the opposite direction that we took with commit
657343a60 (travis-ci: move Travis CI code into dedicated scripts,
2017-09-10)).
So move setting $GIT_SKIP_TESTS to 'ci/lib-travisci.sh', where it can
trivially be set only for the OSX build jobs.
Furthermore, move setting all other environment variables from
'.travis.yml' to 'ci/lib-travisci.sh', too, because a couple of
environment variables are already set there, and this way all
environment variables will be set in the same place. All the logic
controlling our builds is already in the 'ci/*' scripts anyway, so
there is really no good reason to keep the environment variables
separately.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
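A minimal sketch of the approach described above, assuming $CI_OS_NAME
is "osx" on the macOS agents as the osx-* job names suggest (the test
numbers are placeholders, not the scripts actually skipped):
case "$CI_OS_NAME" in
osx)
	export GIT_SKIP_TESTS="t1234 t5678"
	;;
esac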
2017-12-12 07:34:45 +08:00
|
|
|
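# DEVELOPER=1 opts into the stricter developer compiler flags
# (config.mak.dev), so new warnings fail the CI build.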
export DEVELOPER=1
|
|
|
|
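# Let "make test" drive the suite through prove(1) rather than the
# default shell loop.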
export DEFAULT_TEST_TARGET=prove
|
2019-11-22 21:14:37 +08:00
|
|
|
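# Also exercise the expensive 2GB-clone test, which is skipped unless
# this is set.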
export GIT_TEST_CLONE_2GB=true
|
ci: stop linking built-ins to the dashed versions
Since e4597aae6590 (run test suite without dashed git-commands in PATH,
2009-12-02), we stopped running our tests with `git-foo` binaries found
at the top-level directory of a freshly built source tree; instead we
have placed only `git` and selected `git-foo` commands that must be on
`$PATH` in `bin-wrappers/` and prepended that `bin-wrappers/` to the
`PATH` used in the test suite. We did that to catch the tests and
scripted Git commands that still try to use the dashed form.
Since CI jobs will not install the built Git anywhere, and the
hardlinks we make at the top-level of the source tree for `git-add` and
friends are not even used during tests, they are a pure waste of resources
these days.
Thanks to the newly invented `SKIP_DASHED_BUILT_INS` knob, we can now
skip creating these links in the source tree. So let's do that.
Note that this change introduces a subtle change of behavior: when Git's
`cmd_main()` calls `setup_path()`, it inserts the value of
`GIT_EXEC_PATH` (defaulting to `<prefix>/libexec/git-core`) at the
beginning of the environment variable `PATH`. This is necessary to find
e.g. scripted commands that are installed in that location. For the
purposes of Git's test suite, the `bin-wrappers/` scripts override
`GIT_EXEC_PATH` to point to the top-level directory of the source code.
In other words, if a scripted command had used a dashed invocation of a
built-in Git command, it would not have been caught previously, which is
fixed by this change.
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
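For illustration, exporting the knob here has the same effect as
passing it on the make command line; the `git` binary and the
`bin-wrappers/` scripts are still built, only the top-level `git-foo`
hardlinks are skipped:
make SKIP_DASHED_BUILT_INS=YesPlease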
2020-09-22 06:28:17 +08:00
|
|
|
export SKIP_DASHED_BUILT_INS=YesPlease
|
2017-12-12 07:34:45 +08:00
|
|
|
|
2024-04-12 12:43:57 +08:00
|
|
|
case "$distro" in
|
2022-11-25 17:59:51 +08:00
|
|
|
ubuntu-*)
|
2021-11-24 00:29:11 +08:00
|
|
|
if test "$jobname" = "linux-gcc-default"
|
|
|
|
then
|
|
|
|
break
|
|
|
|
fi
|
|
|
|
|
ci: fix Python dependency on Ubuntu 24.04
Newer versions of Ubuntu have dropped Python 2 starting with Ubuntu
23.04. By default though, our CI setups will try to use that Python
version on all Ubuntu-based jobs except for the "linux-gcc" one.
We didn't notice this issue due to two reasons:
- The "ubuntu:latest" tag always points to the latest LTS release.
Until a few weeks ago this was Ubuntu 22.04, which still had Python
2.
- Our Docker-based CI jobs had their own script to install
dependencies until 9cdeb34b96 (ci: merge scripts which install
dependencies, 2024-04-12), where we didn't even try to install
Python at all for many of them.
Since the CI refactorings were originally implemented, Ubuntu 24.04 has
been released, and it being an LTS version means that the "latest"
tag now points to that Python-2-less version. Consequently, the jobs
that use "ubuntu:latest" broke.
Address this by using Python 2 only on Ubuntu 20.04, whereas we use
Python 3 on all other Ubuntu jobs. Eventually, we should think about
dropping support for Python 2 completely.
Reported-by: Justin Tobler <jltobler@gmail.com>
Signed-off-by: Patrick Steinhardt <ps@pks.im>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2024-05-06 13:35:17 +08:00
|
|
|
# Python 2 is end of life, and Ubuntu 23.04 and newer don't actually
|
|
|
|
# have it anymore. We thus only test with Python 2 on older LTS
|
|
|
|
# releases.
|
|
|
|
if "$distro" = "ubuntu-20.04"
|
travis-ci: build with the right compiler
Our 'Makefile' hardcodes the compiler to build Git as 'CC = cc'. This
CC variable can be overridden from the command line, i.e. 'make
CC=gcc-X.Y' will build with that particular GCC version, but not from
the environment, i.e. 'CC=gcc-X.Y make' will still build with whatever
'cc' happens to be on the platform.
Our build jobs on Travis CI are badly affected by this. In the build
matrix we have dedicated build jobs to build Git with GCC and Clang
both on Linux and macOS from the very beginning (522354d70f (Add
Travis CI support, 2015-11-27)). Alas, this never really worked as
intended, because Travis CI specifies the compiler for those build
jobs as 'export CC=gcc' and 'export CC=clang' (which works fine for
projects built with './configure && make'). Consequently, our
'linux-clang' build job has always used GCC, because that's where 'cc'
points at in Travis CI's Linux images, while the 'osx-gcc' build job
has always used Clang. Furthermore, 37fa4b3c78 (travis-ci: run gcc-8
on linux-gcc jobs, 2018-05-19) added an 'export CC=gcc-8' in an
attempt to build with a more modern compiler, but to no avail.
Set MAKEFLAGS with CC based on the $CC environment variable, so 'make'
will run the "right" compiler. The Xcode 10.1 macOS image on Travis
CI already contains the gcc@8 package from Homebrew, but we have to
'brew link' it first to be able to use it.
So with this patch our build jobs will build Git with the following
compiler versions:
linux-clang: clang version 5.0.0 (tags/RELEASE_500/final)
linux-gcc: gcc-8 (Ubuntu 8.1.0-5ubuntu1~14.04) 8.1.0
osx-clang: Apple LLVM version 10.0.0 (clang-1000.11.45.5)
osx-gcc: gcc-8 (Homebrew GCC 8.2.0) 8.2.0
GETTEXT_POISON: gcc (Ubuntu 4.8.4-2ubuntu1~14.04.3) 4.8.4
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2019-01-17 09:29:13 +08:00
|
|
|
then
|
2024-05-06 13:35:17 +08:00
|
|
|
PYTHON_PACKAGE=python2
|
|
|
|
else
|
2022-11-25 17:59:54 +08:00
|
|
|
PYTHON_PACKAGE=python3
|
2019-01-17 09:29:13 +08:00
|
|
|
fi
|
2022-11-25 17:59:54 +08:00
|
|
|
MAKEFLAGS="$MAKEFLAGS PYTHON_PATH=/usr/bin/$PYTHON_PACKAGE"
|
2019-01-17 09:29:13 +08:00
|
|
|
|
2019-09-06 20:13:26 +08:00
|
|
|
export GIT_TEST_HTTPD=true
|
travis-ci: set GIT_TEST_HTTPD in 'ci/lib-travisci.sh'
Commit 657343a60 (travis-ci: move Travis CI code into dedicated
scripts, 2017-09-10) converted '.travis.yml's default 'before_install'
scriptlet to the 'ci/install-dependencies.sh' script, and while doing
so moved setting GIT_TEST_HTTPD=YesPlease for the 64-bit GCC and Clang
Linux build jobs to that script. This is wrong for two reasons:
- The purpose of that script is, as its name suggests, to install
dependencies, not to set any environment variables influencing
which tests should be run (though, arguably, this was already an
issue with the original 'before_install' scriptlet).
- Setting the variable has no effect anymore, because that script is
run in a separate shell process, and the variable won't be visible
in any of the other scripts, notably in 'ci/run-tests.sh'
responsible for, well, running the tests.
Luckily, this didn't have a negative effect on our Travis CI build
jobs, because GIT_TEST_HTTPD is a tri-state variable defaulting to
"auto" and a functioning web server was installed in those Linux build
jobs, so the httpd tests were run anyway.
Apparently the httpd tests run just fine without GIT_TEST_HTTPD being
set, therefore we could simply remove this environment variable.
However, if a bug were to creep in to change the Travis CI build
environment to run the tests as root or to not install Apache, then
the httpd tests would be skipped and the build job would still
succeed. We would only notice if someone actually were to look
through the build job's trace log; but who would look at the trace log
of a successful build job?!
Since httpd tests are important, we do want to run them and we want to
be loudly reminded if they can't be run. Therefore, move setting
GIT_TEST_HTTPD=YesPlease for the 64-bit GCC and Clang Linux build jobs
to 'ci/lib-travisci.sh' to ensure that the build job fails when the
httpd tests can't be run. (We could set it in 'ci/run-tests.sh' just
as well, but it's better to keep all environment variables in one
place in 'ci/lib-travisci.sh'.)
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
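To summarize the tri-state behaviour described above (a summary, not
code from this file):
# GIT_TEST_HTTPD unset ("auto") - run the httpd tests if a usable web server is found, skip otherwise
# GIT_TEST_HTTPD=true           - fail loudly when the httpd tests cannot be run
# GIT_TEST_HTTPD=false          - skip the httpd tests unconditionally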
2017-12-12 07:34:46 +08:00
|
|
|
|
2017-12-12 07:34:45 +08:00
|
|
|
# The Linux build installs the defined dependency versions below.
|
2019-07-07 00:21:14 +08:00
|
|
|
# The OS X build installs much more recent versions, whichever
|
|
|
|
# were recorded in the Homebrew database upon creating the OS X
|
|
|
|
# image.
|
|
|
|
# Keep that in mind when you encounter a broken OS X build!
|
2017-12-12 07:34:45 +08:00
|
|
|
export LINUX_GIT_LFS_VERSION="1.5.2"
|
travis-ci: fix running P4 and Git LFS tests in Linux build jobs
Linux build jobs on Travis CI skip the P4 and Git LFS tests since
commit 657343a60 (travis-ci: move Travis CI code into dedicated
scripts, 2017-09-10), claiming there are no P4 or Git LFS installed.
The reason is that P4 and Git LFS binaries are not installed to a
directory in the default $PATH, but their directories are prepended to
$PATH. This worked just fine before said commit, because $PATH was
set in a scriptlet embedded in our '.travis.yml', thus its new value
was visible during the rest of the build job. However, after these
embedded scriptlets were moved into dedicated scripts executed in
separate shell processes, any variable set in one of those scripts is
only visible in that single script but not in any of the others. In
this case, 'ci/install-dependencies.sh' downloads P4 and Git LFS and
modifies $PATH, but to no effect, because 'ci/run-tests.sh' only sees
Travis CI's default $PATH.
Move adjusting $PATH to 'ci/lib-travisci.sh', which is sourced in all
other 'ci/' scripts, so all those scripts will see the updated $PATH
value.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-11-01 19:55:35 +08:00
|
|
|
;;
|
2022-12-07 03:57:56 +08:00
|
|
|
macos-*)
|
ci: upgrade to using macos-13
In April, GitHub announced that the `macos-13` pool is available:
https://github.blog/changelog/2023-04-24-github-actions-macos-13-is-now-available/.
It is only a matter of time until the `macos-12` pool is going away,
therefore we should switch now, without pressure of a looming deadline.
Since the `macos-13` runners no longer include Python2, we also drop
specifically testing with Python2 and switch uniformly to Python3, see
https://github.com/actions/runner-images/blob/HEAD/images/macos/macos-13-Readme.md
for details about the software available on the `macos-13` pool's
runners.
Also, on macOS 13, Homebrew seems to install a `gcc@9` package that no
longer comes with a regular `unistd.h` (there seems only to be a
`ssp/unistd.h`), and hence builds would fail with:
In file included from base85.c:1:
git-compat-util.h:223:10: fatal error: unistd.h: No such file or directory
223 | #include <unistd.h>
| ^~~~~~~~~~
compilation terminated.
The reason why we install GCC v9.x explicitly is historical: back in
the day it was the _newest_ version available via
Homebrew: 176441bfb58 (ci: build Git with GCC 9 in the 'osx-gcc' build
job, 2019-11-27).
To reinstate the spirit of that commit _and_ to fix that build failure,
let's switch to the now-newest GCC version: v13.x.
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2023-11-03 15:27:35 +08:00
|
|
|
MAKEFLAGS="$MAKEFLAGS PYTHON_PATH=$(which python3)"
|
|
|
|
if [ "$jobname" != osx-gcc ]
|
2019-01-17 09:29:13 +08:00
|
|
|
then
|
2022-12-15 16:43:05 +08:00
|
|
|
MAKEFLAGS="$MAKEFLAGS APPLE_COMMON_CRYPTO_SHA1=Yes"
|
2019-01-17 09:29:13 +08:00
|
|
|
fi
|
2017-12-12 07:34:45 +08:00
|
|
|
;;
|
2021-11-24 00:29:11 +08:00
|
|
|
esac
|
|
|
|
|
2024-04-12 12:44:27 +08:00
|
|
|
CUSTOM_PATH="${CUSTOM_PATH:-$HOME/path}"
|
2024-04-12 12:44:22 +08:00
|
|
|
export PATH="$CUSTOM_PATH:$PATH"
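# Anything that ci/install-dependencies.sh places into $CUSTOM_PATH
# (e.g. the P4 and Git LFS binaries mentioned above) is thereby visible
# to every ci/ script that sources this library.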
|
|
|
|
|
2021-11-24 00:29:11 +08:00
|
|
|
case "$jobname" in
|
2021-11-24 00:29:10 +08:00
|
|
|
linux32)
|
ci: make MAKEFLAGS available inside the Docker container in the Linux32 job
Once upon a time we ran 'make --jobs=2 ...' to build Git, its
documentation, or to apply Coccinelle semantic patches. Then commit
eaa62291ff (ci: inherit --jobs via MAKEFLAGS in run-build-and-tests,
2019-01-27) came along, and started using the MAKEFLAGS environment
variable to centralize setting the number of parallel jobs in
'ci/libs.sh'. Alas, it forgot to update 'ci/run-linux32-docker.sh' to
make MAKEFLAGS available inside the Docker container running the 32
bit Linux job, and, consequently, since then that job builds Git
sequentially, and it ignores any Makefile knobs that we might set in
MAKEFLAGS (though we don't set any for the 32 bit Linux job at the
moment).
So update the 'docker run' invocation in 'ci/run-linux32-docker.sh' to
make MAKEFLAGS available inside the Docker container as well. Set
CC=gcc for the 32 bit Linux job, because that's the compiler installed
in the 32 bit Linux Docker image that we use (Travis CI nowadays sets
CC=clang by default, but clang is not installed in this image).
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Đoàn Trần Công Danh <congdanhqx@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
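A minimal sketch of that pass-through (image and command are
placeholders, not the actual invocation from 'ci/run-linux32-docker.sh'):
docker run --env MAKEFLAGS --env CC=gcc ubuntu:latest make test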
2020-04-02 21:04:00 +08:00
|
|
|
CC=gcc
|
|
|
|
;;
|
2020-04-04 09:08:50 +08:00
|
|
|
linux-musl)
|
|
|
|
CC=gcc
|
|
|
|
MAKEFLAGS="$MAKEFLAGS PYTHON_PATH=/usr/bin/python3 USE_LIBPCRE2=Yes"
|
|
|
|
MAKEFLAGS="$MAKEFLAGS NO_REGEX=Yes ICONV_OMITS_BOM=Yes"
|
t: use user-specified utf-8 locale for testing svn
In some test-cases, UTF-8 locale is required. To find such locale,
we're using the first available UTF-8 locale that returned by
"locale -a".
However, the locale(1) utility is unavailable on some systems,
e.g. Linux with musl libc, and without "locale -a" we cannot guess
which UTF-8 locales are provided.
Add a Makefile knob GIT_TEST_UTF8_LOCALE and activate it for
linux-musl in our CI system.
Rename t/lib-git-svn.sh:prepare_a_utf8_locale to prepare_utf8_locale,
since we no longer prepare the variable named "a_utf8_locale",
but set up a fallback value for GIT_TEST_UTF8_LOCALE instead.
The fallback will be LC_ALL, LANG environment variable,
or the first UTF-8 locale from output of "locale -a", in that order.
Signed-off-by: Đoàn Trần Công Danh <congdanhqx@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-06-08 14:56:28 +08:00
|
|
|
MAKEFLAGS="$MAKEFLAGS GIT_TEST_UTF8_LOCALE=C.UTF-8"
|
2020-04-04 09:08:50 +08:00
|
|
|
;;
|
2024-02-07 15:20:35 +08:00
|
|
|
linux-leaks|linux-reftable-leaks)
|
tests: add a test mode for SANITIZE=leak, run it in CI
While git can be compiled with SANITIZE=leak, we have not run
regression tests under that mode. Memory leaks have only been fixed as
one-offs without structured regression testing.
This change adds CI testing for it. We'll now build and run a small
set of whitelisted t00*.sh tests under Linux in a new job called
"linux-leaks".
The CI target uses a new GIT_TEST_PASSING_SANITIZE_LEAK=true test
mode. When running in that mode, we'll assert that we were compiled
with SANITIZE=leak. We'll then skip all tests, except those that we've
opted-in by setting "TEST_PASSES_SANITIZE_LEAK=true".
A test setting "TEST_PASSES_SANITIZE_LEAK=true" setting can in turn
make use of the "SANITIZE_LEAK" prerequisite, should they wish to
selectively skip tests even under
"GIT_TEST_PASSING_SANITIZE_LEAK=true". In the preceding commit we
started doing this in "t0004-unwritable.sh" under SANITIZE=leak, now
it'll combine nicely with "GIT_TEST_PASSING_SANITIZE_LEAK=true".
This is how tests that don't set "TEST_PASSES_SANITIZE_LEAK=true" will
be skipped under GIT_TEST_PASSING_SANITIZE_LEAK=true:
$ GIT_TEST_PASSING_SANITIZE_LEAK=true ./t0001-init.sh
1..0 # SKIP skip all tests in t0001 under SANITIZE=leak, TEST_PASSES_SANITIZE_LEAK not set
The intent is to add more TEST_PASSES_SANITIZE_LEAK=true annotations
as follow-up change, but let's start small to begin with.
In ci/run-build-and-tests.sh we make use of the default "*" case to
run "make test" without any GIT_TEST_* modes. SANITIZE=leak is known
to fail in combination with GIT_TEST_SPLIT_INDEX=true in
t0016-oidmap.sh, and we're likely to have other such failures in
various GIT_TEST_* modes. Let's focus on getting the base tests
passing, we can expand coverage to GIT_TEST_* modes later.
It would also be possible to implement a more lightweight version of
this by only relying on setting "LSAN_OPTIONS". See
<YS9OT/pn5rRK9cGB@coredump.intra.peff.net>[1] and
<YS9ZIDpANfsh7N+S@coredump.intra.peff.net>[2] for a discussion of
that. I've opted for this approach of adding a GIT_TEST_* mode instead
because it's consistent with how we handle other special test modes.
Being able to add a "!SANITIZE_LEAK" prerequisite and calling
"test_done" early if it isn't satisfied also means that we can more
incrementally add regression tests without being forced to fix
widespread and hard-to-fix leaks at the same time.
We have tests that do simple checking of some tool we're interested
in, but later on in the script might be stressing trace2, or common
sources of leaks like "git log" in combination with the tool (e.g. the
commit-graph tests). To be clear having a prerequisite could also be
accomplished by using "LSAN_OPTIONS" directly.
On the topic of "LSAN_OPTIONS": It would be nice to have a mode to
aggregate all failures in our various scripts, see [2] for a start at
doing that which sets "log_path" in "LSAN_OPTIONS". I've punted on
that for now, it can be added later.
As of writing this we've got major regressions between master..seen,
i.e. the t000*.sh tests (and more) that were fixed since 31f9acf9ce2
(Merge branch 'ah/plugleaks', 2021-08-04) have regressed recently.
See the discussion at <87czsv2idy.fsf@evledraar.gmail.com>[3] about
the lack of this sort of test mode, and 0e5bba53af (add UNLEAK
annotation for reducing leak false positives, 2017-09-08) for the
initial addition of SANITIZE=leak.
See also 09595ab381 (Merge branch 'jk/leak-checkers', 2017-09-19),
7782066f67 (Merge branch 'jk/apache-lsan', 2019-05-19) and the recent
936e58851a (Merge branch 'ah/plugleaks', 2021-05-07) for some of the
past history of "one-off" SANITIZE=leak (and more) fixes.
As noted in [5] we can't support this on OSX yet until Clang 14 is
released, at that point we'll probably want to resurrect that
"osx-leaks" job.
1. https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer
2. https://lore.kernel.org/git/YS9OT%2Fpn5rRK9cGB@coredump.intra.peff.net/
3. https://lore.kernel.org/git/87czsv2idy.fsf@evledraar.gmail.com/
4. https://lore.kernel.org/git/YS9ZIDpANfsh7N+S@coredump.intra.peff.net/
5. https://lore.kernel.org/git/20210916035603.76369-1-carenas@gmail.com/
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Carlo Marcelo Arenas Belón <carenas@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
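For illustration, a test script opts in near its top, before sourcing
the test library (the description shown is hypothetical):
test_description='frotz works as expected'
TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh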
2021-09-23 17:20:46 +08:00
|
|
|
export SANITIZE=leak
|
|
|
|
export GIT_TEST_PASSING_SANITIZE_LEAK=true
|
2022-07-28 07:13:43 +08:00
|
|
|
export GIT_TEST_SANITIZE_LEAK_LOG=true
|
2021-09-23 17:20:46 +08:00
|
|
|
;;
|
ci: run ASan/UBSan in a single job
When we started running sanitizers in CI via 1c0962c0c4 (ci: add address
and undefined sanitizer tasks, 2022-10-20), we ran them as two separate
CI jobs, since as that commit notes, the combination "seems to take
forever".
And indeed, it does with gcc. However, since the previous commit
switched to using clang, the situation is different, and we can save
some CPU by using a single job for both. Comparing before/after CI runs,
this saved about 14 minutes (the single combined job took 54m, versus
44m plus 24m for ASan and UBSan jobs, respectively). That's wall-clock
and not CPU, but since our jobs are mostly CPU-bound, the two should be
closely proportional.
This does increase the end-to-end time of a CI run, though, since before
this patch the two jobs could run in parallel, and the sanitizer job is
our longest single job. It also means that we won't get a separate
result for "this passed with UBSan but not with ASan" or vice versa).
But as 1c0962c0c4 noted, that is not a very useful signal in practice.
Below are some more detailed timings of gcc vs clang that I measured by
running the test suite on my local workstation. Each measurement counts
only the time to run the test suite with each compiler (not the compile
time itself). We'll focus on the wall-clock times for simplicity, though
the CPU times follow roughly similar trends.
Here's a run with CC=gcc as a baseline:
real 1m12.931s
user 9m30.566s
sys 8m9.538s
Running with SANITIZE=address increases the time by a factor of ~4.7x:
real 5m40.352s
user 49m37.044s
sys 36m42.950s
Running with SANITIZE=undefined increases the time by a factor of ~1.7x:
real 2m5.956s
user 12m42.847s
sys 19m27.067s
So let's call that 6.4 time units to run them separately (where a unit
is the time it takes to run the test suite with no sanitizers). As a
simplistic model, we might imagine that running them together would take
5.4 units (we save 1 unit because we are no longer running the test
suite twice, but just paying the sanitizer overhead on top of a single
run).
But that's not what happens. Running with SANITIZE=address,undefined
results in a factor of 9.3x:
real 11m9.817s
user 77m31.284s
sys 96m40.454s
So not only did we not get faster when doing them together, we actually
spent 1.5x as much CPU as doing them separately! And while those
wall-clock numbers might not look too terrible, keep in mind that this
is on an unloaded 8-core machine. In the CI environment, wall-clock
times will be much closer to CPU times. So not only are we wasting CPU,
but we risk hitting timeouts.
Now let's try the same thing with clang. Here's our no-sanitizer
baseline run, which is almost identical to the gcc one (which is quite
convenient, because we can keep using the same "time units" to get an
apples-to-apples comparison):
real 1m11.844s
user 9m28.313s
sys 8m8.240s
And now again with SANITIZE=address, we get a 5x factor (so slightly
worse than gcc's 4.7x, though I wouldn't read too much into it; there is
a fair bit of run-to-run noise):
real 6m7.662s
user 49m24.330s
sys 44m13.846s
And with SANITIZE=undefined, we are at 1.5x, slightly outperforming gcc
(though again, that's probably mostly noise):
real 1m50.028s
user 11m0.973s
sys 16m42.731s
So running them separately, our total cost is 6.5x. But if we combine
them in a single run (SANITIZE=address,undefined), we get:
real 6m51.804s
user 52m32.049s
sys 51m46.711s
which is a factor of 5.7x. That's along the lines we'd hoped for!
Running them together saves us almost a whole time unit. And that's not
counting any time spent outside the test suite itself (starting the job,
setting up the environment, compiling) that we're no longer duplicating
by having two jobs.
So clang behaves like we'd hope: the overhead to run the sanitizers is
additive as you add more sanitizers. Whereas gcc's numbers seem very
close to multiplicative, almost as if the sanitizers were enforcing
their overheads on each other (though that is purely a guess on what is
going on; ultimately what matters to us is the amount of time it takes).
And that roughly matches the CI improvement I saw. A "time unit" there
is more like 12 minutes, and the observed time savings was 14 minutes
(with the extra presumably coming from avoiding duplicated setup, etc).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2023-06-02 02:09:47 +08:00
|
|
|
linux-asan-ubsan)
|
|
|
|
export SANITIZE=address,undefined
|
ci(linux-asan-ubsan): let's save some time
Every once in a while, the `git-p4` tests flake for reasons outside of
our control. It typically fails with "Connection refused" e.g. here:
https://github.com/git/git/actions/runs/5969707156/job/16196057724
[...]
+ git p4 clone --dest=/home/runner/work/git/git/t/trash directory.t9807-git-p4-submit/git //depot
Initialized empty Git repository in /home/runner/work/git/git/t/trash directory.t9807-git-p4-submit/git/.git/
Perforce client error:
Connect to server failed; check $P4PORT.
TCP connect to localhost:9807 failed.
connect: 127.0.0.1:9807: Connection refused
failure accessing depot: could not run p4
Importing from //depot into /home/runner/work/git/git/t/trash directory.t9807-git-p4-submit/git
[...]
This happens in other jobs, too, but in the `linux-asan-ubsan` job it
hurts the most because that job often takes over a full hour to run,
therefore re-running a failed `linux-asan-ubsan` job is _very_ costly.
The purpose of the `linux-asan-ubsan` job is to exercise the C code of
Git, anyway, and any part of Git's source code that the `git-p4` tests
run and that would benefit from the attention of ASAN/UBSAN is exercised
better by other tests anyway, as debugging C code run via Python scripts
can get a bit hairy.
In fact, it is not even just `git-p4` that is the problem (even if it
flakes often enough to be problematic in the CI builds), but really the
part about Python scripts. So let's just skip any Python parts of the
tests from being run in that job.
For good measure, also skip the Subversion tests because debugging C
code run via Perl scripts is as much fun as debugging C code run via
Python scripts. And it will reduce the time this very expensive job
takes, which is a big benefit.
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2023-08-30 04:47:28 +08:00
|
|
|
export NO_SVN_TESTS=LetsSaveSomeTime
|
|
|
|
MAKEFLAGS="$MAKEFLAGS NO_PYTHON=YepBecauseP4FlakesTooOften"
|
2022-10-21 00:20:59 +08:00
|
|
|
;;
|
2021-09-23 17:20:46 +08:00
|
|
|
esac
|
|
|
|
|
2019-02-08 03:36:28 +08:00
|
|
|
MAKEFLAGS="$MAKEFLAGS CC=${CC:-cc}"
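# With GNU make, variable assignments in MAKEFLAGS are treated like
# command-line overrides, so this beats the Makefile's own "CC = cc"
# (see the "build with the right compiler" note above). Illustratively:
#   CC=gcc-8 make               # environment alone: still builds with plain "cc"
#   make CC=gcc-8               # command-line override: builds with gcc-8
#   MAKEFLAGS="CC=gcc-8" make   # equivalent override; the mechanism used here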
|
2022-05-22 06:18:50 +08:00
|
|
|
|
2023-11-09 16:05:29 +08:00
|
|
|
end_group "CI setup"
|
2022-05-22 06:18:50 +08:00
|
|
|
set -x
|