# Test framework for git. See t/README for usage.
#
# Copyright (c) 2005 Junio C Hamano
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/ .
# Test the binaries we have just built. The tests are kept in
# the t/ subdirectory and are run in a 'trash directory' subdirectory.
if test -z "$TEST_DIRECTORY"
then
# We allow tests to override this, in case they want to run tests
# outside of t/, e.g. for running tests on the test library
# itself.
TEST_DIRECTORY=$(pwd)
else
# ensure that TEST_DIRECTORY is an absolute path so that it
# is valid even if the current working directory is changed
TEST_DIRECTORY=$(cd "$TEST_DIRECTORY" && pwd) || exit 1
fi
if test -z "$TEST_OUTPUT_DIRECTORY"
then
# Similarly, override this to store the test-results subdir
# elsewhere
TEST_OUTPUT_DIRECTORY=$TEST_DIRECTORY
fi
GIT_BUILD_DIR="$TEST_DIRECTORY"/..
################################################################
# It appears that people try to run tests without building...
"$GIT_BUILD_DIR/git" >/dev/null
if test $? != 1
then
	echo >&2 'error: you do not seem to have built git yet.'
	exit 1
fi
. "$GIT_BUILD_DIR"/GIT-BUILD-OPTIONS
export PERL_PATH SHELL_PATH
# if --tee was passed, write the output not only to the terminal but
# also to the file test-results/$BASENAME.out.
case "$GIT_TEST_TEE_STARTED, $* " in
done,*)
# do not redirect again
;;
*' --tee '*|*' --va'*)
mkdir -p "$TEST_OUTPUT_DIRECTORY/test-results"
BASE="$TEST_OUTPUT_DIRECTORY/test-results/$(basename "$0" .sh)"
(GIT_TEST_TEE_STARTED=done ${SHELL_PATH} "$0" "$@" 2>&1;
echo $? > $BASE.exit) | tee $BASE.out
test "$(cat $BASE.exit)" = 0
exit
;;
esac
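# For example, a run such as
#
#	./t0000-basic.sh --tee
#
# (an illustrative script name) re-executes the script with stdout and
# stderr copied to test-results/t0000-basic.out, while the real exit
# status is preserved through the companion t0000-basic.exit file.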
# For repeatability, reset the environment to known value.
# TERM is sanitized below, after saving color control sequences.
LANG=C
LC_ALL=C
PAGER=cat
TZ=UTC
export LANG LC_ALL PAGER TZ
EDITOR=:
# A call to "unset" with no arguments causes at least Solaris 10
# /usr/xpg4/bin/sh and /bin/ksh to bail out. So keep the unsets
# deriving from the command substitution clustered with the other
# ones.
unset VISUAL EMAIL LANGUAGE COLUMNS $("$PERL_PATH" -e '
	my @env = keys %ENV;
	my $ok = join("|", qw(
		TRACE
		DEBUG
		USE_LOOKUP
		TEST
		.*_TEST
		PROVE
		VALGRIND
		UNZIP
		PERF_
		CURL_VERBOSE
	));
	my @vars = grep(/^GIT_/ && !/^GIT_($ok)/o, @env);
	print join("\n", @vars);
')
unset XDG_CONFIG_HOME
unset GITPERLLIB
GIT_AUTHOR_EMAIL=author@example.com
GIT_AUTHOR_NAME='A U Thor'
GIT_COMMITTER_EMAIL=committer@example.com
GIT_COMMITTER_NAME='C O Mitter'
GIT_MERGE_VERBOSITY=5
GIT_MERGE_AUTOEDIT=no
export GIT_MERGE_VERBOSITY GIT_MERGE_AUTOEDIT
export GIT_AUTHOR_EMAIL GIT_AUTHOR_NAME
export GIT_COMMITTER_EMAIL GIT_COMMITTER_NAME
export EDITOR
# Tests using GIT_TRACE typically don't want <timestamp> <file>:<line> output
GIT_TRACE_BARE=1
export GIT_TRACE_BARE
if test -n "${TEST_GIT_INDEX_VERSION:+isset}"
then
GIT_INDEX_VERSION="$TEST_GIT_INDEX_VERSION"
export GIT_INDEX_VERSION
fi
# Enable libc's MALLOC_CHECK_ and MALLOC_PERTURB_ heap checks, but only
# if we are not executing the tests under valgrind; memcheck already
# subsumes what these checks would catch.
if expr " $GIT_TEST_OPTS " : ".* --valgrind " >/dev/null ||
   test -n "$TEST_NO_MALLOC_CHECK"
then
	setup_malloc_check () {
		: nothing
	}
	teardown_malloc_check () {
		: nothing
	}
else
	setup_malloc_check () {
		MALLOC_CHECK_=3 MALLOC_PERTURB_=165
		export MALLOC_CHECK_ MALLOC_PERTURB_
	}
	teardown_malloc_check () {
		unset MALLOC_CHECK_ MALLOC_PERTURB_
	}
fi
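# A minimal usage sketch (the command shown is a hypothetical example;
# test_run_ below brackets each test body exactly this way so the glibc
# checks apply only to the code under test):
#
#	setup_malloc_check
#	git rev-parse HEAD	# runs with MALLOC_CHECK_=3, MALLOC_PERTURB_=165
#	teardown_malloc_check
#
# Under --valgrind both helpers degrade to no-ops.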
: ${ASAN_OPTIONS=detect_leaks=0}
export ASAN_OPTIONS
# Protect ourselves from the common misconfiguration that exports
# CDPATH into the environment
unset CDPATH
unset GREP_OPTIONS
unset UNZIP
case $(echo $GIT_TRACE |tr "[A-Z]" "[a-z]") in
1|2|true)
	GIT_TRACE=4
	;;
esac
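# As a result, an invocation like the following (illustrative only)
#
#	GIT_TRACE=1 ./t0000-basic.sh --verbose
#
# sends git's trace output to file descriptor 4, the framework's error
# channel, so it is visible in verbose mode but never pollutes the
# stdout that tests capture and compare.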
# Convenience
#
# Regexps to match 5 and 40 hexdigits
_x05='[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]'
_x40="$_x05$_x05$_x05$_x05$_x05$_x05$_x05$_x05"
# Zero SHA-1
_z40=0000000000000000000000000000000000000000
# Line feed
LF='
'
# UTF-8 ZERO WIDTH NON-JOINER, which HFS+ ignores
# when case-folding filenames
u200c=$(printf '\342\200\214')
export _x05 _x40 _z40 LF u200c
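# These come in handy when matching object names in test output; a
# hypothetical snippet:
#
#	git rev-parse HEAD | grep "^$_x40\$" &&
#	test "$oid" != "$_z40"	# not the null SHA-1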
# Each test should start with something like this, after copyright notices:
#
# test_description='Description of this test...
# This test checks if command xyzzy does the right thing...
# '
# . ./test-lib.sh
test "x$TERM" != "xdumb" && (
test -t 1 &&
tput bold >/dev/null 2>&1 &&
tput setaf 1 >/dev/null 2>&1 &&
tput sgr0 >/dev/null 2>&1
) &&
color=t
while test "$#" -ne 0
do
case "$1" in
-d|--d|--de|--deb|--debu|--debug)
debug=t; shift ;;
-i|--i|--im|--imm|--imme|--immed|--immedi|--immedia|--immediat|--immediate)
immediate=t; shift ;;
-l|--l|--lo|--lon|--long|--long-|--long-t|--long-te|--long-tes|--long-test|--long-tests)
GIT_TEST_LONG=t; export GIT_TEST_LONG; shift ;;
-r)
shift; test "$#" -ne 0 || {
echo 'error: -r requires an argument' >&2;
exit 1;
}
run_list=$1; shift ;;
--run=*)
run_list=$(expr "z$1" : 'z[^=]*=\(.*\)'); shift ;;
-h|--h|--he|--hel|--help)
help=t; shift ;;
-v|--v|--ve|--ver|--verb|--verbo|--verbos|--verbose)
verbose=t; shift ;;
--verbose-only=*)
verbose_only=$(expr "z$1" : 'z[^=]*=\(.*\)')
shift ;;
-q|--q|--qu|--qui|--quie|--quiet)
# Ignore --quiet under a TAP::Harness. Saying how many tests
# passed without the ok/not ok details is always an error.
test -z "$HARNESS_ACTIVE" && quiet=t; shift ;;
--with-dashes)
with_dashes=t; shift ;;
--no-color)
color=; shift ;;
	--va|--val|--valg|--valgr|--valgri|--valgrin|--valgrind)
		valgrind=memcheck
		shift ;;
	--valgrind=*)
		valgrind=$(expr "z$1" : 'z[^=]*=\(.*\)')
		shift ;;
	--valgrind-only=*)
		valgrind_only=$(expr "z$1" : 'z[^=]*=\(.*\)')
		shift ;;
	--tee)
		shift ;; # was handled already
	--root=*)
		root=$(expr "z$1" : 'z[^=]*=\(.*\)')
		shift ;;
	--chain-lint)
		GIT_TEST_CHAIN_LINT=1
		shift ;;
	--no-chain-lint)
		GIT_TEST_CHAIN_LINT=0
		shift ;;
	-x)
		trace=t
		verbose=t
		shift ;;
	*)
		echo "error: unknown test option '$1'" >&2; exit 1 ;;
	esac
done
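# These options combine in useful ways; for instance (an illustrative
# invocation):
#
#	./t5512-ls-remote.sh -x --verbose-only=16
#
# traces only test 16: -x implies --verbose, and --verbose-only then
# restricts the verbose (and therefore traced) tests to the given list.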
if test -n "$valgrind_only"
then
test -z "$valgrind" && valgrind=memcheck
test -z "$verbose" && verbose_only="$valgrind_only"
elif test -n "$valgrind"
then
verbose=t
fi
if test -n "$color"
then
# Save the color control sequences now rather than run tput
# each time say_color() is called. This is done for two
# reasons:
# * TERM will be changed to dumb
# * HOME will be changed to a temporary directory and tput
# might need to read ~/.terminfo from the original HOME
# directory to get the control sequences
# Note: This approach assumes the control sequences don't end
# in a newline for any terminal of interest (command
# substitutions strip trailing newlines). Given that most
# (all?) terminals in common use are related to ECMA-48, this
# shouldn't be a problem.
say_color_error=$(tput bold; tput setaf 1) # bold red
say_color_skip=$(tput setaf 4) # blue
say_color_warn=$(tput setaf 3) # brown/yellow
say_color_pass=$(tput setaf 2) # green
say_color_info=$(tput setaf 6) # cyan
say_color_reset=$(tput sgr0)
say_color_="" # no formatting for normal text
say_color () {
test -z "$1" && test -n "$quiet" && return
eval "say_color_color=\$say_color_$1"
shift
printf "%s\\n" "$say_color_color$*$say_color_reset"
}
else
say_color() {
test -z "$1" && test -n "$quiet" && return
shift
printf "%s\n" "$*"
}
fi
TERM=dumb
export TERM
error () {
	say_color error "error: $*"
	GIT_EXIT_OK=t
	exit 1
}

say () {
	say_color info "$*"
}
test "${test_description}" != "" ||
error "Test script did not set test_description."
if test "$help" = "t"
then
printf '%s\n' "$test_description"
exit 0
fi
exec 5>&1
exec 6<&0
if test "$verbose" = "t"
then
exec 4>&2 3>&1
else
exec 4>/dev/null 3>/dev/null
fi
test_failure=0
test_count=0
test_fixed=0
test_broken=0
test_success=0
test_external_has_tap=0
die () {
	code=$?
	if test -n "$GIT_EXIT_OK"
	then
		exit $code
	else
		echo >&5 "FATAL: Unexpected exit with code $code"
		exit 1
	fi
}
GIT_EXIT_OK=
trap 'die' EXIT
trap 'exit $?' INT
# The user-facing functions are loaded from a separate file so that
# test_perf subshells can have them too
. "$TEST_DIRECTORY/test-lib-functions.sh"
# You are not expected to call test_ok_ and test_failure_ directly, use
# the test_expect_* functions instead.
test_ok_ () {
	test_success=$(($test_success + 1))
	say_color "" "ok $test_count - $@"
}

test_failure_ () {
	test_failure=$(($test_failure + 1))
	say_color error "not ok $test_count - $1"
	shift
	printf '%s\n' "$*" | sed -e 's/^/# /'
	test "$immediate" = "" || { GIT_EXIT_OK=t; exit 1; }
}
test_known_broken_ok_ () {
	test_fixed=$(($test_fixed+1))
	say_color error "ok $test_count - $@ # TODO known breakage vanished"
}
test_known_broken_failure_ () {
	test_broken=$(($test_broken+1))
	say_color warn "not ok $test_count - $@ # TODO known breakage"
}
test_debug () {
	test "$debug" = "" || eval "$1"
}
match_pattern_list () {
	arg="$1"
	shift
	test -z "$*" && return 1
	for pattern_
	do
		case "$arg" in
		$pattern_)
			return 0
		esac
	done
	return 1
}
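# The pattern list is passed unquoted so that the shell splits it into
# individual glob patterns, e.g. (hypothetical values):
#
#	GIT_SKIP_TESTS='t5512.* t91??'
#	match_pattern_list t5512.10 $GIT_SKIP_TESTS	# 0: matches t5512.*
#	match_pattern_list t0000.1 $GIT_SKIP_TESTS	# 1: no pattern matches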
match_test_selector_list () {
	title="$1"
	shift
	arg="$1"
	shift
	test -z "$1" && return 0

	# Both commas and whitespace are accepted as separators.
	OLDIFS=$IFS
	IFS=' ,'
	set -- $1
	IFS=$OLDIFS

	# If the first selector is negative we include by default.
	include=
	case "$1" in
	!*) include=t ;;
	esac

	for selector
	do
		orig_selector=$selector

		positive=t
		case "$selector" in
		!*)
			positive=
			selector=${selector##?}
			;;
		esac

		test -z "$selector" && continue

		case "$selector" in
		*-*)
			if expr "z${selector%%-*}" : "z[0-9]*[^0-9]" >/dev/null
			then
				echo "error: $title: invalid non-numeric in range" \
					"start: '$orig_selector'" >&2
				exit 1
			fi
			if expr "z${selector#*-}" : "z[0-9]*[^0-9]" >/dev/null
			then
				echo "error: $title: invalid non-numeric in range" \
					"end: '$orig_selector'" >&2
				exit 1
			fi
			;;
		*)
			if expr "z$selector" : "z[0-9]*[^0-9]" >/dev/null
			then
				echo "error: $title: invalid non-numeric in test" \
					"selector: '$orig_selector'" >&2
				exit 1
			fi
		esac

		# Short cut for "obvious" cases
		test -z "$include" && test -z "$positive" && continue
		test -n "$include" && test -n "$positive" && continue

		case "$selector" in
		-*)
			if test $arg -le ${selector#-}
			then
				include=$positive
			fi
			;;
		*-)
			if test $arg -ge ${selector%-}
			then
				include=$positive
			fi
			;;
		*-*)
			if test ${selector%%-*} -le $arg \
				&& test $arg -le ${selector#*-}
			then
				include=$positive
			fi
			;;
		*)
			if test $arg -eq $selector
			then
				include=$positive
			fi
			;;
		esac
	done

	test -n "$include"
}
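# The selector syntax in action (illustrative calls):
#
#	match_test_selector_list '--run' 4 '1-3,5'	# false: 4 is in neither
#	match_test_selector_list '--run' 4 '!2'		# true: a leading negative
#							# selector includes the rest
#	match_test_selector_list '--run' 4 '2-'		# true: open-ended range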
maybe_teardown_verbose () {
	test -z "$verbose_only" && return
	exec 4>/dev/null 3>/dev/null
	verbose=
}
last_verbose=t
maybe_setup_verbose () {
	test -z "$verbose_only" && return
	if match_pattern_list $test_count $verbose_only
	then
		exec 4>&2 3>&1
		# Emit a delimiting blank line when going from
		# non-verbose to verbose. Within verbose mode the
		# delimiter is printed by test_expect_*. The choice
		# of the initial $last_verbose is such that before
		# test 1, we do not print it.
		test -z "$last_verbose" && echo >&3 ""
		verbose=t
	else
		exec 4>/dev/null 3>/dev/null
		verbose=
	fi
	last_verbose=$verbose
}
maybe_teardown_valgrind () {
	test -z "$GIT_VALGRIND" && return
	GIT_VALGRIND_ENABLED=
}

maybe_setup_valgrind () {
	test -z "$GIT_VALGRIND" && return
	if test -z "$valgrind_only"
	then
		GIT_VALGRIND_ENABLED=t
		return
	fi
	GIT_VALGRIND_ENABLED=
	if match_pattern_list $test_count $valgrind_only
	then
		GIT_VALGRIND_ENABLED=t
	fi
}
want_trace () {
	test "$trace" = t && test "$verbose" = t
}
# This is a separate function because some tests use
# "return" to end a test_expect_success block early
# (and we want to make sure we run any cleanup like
# "set +x").
test_eval_inner_ () {
	# Do not add anything extra (including LF) after '$*'
	eval "
		want_trace && set -x
		$*"
}
test_eval_ () {
	# We run this block with stderr redirected to avoid extra cruft
	# during a "-x" trace. Once in "set -x" mode, we cannot prevent
	# the shell from printing the "set +x" to turn it off (nor the saving
	# of $? before that). But we can make sure that the output goes to
	# /dev/null.
	#
	# The test itself is run with stderr put back to &4 (so either to
	# /dev/null, or to the original stderr if --verbose was used).
	{
		test_eval_inner_ "$@" </dev/null >&3 2>&4
		test_eval_ret_=$?
		if want_trace
		then
			set +x
			if test "$test_eval_ret_" != 0
			then
				say_color error >&4 "error: last command exited with \$?=$test_eval_ret_"
			fi
		fi
	} 2>/dev/null
	return $test_eval_ret_
}
test_run_ () {
	test_cleanup=:
	expecting_failure=$2
if test "${GIT_TEST_CHAIN_LINT:-1}" != 0; then
# turn off tracing for this test-eval, as it simply creates
# confusing noise in the "-x" output
trace_tmp=$trace
trace=
		# 117 is magic because it is unlikely to match the exit
		# code of other programs
		test_eval_ "(exit 117) && $1"
		if test "$?" != 117; then
			error "bug in the test script: broken &&-chain: $1"
		fi
		trace=$trace_tmp
	fi

	setup_malloc_check
	test_eval_ "$1"
	eval_ret=$?
	teardown_malloc_check

	if test -z "$immediate" || test $eval_ret = 0 ||
	   test -n "$expecting_failure" && test "$test_cleanup" != ":"
	then
		setup_malloc_check
		test_eval_ "$test_cleanup"
		teardown_malloc_check
	fi
	if test "$verbose" = "t" && test -n "$HARNESS_ACTIVE"
	then
		echo ""
	fi
	return "$eval_ret"
}
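# The chain-lint probe above catches tests whose &&-chain is broken, as
# in this hypothetical offender:
#
#	test_expect_success 'check something important' '
#		cmd1 &&
#		cmd2
#		cmd3
#	'
#
# Prefixing the body with "(exit 117) &&" should make the whole snippet
# exit with 117; if cmd3 still runs because the chain breaks after cmd2,
# the 117 is lost and the suite aborts with "broken &&-chain".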
test_start_ () {
	test_count=$(($test_count+1))
	maybe_setup_verbose
	maybe_setup_valgrind
}

test_finish_ () {
	echo >&3 ""
	maybe_teardown_valgrind
	maybe_teardown_verbose
}
test_skip () {
	to_skip=
	skipped_reason=
	if match_pattern_list $this_test.$test_count $GIT_SKIP_TESTS
	then
		to_skip=t
		skipped_reason="GIT_SKIP_TESTS"
	fi
	if test -z "$to_skip" && test -n "$test_prereq" &&
	   ! test_have_prereq "$test_prereq"
	then
		to_skip=t

		of_prereq=
		if test "$missing_prereq" != "$test_prereq"
		then
			of_prereq=" of $test_prereq"
		fi
		skipped_reason="missing $missing_prereq${of_prereq}"
	fi
	if test -z "$to_skip" && test -n "$run_list" &&
	   ! match_test_selector_list '--run' $test_count "$run_list"
	then
		to_skip=t
		skipped_reason="--run"
	fi

	case "$to_skip" in
	t)
		say_color skip >&3 "skipping test: $@"
		say_color skip "ok $test_count # skip $1 ($skipped_reason)"
		: true
		;;
	*)
		false
		;;
	esac
}
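# For example (illustrative), individual tests or whole scripts can be
# skipped with glob patterns matched against "<script>.<number>":
#
#	GIT_SKIP_TESTS='t9200.8 t95*' make test
#
# skips test 8 of t9200 and everything in the t95xx scripts, reporting
# each as "ok ... # skip" so TAP harnesses keep counting correctly.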
# stub; perf-lib overrides it
test_at_end_hook_ () {
	:
}
test_done () {
	GIT_EXIT_OK=t

	if test -z "$HARNESS_ACTIVE"
	then
		test_results_dir="$TEST_OUTPUT_DIRECTORY/test-results"
		mkdir -p "$test_results_dir"
		base=${0##*/}
		test_results_path="$test_results_dir/${base%.sh}-$$.counts"

		cat >>"$test_results_path" <<-EOF
		total $test_count
		success $test_success
		fixed $test_fixed
		broken $test_broken
		failed $test_failure
		EOF
	fi
if test "$test_fixed" != 0
then
say_color error "# $test_fixed known breakage(s) vanished; please update test(s)"
fi
if test "$test_broken" != 0
then
say_color warn "# still have $test_broken known breakage(s)"
fi
if test "$test_broken" != 0 || test "$test_fixed" != 0
then
test_remaining=$(( $test_count - $test_broken - $test_fixed ))
msg="remaining $test_remaining test(s)"
else
test_remaining=$test_count
msg="$test_count test(s)"
fi
case "$test_failure" in
0)
# Maybe print SKIP message
if test -n "$skip_all" && test $test_count -gt 0
then
error "Can't use skip_all after running some tests"
fi
test -z "$skip_all" || skip_all=" # SKIP $skip_all"
if test $test_external_has_tap -eq 0
then
if test $test_remaining -gt 0
then
say_color pass "# passed all $msg"
fi
say "1..$test_count$skip_all"
fi
test -d "$remove_trash" &&
cd "$(dirname "$remove_trash")" &&
rm -rf "$(basename "$remove_trash")"
test_at_end_hook_
exit 0 ;;
*)
if test $test_external_has_tap -eq 0
then
say_color error "# failed $test_failure among $msg"
say "1..$test_count"
fi
exit 1 ;;
esac
}
if test -n "$valgrind"
then
make_symlink () {
test -h "$2" &&
test "$1" = "$(readlink "$2")" || {
# be super paranoid
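			# mkdir is atomic, so the lock directory doubles as
			# a mutex when tests run in parallel: the process
			# that creates it replaces the link; everyone else
			# waits for the lock to go away.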
if mkdir "$2".lock
then
rm -f "$2" &&
ln -s "$1" "$2" &&
rm -r "$2".lock
else
while test -d "$2".lock
do
say "Waiting for lock on $2."
sleep 1
done
fi
}
}
make_valgrind_symlink () {
# handle only executables, unless they are shell libraries that
# need to be in the exec-path.
test -x "$1" ||
test "# " = "$(head -c 2 <"$1")" ||
		return
base=$(basename "$1")
symlink_target=$GIT_BUILD_DIR/$base
# do not override scripts
if test -x "$symlink_target" &&
test ! -d "$symlink_target" &&
test "#!" != "$(head -c 2 < "$symlink_target")"
then
symlink_target=../valgrind.sh
fi
case "$base" in
*.sh|*.perl)
symlink_target=../unprocessed-script
esac
# create the link, or replace it if it is out of date
make_symlink "$symlink_target" "$GIT_VALGRIND/bin/$base" || exit
}
# override all git executables in TEST_DIRECTORY/..
GIT_VALGRIND=$TEST_DIRECTORY/valgrind
mkdir -p "$GIT_VALGRIND"/bin
for file in $GIT_BUILD_DIR/git* $GIT_BUILD_DIR/test-*
do
	make_valgrind_symlink "$file"
done
# special-case the mergetools loadables
make_symlink "$GIT_BUILD_DIR"/mergetools "$GIT_VALGRIND/bin/mergetools"
OLDIFS=$IFS
IFS=:
for path in $PATH
do
ls "$path"/git-* 2> /dev/null |
while read file
do
make_valgrind_symlink "$file"
done
done
IFS=$OLDIFS
PATH=$GIT_VALGRIND/bin:$PATH
GIT_EXEC_PATH=$GIT_VALGRIND/bin
export GIT_VALGRIND
GIT_VALGRIND_MODE="$valgrind"
export GIT_VALGRIND_MODE
GIT_VALGRIND_ENABLED=t
test -n "$valgrind_only" && GIT_VALGRIND_ENABLED=
export GIT_VALGRIND_ENABLED
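	# With the symlink farm in front of PATH and GIT_EXEC_PATH, every
	# git invocation in the tests runs under valgrind; a typical way
	# to request such a run is something like:
	#
	#	GIT_TEST_OPTS=--valgrind make test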
elif test -n "$GIT_TEST_INSTALLED"
then
GIT_EXEC_PATH=$($GIT_TEST_INSTALLED/git --exec-path) ||
error "Cannot run git from $GIT_TEST_INSTALLED."
PATH=$GIT_TEST_INSTALLED:$GIT_BUILD_DIR:$PATH
GIT_EXEC_PATH=${GIT_TEST_EXEC_PATH:-$GIT_EXEC_PATH}
else # normal case, use ../bin-wrappers only unless $with_dashes:
git_bin_dir="$GIT_BUILD_DIR/bin-wrappers"
if ! test -x "$git_bin_dir/git"
then
if test -z "$with_dashes"
then
say "$git_bin_dir/git is not executable; using GIT_EXEC_PATH"
fi
with_dashes=t
fi
PATH="$git_bin_dir:$PATH"
GIT_EXEC_PATH=$GIT_BUILD_DIR
if test -n "$with_dashes"
then
PATH="$GIT_BUILD_DIR:$PATH"
fi
fi
GIT_TEMPLATE_DIR="$GIT_BUILD_DIR"/templates/blt
GIT_CONFIG_NOSYSTEM=1
GIT_ATTR_NOSYSTEM=1
export PATH GIT_EXEC_PATH GIT_TEMPLATE_DIR GIT_CONFIG_NOSYSTEM GIT_ATTR_NOSYSTEM
if test -z "$GIT_TEST_CMP"
then
if test -n "$GIT_TEST_CMP_USE_COPIED_CONTEXT"
then
GIT_TEST_CMP="$DIFF -c"
else
GIT_TEST_CMP="$DIFF -u"
fi
fi
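# A different comparison command can also be supplied per run; for
# example, to ignore whitespace differences one could do something like:
#
#	GIT_TEST_CMP='diff -w' ./t0000-basic.sh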
GITPERLLIB="$GIT_BUILD_DIR"/perl/blib/lib:"$GIT_BUILD_DIR"/perl/blib/arch/auto/Git
export GITPERLLIB
test -d "$GIT_BUILD_DIR"/templates/blt || {
error "You haven't built things yet, have you?"
}
if ! test -x "$GIT_BUILD_DIR"/test-chmtime
then
echo >&2 'You need to build test-chmtime:'
echo >&2 'Run "make test-chmtime" in the source (toplevel) directory'
exit 1
fi
# Test repository
TRASH_DIRECTORY="trash directory.$(basename "$0" .sh)"
test -n "$root" && TRASH_DIRECTORY="$root/$TRASH_DIRECTORY"
case "$TRASH_DIRECTORY" in
/*) ;; # absolute path is good
*) TRASH_DIRECTORY="$TEST_OUTPUT_DIRECTORY/$TRASH_DIRECTORY" ;;
esac
test ! -z "$debug" || remove_trash=$TRASH_DIRECTORY
rm -fr "$TRASH_DIRECTORY" || {
GIT_EXIT_OK=t
echo >&5 "FATAL: Cannot prepare test area"
exit 1
}
HOME="$TRASH_DIRECTORY"
GNUPGHOME="$HOME/gnupg-home-not-used"
export HOME GNUPGHOME
if test -z "$TEST_NO_CREATE_REPO"
then
test_create_repo "$TRASH_DIRECTORY"
else
mkdir -p "$TRASH_DIRECTORY"
fi
# Use -P to resolve symlinks in our working directory so that the cwd
# in subprocesses like git equals our $PWD (for pathname comparisons).
cd -P "$TRASH_DIRECTORY" || exit 1
this_test=${0##*/}
this_test=${this_test%%-*}
if match_pattern_list "$this_test" $GIT_SKIP_TESTS
then
say_color info >&3 "skipping test $this_test altogether"
skip_all="skip all tests in $this_test"
test_done
fi
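# GIT_SKIP_TESTS takes a space-separated list of patterns (see t/README);
# for example, whole ranges of scripts can be skipped with something like:
#
#	GIT_SKIP_TESTS='t[0-4]??? t91??' make test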
# Provide an implementation of the 'yes' utility
yes () {
if test $# = 0
then
y=y
else
y="$*"
fi
while echo "$y"
do
:
done
}
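# This stand-in behaves like yes(1) for the way the tests use it, namely
# feeding a canned answer to an interactive command, e.g.:
#
#	yes n | git clean -i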
# Fix some commands on Windows
case $(uname -s) in
*MINGW*)
# Windows has its own (incompatible) sort and find
sort () {
/usr/bin/sort "$@"
}
find () {
/usr/bin/find "$@"
}
sum () {
md5sum "$@"
}
# git sees Windows-style pwd
pwd () {
builtin pwd -W
}
# no POSIX permissions
# backslashes in pathspec are converted to '/'
# exec does not inherit the PID
test_set_prereq MINGW
test_set_prereq NATIVE_CRLF
test_set_prereq SED_STRIPS_CR
test_set_prereq GREP_STRIPS_CR
GIT_TEST_CMP=mingw_test_cmp
;;
*CYGWIN*)
test_set_prereq POSIXPERM
test_set_prereq EXECKEEPSPID
test_set_prereq CYGWIN
test_set_prereq SED_STRIPS_CR
test_set_prereq GREP_STRIPS_CR
;;
*)
test_set_prereq POSIXPERM
test_set_prereq BSLASHPSPEC
test_set_prereq EXECKEEPSPID
;;
esac
( COLUMNS=1 && test $COLUMNS = 1 ) && test_set_prereq COLUMNS_CAN_BE_1
test -z "$NO_PERL" && test_set_prereq PERL
test -z "$NO_PYTHON" && test_set_prereq PYTHON
test -n "$USE_LIBPCRE" && test_set_prereq LIBPCRE
test -z "$NO_GETTEXT" && test_set_prereq GETTEXT
# Can we rely on git's output in the C locale?
if test -n "$GETTEXT_POISON"
then
GIT_GETTEXT_POISON=YesPlease
export GIT_GETTEXT_POISON
test_set_prereq GETTEXT_POISON
else
test_set_prereq C_LOCALE_OUTPUT
fi
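# GETTEXT_POISON is a compile-time switch, so a poisoned test run is
# requested at build time, along the lines of:
#
#	make GETTEXT_POISON=YesPlease test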
# Use this instead of test_cmp to compare files that contain expected and
# actual output from git commands that can be translated. When running
# under GETTEXT_POISON this pretends that the command produced expected
# results.
test_i18ncmp () {
test -n "$GETTEXT_POISON" || test_cmp "$@"
}
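# Typical use mirrors test_cmp:
#
#	test_i18ncmp expect actual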
# Use this instead of "grep expected-string actual" to see if the
# output from a git command that can be translated either contains an
# expected string, or does not contain an unwanted one. When running
# under GETTEXT_POISON this pretends that the command produced expected
# results.
test_i18ngrep () {
if test -n "$GETTEXT_POISON"
then
: # pretend success
elif test "x!" = "x$1"
then
shift
! grep "$@"
else
grep "$@"
fi
}
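# Typical uses, in both the positive and the negated form (the file
# names here are illustrative):
#
#	test_i18ngrep "not a git repository" err
#	test_i18ngrep ! "unexpected message" out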
test_lazy_prereq PIPE '
# test whether the filesystem supports FIFOs
case $(uname -s) in
CYGWIN*)
false
;;
*)
rm -f testfifo && mkfifo testfifo
;;
esac
'
test_lazy_prereq SYMLINKS '
# test whether the filesystem supports symbolic links
ln -s x y && test -h y
'
test_lazy_prereq FILEMODE '
test "$(git config --bool core.filemode)" = true
'
test_lazy_prereq CASE_INSENSITIVE_FS '
echo good >CamelCase &&
echo bad >camelcase &&
test "$(cat CamelCase)" != good
'
test_lazy_prereq UTF8_NFD_TO_NFC '
	# check whether the filesystem converts NFD unicode to NFC
auml=$(printf "\303\244")
aumlcdiar=$(printf "\141\314\210")
>"$auml" &&
case "$(echo *)" in
"$aumlcdiar")
true ;;
*)
false ;;
esac
'
test_lazy_prereq AUTOIDENT '
sane_unset GIT_AUTHOR_NAME &&
sane_unset GIT_AUTHOR_EMAIL &&
git var GIT_AUTHOR_IDENT
'
test_lazy_prereq EXPENSIVE '
test -n "$GIT_TEST_LONG"
'
test_lazy_prereq USR_BIN_TIME '
test -x /usr/bin/time
'
test_lazy_prereq NOT_ROOT '
uid=$(id -u) &&
test "$uid" != 0
'
# On a filesystem that lacks SANITY, a file can be deleted even if
# the containing directory doesn't have write permissions, or a file
# can be accessed even if the containing directory doesn't have read
# or execute permissions, causing our tests that validate that Git
# works sensibly in such situations to fail.
test_lazy_prereq SANITY '
mkdir SANETESTD.1 SANETESTD.2 &&
chmod +w SANETESTD.1 SANETESTD.2 &&
>SANETESTD.1/x 2>SANETESTD.2/x &&
chmod -w SANETESTD.1 &&
chmod -rx SANETESTD.2 ||
error "bug in test sript: cannot prepare SANETESTD"
! rm SANETESTD.1/x && ! test -f SANETESTD.2/x
status=$?
chmod +rwx SANETESTD.1 SANETESTD.2 &&
rm -rf SANETESTD.1 SANETESTD.2 ||
error "bug in test sript: cannot clean SANETESTD"
return $status
'
GIT_UNZIP=${GIT_UNZIP:-unzip}
test_lazy_prereq UNZIP '
"$GIT_UNZIP" -v
test $? -ne 127
'
run_with_limited_cmdline () {
(ulimit -s 128 && "$@")
}
test_lazy_prereq CMDLINE_LIMIT 'run_with_limited_cmdline true'
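# A lazily evaluated prerequisite is requested the same way as an eager
# one, by naming it in front of the test title; an illustrative use:
#
#	test_expect_success CMDLINE_LIMIT 'large argument lists work' '
#		run_with_limited_cmdline git add .
#	'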