/*
 * This handles recursive filename detection with exclude
 * files, index knowledge etc.
 *
 * See Documentation/technical/api-directory-listing.txt
 *
 * Copyright (C) Linus Torvalds, 2005-2006
 *               Junio Hamano, 2005-2006
 */
#include "cache.h"
#include "dir.h"
#include "refs.h"
#include "wildmatch.h"
#include "pathspec.h"
#include "utf8.h"
#include "varint.h"
#include "ewah/ewok.h"

struct path_simplify {
        int len;
        const char *path;
};

/*
 * Tells read_directory_recursive how a file or directory should be treated.
 * Values are ordered by significance, e.g. if a directory contains both
 * excluded and untracked files, it is listed as untracked because
 * path_untracked > path_excluded.
 */
enum path_treatment {
        path_none = 0,
        path_recurse,
        path_excluded,
        path_untracked
};
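
/*
 * For example: a directory that contains both an ignored "foo.o" and
 * an untracked "bar.c" is reported with the larger value,
 * path_untracked.
 */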

/*
 * Support data structure for our opendir/readdir/closedir wrappers
 */
struct cached_dir {
        DIR *fdir;
        struct untracked_cache_dir *untracked;
        int nr_files;
        int nr_dirs;

        struct dirent *de;
        const char *file;
        struct untracked_cache_dir *ucd;
};

static enum path_treatment read_directory_recursive(struct dir_struct *dir,
        const char *path, int len, struct untracked_cache_dir *untracked,
        int check_only, const struct path_simplify *simplify);
static int get_dtype(struct dirent *de, const char *path, int len);

/* helper string functions with support for the ignore_case flag */
int strcmp_icase(const char *a, const char *b)
{
        return ignore_case ? strcasecmp(a, b) : strcmp(a, b);
}

int strncmp_icase(const char *a, const char *b, size_t count)
{
        return ignore_case ? strncasecmp(a, b, count) : strncmp(a, b, count);
}

int fnmatch_icase(const char *pattern, const char *string, int flags)
{
        return wildmatch(pattern, string,
                         flags | (ignore_case ? WM_CASEFOLD : 0),
                         NULL);
}

int git_fnmatch(const struct pathspec_item *item,
                const char *pattern, const char *string,
                int prefix)
{
        if (prefix > 0) {
                if (ps_strncmp(item, pattern, string, prefix))
                        return WM_NOMATCH;
                pattern += prefix;
                string += prefix;
        }
        if (item->flags & PATHSPEC_ONESTAR) {
                int pattern_len = strlen(++pattern);
                int string_len = strlen(string);
                return string_len < pattern_len ||
                        ps_strcmp(item, pattern,
                                  string + string_len - pattern_len);
        }
        if (item->magic & PATHSPEC_GLOB)
                return wildmatch(pattern, string,
                                 WM_PATHNAME |
                                 (item->magic & PATHSPEC_ICASE ? WM_CASEFOLD : 0),
                                 NULL);
        else
                /* wildmatch has not learned a no-FNM_PATHNAME mode yet */
                return wildmatch(pattern, string,
                                 item->magic & PATHSPEC_ICASE ? WM_CASEFOLD : 0,
                                 NULL);
}
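
/*
 * For example: a pathspec item whose pattern is "*.c" takes the
 * PATHSPEC_ONESTAR shortcut above; "foo.c" matches because it ends
 * with ".c", and no full wildmatch() call is needed.
 */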

static int fnmatch_icase_mem(const char *pattern, int patternlen,
                             const char *string, int stringlen,
                             int flags)
{
        int match_status;
        struct strbuf pat_buf = STRBUF_INIT;
        struct strbuf str_buf = STRBUF_INIT;
        const char *use_pat = pattern;
        const char *use_str = string;

        if (pattern[patternlen]) {
                strbuf_add(&pat_buf, pattern, patternlen);
                use_pat = pat_buf.buf;
        }
        if (string[stringlen]) {
                strbuf_add(&str_buf, string, stringlen);
                use_str = str_buf.buf;
        }

        if (ignore_case)
                flags |= WM_CASEFOLD;
        match_status = wildmatch(use_pat, use_str, flags, NULL);

        strbuf_release(&pat_buf);
        strbuf_release(&str_buf);

        return match_status;
}
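
/*
 * Note: the strbuf copies above are only made when the counted
 * strings are not already NUL-terminated at patternlen/stringlen;
 * in the common case of full strings no allocation happens.
 */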

static size_t common_prefix_len(const struct pathspec *pathspec)
{
        int n;
        size_t max = 0;

        /*
         * ":(icase)path" is treated as a pathspec full of
         * wildcard. In other words, only prefix is considered common
         * prefix. If the pathspec is abc/foo abc/bar, running in
         * subdir xyz, the common prefix is still xyz, not xyz/abc as
         * in non-:(icase).
         */
        GUARD_PATHSPEC(pathspec,
                       PATHSPEC_FROMTOP |
                       PATHSPEC_MAXDEPTH |
                       PATHSPEC_LITERAL |
                       PATHSPEC_GLOB |
                       PATHSPEC_ICASE |
                       PATHSPEC_EXCLUDE);

        for (n = 0; n < pathspec->nr; n++) {
                size_t i = 0, len = 0, item_len;
                if (pathspec->items[n].magic & PATHSPEC_EXCLUDE)
                        continue;
                if (pathspec->items[n].magic & PATHSPEC_ICASE)
                        item_len = pathspec->items[n].prefix;
                else
                        item_len = pathspec->items[n].nowildcard_len;
                while (i < item_len && (n == 0 || i < max)) {
                        char c = pathspec->items[n].match[i];
                        if (c != pathspec->items[0].match[i])
                                break;
                        if (c == '/')
                                len = i + 1;
                        i++;
                }
                if (n == 0 || len < max) {
                        max = len;
                        if (!max)
                                break;
                }
        }
        return max;
}
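
/*
 * For example: for the pathspec "abc/foo abc/bar" the loop above
 * stops at the first differing character after "abc/", so the common
 * prefix length is 4 ("abc/"); the prefix only grows at '/'
 * boundaries.
 */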

/*
 * Returns a copy of the longest leading path common among all
 * pathspecs.
 */
char *common_prefix(const struct pathspec *pathspec)
{
        unsigned long len = common_prefix_len(pathspec);

        return len ? xmemdupz(pathspec->items[0].match, len) : NULL;
}

int fill_directory(struct dir_struct *dir, const struct pathspec *pathspec)
{
        size_t len;

        /*
         * Calculate common prefix for the pathspec, and
         * use that to optimize the directory walk
         */
        len = common_prefix_len(pathspec);

        /* Read the directory and prune it */
        read_directory(dir, pathspec->nr ? pathspec->_raw[0] : "", len, pathspec);
        return len;
}
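
/*
 * The pathspec is only a coarse, first-level filter for the directory
 * walk; callers are still expected to do the exact pathspec match on
 * the collected entries afterwards (e.g. builtin add prunes the
 * result with the full pathspec once read_directory() returns).
 */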

int within_depth(const char *name, int namelen,
                 int depth, int max_depth)
{
        const char *cp = name, *cpe = name + namelen;

        while (cp < cpe) {
                if (*cp++ != '/')
                        continue;
                depth++;
                if (depth > max_depth)
                        return 0;
        }
        return 1;
}
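
/*
 * For example, starting from depth 0 with max_depth 1: "a/b" stays
 * within depth (one slash), while "a/b/c" does not (the second slash
 * pushes the depth past the limit).
 */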

#define DO_MATCH_EXCLUDE 1
#define DO_MATCH_DIRECTORY 2

/*
 * Does 'match' match the given name?
 * A match is found if
 *
 * (1) the 'match' string is a leading directory of 'name', or
 * (2) the 'match' string is a wildcard and matches 'name', or
 * (3) the 'match' string is exactly the same as 'name'.
 *
 * and the return value tells which case it was.
 *
 * It returns 0 when there is no match.
 */
static int match_pathspec_item(const struct pathspec_item *item, int prefix,
                               const char *name, int namelen, unsigned flags)
{
        /* name/namelen has prefix cut off by caller */
        const char *match = item->match + prefix;
        int matchlen = item->len - prefix;

        /*
         * The normal call pattern is:
         * 1. prefix = common_prefix_len(ps);
         * 2. prune something, or fill_directory
         * 3. match_pathspec()
         *
         * 'prefix' at #1 may be shorter than the command's prefix and
         * it's ok for #2 to match extra files. Those extras will be
         * trimmed at #3.
         *
         * Suppose the pathspec is 'foo' and '../bar' running from
         * subdir 'xyz'. The common prefix at #1 will be empty, thanks
         * to "../". We may have xyz/foo _and_ XYZ/foo after #2. The
         * user does not want XYZ/foo, only the "foo" part should be
         * case-insensitive. We need to filter out XYZ/foo here. In
         * other words, we do not trust the caller on comparing the
         * prefix part when :(icase) is involved. We do exact
         * comparison ourselves.
         *
         * Normally the caller (common_prefix_len() in fact) does
         * _exact_ matching on name[-prefix+1..-1] and we do not need
         * to check that part. Be defensive and check it anyway, in
         * case common_prefix_len is changed, or a new caller is
         * introduced that does not use common_prefix_len.
         *
         * If the penalty turns out too high when prefix is really
         * long, maybe change it to
         *   strncmp(match, name, item->prefix - prefix)
         */
        if (item->prefix && (item->magic & PATHSPEC_ICASE) &&
            strncmp(item->match, name - prefix, item->prefix))
                return 0;

        /* If the match was just the prefix, we matched */
        if (!*match)
                return MATCHED_RECURSIVELY;

        if (matchlen <= namelen && !ps_strncmp(item, match, name, matchlen)) {
                if (matchlen == namelen)
                        return MATCHED_EXACTLY;

                if (match[matchlen-1] == '/' || name[matchlen] == '/')
                        return MATCHED_RECURSIVELY;
        } else if ((flags & DO_MATCH_DIRECTORY) &&
                   match[matchlen - 1] == '/' &&
                   namelen == matchlen - 1 &&
                   !ps_strncmp(item, match, name, namelen))
                return MATCHED_EXACTLY;

        if (item->nowildcard_len < item->len &&
            !git_fnmatch(item, match, name,
                         item->nowildcard_len - prefix))
                return MATCHED_FNMATCH;

        return 0;
}
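
/*
 * For example: the pathspec item "Documentation" matches the name
 * "Documentation" exactly (case 3), matches "Documentation/git.txt"
 * as a leading directory (case 1), and an item like "*.txt" matches
 * "Documentation/git.txt" through the wildcard path (case 2).
 */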

/*
 * Given a name and a list of pathspecs, returns the nature of the
 * closest (i.e. most specific) match of the name to any of the
 * pathspecs.
 *
 * The caller typically calls this multiple times with the same
 * pathspec and seen[] array but with different name/namelen
 * (e.g. entries from the index) and is interested in seeing if and
 * how each pathspec matches all the names it calls this function
 * with. A mark is left in the seen[] array for each pathspec element
 * indicating the closest type of match that element achieved, so if
 * seen[n] remains zero after multiple invocations, that means the nth
 * pathspec did not match any names, which could indicate that the
 * user mistyped the nth pathspec.
 */
static int do_match_pathspec(const struct pathspec *ps,
                             const char *name, int namelen,
                             int prefix, char *seen,
                             unsigned flags)
{
        int i, retval = 0, exclude = flags & DO_MATCH_EXCLUDE;

        GUARD_PATHSPEC(ps,
                       PATHSPEC_FROMTOP |
                       PATHSPEC_MAXDEPTH |
                       PATHSPEC_LITERAL |
                       PATHSPEC_GLOB |
                       PATHSPEC_ICASE |
                       PATHSPEC_EXCLUDE);

        if (!ps->nr) {
                if (!ps->recursive ||
                    !(ps->magic & PATHSPEC_MAXDEPTH) ||
                    ps->max_depth == -1)
                        return MATCHED_RECURSIVELY;

                if (within_depth(name, namelen, 0, ps->max_depth))
                        return MATCHED_EXACTLY;
                else
                        return 0;
        }

        name += prefix;
        namelen -= prefix;

        for (i = ps->nr - 1; i >= 0; i--) {
                int how;

                if ((!exclude && ps->items[i].magic & PATHSPEC_EXCLUDE) ||
                    ( exclude && !(ps->items[i].magic & PATHSPEC_EXCLUDE)))
                        continue;

                if (seen && seen[i] == MATCHED_EXACTLY)
                        continue;
                /*
                 * Make exclude patterns optional and never report
                 * "pathspec ':(exclude)foo' matches no files"
                 */
                if (seen && ps->items[i].magic & PATHSPEC_EXCLUDE)
                        seen[i] = MATCHED_FNMATCH;
                how = match_pathspec_item(ps->items+i, prefix, name,
                                          namelen, flags);
                if (ps->recursive &&
                    (ps->magic & PATHSPEC_MAXDEPTH) &&
                    ps->max_depth != -1 &&
                    how && how != MATCHED_FNMATCH) {
                        int len = ps->items[i].len;
                        if (name[len] == '/')
                                len++;
                        if (within_depth(name+len, namelen-len, 0, ps->max_depth))
                                how = MATCHED_EXACTLY;
                        else
                                how = 0;
                }
                if (how) {
                        if (retval < how)
                                retval = how;
                        if (seen && seen[i] < how)
                                seen[i] = how;
                }
        }
        return retval;
}

int match_pathspec(const struct pathspec *ps,
                   const char *name, int namelen,
                   int prefix, char *seen, int is_dir)
{
        int positive, negative;
        unsigned flags = is_dir ? DO_MATCH_DIRECTORY : 0;
        positive = do_match_pathspec(ps, name, namelen,
                                     prefix, seen, flags);
        if (!(ps->magic & PATHSPEC_EXCLUDE) || !positive)
                return positive;
        negative = do_match_pathspec(ps, name, namelen,
                                     prefix, seen,
                                     flags | DO_MATCH_EXCLUDE);
        return negative ? 0 : positive;
}
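
/*
 * For example, with the pathspec "src :(exclude)src/*.o":
 * "src/foo.c" matches (positive match, no exclude item matches it),
 * while "src/foo.o" is rejected because the second, negative pass
 * over the :(exclude) items also matches it.
 */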

int report_path_error(const char *ps_matched,
                      const struct pathspec *pathspec,
                      const char *prefix)
{
        /*
         * Make sure all pathspec matched; otherwise it is an error.
         */
        int num, errors = 0;
        for (num = 0; num < pathspec->nr; num++) {
                int other, found_dup;

                if (ps_matched[num])
                        continue;
                /*
                 * The caller might have fed identical pathspec
                 * twice. Do not barf on such a mistake.
                 * FIXME: parse_pathspec should have eliminated
                 * duplicate pathspec.
                 */
                for (found_dup = other = 0;
                     !found_dup && other < pathspec->nr;
                     other++) {
                        if (other == num || !ps_matched[other])
                                continue;
                        if (!strcmp(pathspec->items[other].original,
                                    pathspec->items[num].original))
                                /*
                                 * Ok, we have a match already.
                                 */
                                found_dup = 1;
                }
                if (found_dup)
                        continue;

                error("pathspec '%s' did not match any file(s) known to git.",
                      pathspec->items[num].original);
                errors++;
        }
        return errors;
}

/*
 * Return the length of the "simple" part of a path match limiter.
 */
int simple_length(const char *match)
{
        int len = -1;

        for (;;) {
                unsigned char c = *match++;
                len++;
                if (c == '\0' || is_glob_special(c))
                        return len;
        }
}

int no_wildcard(const char *string)
{
        return string[simple_length(string)] == '\0';
}
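
/*
 * For example: simple_length("foo*bar") is 3 (it stops at the first
 * glob-special character), so no_wildcard("foo*bar") is false, while
 * no_wildcard("foo") is true.
 */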

void parse_exclude_pattern(const char **pattern,
                           int *patternlen,
                           unsigned *flags,
                           int *nowildcardlen)
{
        const char *p = *pattern;
        size_t i, len;

        *flags = 0;
        if (*p == '!') {
                *flags |= EXC_FLAG_NEGATIVE;
                p++;
        }
        len = strlen(p);
        if (len && p[len - 1] == '/') {
                len--;
                *flags |= EXC_FLAG_MUSTBEDIR;
        }
        for (i = 0; i < len; i++) {
                if (p[i] == '/')
                        break;
        }
        if (i == len)
                *flags |= EXC_FLAG_NODIR;
        *nowildcardlen = simple_length(p);
        /*
         * we should have excluded the trailing slash from 'p' too,
         * but that's one more allocation. Instead just make sure
         * nowildcardlen does not exceed real patternlen
         */
        if (*nowildcardlen > len)
                *nowildcardlen = len;
        if (*p == '*' && no_wildcard(p + 1))
                *flags |= EXC_FLAG_ENDSWITH;
        *pattern = p;
        *patternlen = len;
}
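
/*
 * For example: the pattern "!build/" comes out with patternlen 5
 * ("build") and EXC_FLAG_NEGATIVE | EXC_FLAG_MUSTBEDIR | EXC_FLAG_NODIR,
 * while "*.o" gets EXC_FLAG_NODIR | EXC_FLAG_ENDSWITH.
 */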

void add_exclude(const char *string, const char *base,
                 int baselen, struct exclude_list *el, int srcpos)
{
        struct exclude *x;
        int patternlen;
        unsigned flags;
        int nowildcardlen;

        parse_exclude_pattern(&string, &patternlen, &flags, &nowildcardlen);
        if (flags & EXC_FLAG_MUSTBEDIR) {
                FLEXPTR_ALLOC_MEM(x, pattern, string, patternlen);
        } else {
                x = xmalloc(sizeof(*x));
                x->pattern = string;
        }
        x->patternlen = patternlen;
        x->nowildcardlen = nowildcardlen;
        x->base = base;
        x->baselen = baselen;
        x->flags = flags;
        x->srcpos = srcpos;
        ALLOC_GROW(el->excludes, el->nr + 1, el->alloc);
        el->excludes[el->nr++] = x;
        x->el = el;
}

static void *read_skip_worktree_file_from_index(const char *path, size_t *size,
                                                struct sha1_stat *sha1_stat)
{
        int pos, len;
        unsigned long sz;
        enum object_type type;
        void *data;

        len = strlen(path);
        pos = cache_name_pos(path, len);
        if (pos < 0)
                return NULL;
        if (!ce_skip_worktree(active_cache[pos]))
                return NULL;
        data = read_sha1_file(active_cache[pos]->sha1, &type, &sz);
        if (!data || type != OBJ_BLOB) {
                free(data);
                return NULL;
        }
        *size = xsize_t(sz);
        if (sha1_stat) {
                memset(&sha1_stat->stat, 0, sizeof(sha1_stat->stat));
                hashcpy(sha1_stat->sha1, active_cache[pos]->sha1);
        }
        return data;
}

/*
 * Frees memory within el which was allocated for exclude patterns and
 * the file buffer. Does not free el itself.
 */
void clear_exclude_list(struct exclude_list *el)
{
        int i;

        for (i = 0; i < el->nr; i++)
                free(el->excludes[i]);
        free(el->excludes);
        free(el->filebuf);

        memset(el, 0, sizeof(*el));
}

static void trim_trailing_spaces(char *buf)
{
        char *p, *last_space = NULL;

        for (p = buf; *p; p++)
                switch (*p) {
                case ' ':
                        if (!last_space)
                                last_space = p;
                        break;
                case '\\':
                        p++;
                        if (!*p)
                                return;
                        /* fallthrough */
                default:
                        last_space = NULL;
                }

        if (last_space)
                *last_space = '\0';
}
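
/*
 * For example: "hello world   " is trimmed to "hello world", while a
 * backslash-escaped trailing space, as in "hello\ ", is left alone.
 */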

/*
 * Given a subdirectory name and "dir" of the current directory,
 * search the subdir in "dir" and return it, or create a new one if it
 * does not exist in "dir".
 *
 * If "name" has a trailing slash, it is excluded from the search.
 */
static struct untracked_cache_dir *lookup_untracked(struct untracked_cache *uc,
                                                    struct untracked_cache_dir *dir,
                                                    const char *name, int len)
{
        int first, last;
        struct untracked_cache_dir *d;
        if (!dir)
                return NULL;
        if (len && name[len - 1] == '/')
                len--;
        first = 0;
        last = dir->dirs_nr;
        while (last > first) {
                int cmp, next = (last + first) >> 1;
                d = dir->dirs[next];
                cmp = strncmp(name, d->name, len);
                if (!cmp && strlen(d->name) > len)
                        cmp = -1;
                if (!cmp)
                        return d;
                if (cmp < 0) {
                        last = next;
                        continue;
                }
                first = next+1;
        }

        uc->dir_created++;
        FLEX_ALLOC_MEM(d, name, name, len);

        ALLOC_GROW(dir->dirs, dir->dirs_nr + 1, dir->dirs_alloc);
        memmove(dir->dirs + first + 1, dir->dirs + first,
                (dir->dirs_nr - first) * sizeof(*dir->dirs));
        dir->dirs_nr++;
        dir->dirs[first] = d;
        return d;
}

static void do_invalidate_gitignore(struct untracked_cache_dir *dir)
{
        int i;
        dir->valid = 0;
        dir->untracked_nr = 0;
        for (i = 0; i < dir->dirs_nr; i++)
                do_invalidate_gitignore(dir->dirs[i]);
}

static void invalidate_gitignore(struct untracked_cache *uc,
                                 struct untracked_cache_dir *dir)
{
        uc->gitignore_invalidated++;
        do_invalidate_gitignore(dir);
}

static void invalidate_directory(struct untracked_cache *uc,
                                 struct untracked_cache_dir *dir)
{
        int i;
        uc->dir_invalidated++;
        dir->valid = 0;
        dir->untracked_nr = 0;
        for (i = 0; i < dir->dirs_nr; i++)
                dir->dirs[i]->recurse = 0;
}

/*
 * Given a file with name "fname", read it (either from disk, or from
 * the index if "check_index" is non-zero), parse it and store the
 * exclude rules in "el".
 *
 * If "ss" is not NULL, compute SHA-1 of the exclude file and fill
 * stat data from disk (only valid if add_excludes returns zero). If
 * ss_valid is non-zero, "ss" must contain a good value as input.
 */
static int add_excludes(const char *fname, const char *base, int baselen,
                        struct exclude_list *el, int check_index,
                        struct sha1_stat *sha1_stat)
{
        struct stat st;
        int fd, i, lineno = 1;
        size_t size = 0;
        char *buf, *entry;

        fd = open(fname, O_RDONLY);
        if (fd < 0 || fstat(fd, &st) < 0) {
                if (errno != ENOENT)
                        warn_on_inaccessible(fname);
                if (0 <= fd)
                        close(fd);
                if (!check_index ||
                    (buf = read_skip_worktree_file_from_index(fname, &size, sha1_stat)) == NULL)
                        return -1;
                if (size == 0) {
                        free(buf);
                        return 0;
                }
                if (buf[size-1] != '\n') {
                        buf = xrealloc(buf, st_add(size, 1));
                        buf[size++] = '\n';
                }
        } else {
                size = xsize_t(st.st_size);
                if (size == 0) {
                        if (sha1_stat) {
                                fill_stat_data(&sha1_stat->stat, &st);
                                hashcpy(sha1_stat->sha1, EMPTY_BLOB_SHA1_BIN);
                                sha1_stat->valid = 1;
                        }
                        close(fd);
                        return 0;
                }
                buf = xmallocz(size);
                if (read_in_full(fd, buf, size) != size) {
                        free(buf);
                        close(fd);
                        return -1;
                }
                buf[size++] = '\n';
                close(fd);
                if (sha1_stat) {
                        int pos;
                        if (sha1_stat->valid &&
                            !match_stat_data_racy(&the_index, &sha1_stat->stat, &st))
                                ; /* no content change, ss->sha1 still good */
                        else if (check_index &&
                                 (pos = cache_name_pos(fname, strlen(fname))) >= 0 &&
                                 !ce_stage(active_cache[pos]) &&
                                 ce_uptodate(active_cache[pos]) &&
                                 !would_convert_to_git(fname))
                                hashcpy(sha1_stat->sha1, active_cache[pos]->sha1);
                        else
                                hash_sha1_file(buf, size, "blob", sha1_stat->sha1);
                        fill_stat_data(&sha1_stat->stat, &st);
                        sha1_stat->valid = 1;
                }
        }

        el->filebuf = buf;

        if (skip_utf8_bom(&buf, size))
                size -= buf - el->filebuf;

        entry = buf;

        for (i = 0; i < size; i++) {
                if (buf[i] == '\n') {
                        if (entry != buf + i && entry[0] != '#') {
                                buf[i - (i && buf[i-1] == '\r')] = 0;
                                trim_trailing_spaces(entry);
                                add_exclude(entry, base, baselen, el, lineno);
                        }
                        lineno++;
                        entry = buf + i + 1;
                }
        }
        return 0;
}

int add_excludes_from_file_to_list(const char *fname, const char *base,
                                   int baselen, struct exclude_list *el,
                                   int check_index)
{
        return add_excludes(fname, base, baselen, el, check_index, NULL);
}

struct exclude_list *add_exclude_list(struct dir_struct *dir,
				      int group_type, const char *src)
{
	struct exclude_list *el;
	struct exclude_list_group *group;

	group = &dir->exclude_list_group[group_type];
	ALLOC_GROW(group->el, group->nr + 1, group->alloc);
	el = &group->el[group->nr++];
	memset(el, 0, sizeof(*el));
	el->src = src;
	return el;
}
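
To show how an exclude group is typically seeded, here is a hypothetical helper (not part of dir.c) that pushes command-line patterns into the EXC_CMDL group. The helper name and the choice of source-position argument are assumptions made for this illustration; the only grounded parts are add_exclude_list() above and the add_exclude() call shape used by add_excludes() earlier in this file.

/*
 * Hypothetical illustration (not part of dir.c): seed command-line
 * patterns into the EXC_CMDL group, the same way add_excludes() seeds
 * per-file patterns above.  The last argument of add_exclude() is the
 * pattern's source position; a simple 1-based index is assumed here.
 */
static void add_cmdline_excludes(struct dir_struct *dir,
				 const char **patterns, int nr)
{
	struct exclude_list *el = add_exclude_list(dir, EXC_CMDL, "command line");
	int i;

	for (i = 0; i < nr; i++)
		add_exclude(patterns[i], "", 0, el, i + 1);
}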

/*
 * Used to set up core.excludesfile and .git/info/exclude lists.
 */

untracked cache: record .gitignore information and dir hierarchy

The idea is that if we can capture all input and (non-recursive) output of
read_directory_recursive(), and can verify later that all the input is
the same, then the second r_d_r() should produce the same output as in
the first run.

The requirement for this to work is that the stat info of a directory MUST
change if an entry is added to or removed from that directory (and
should not change often otherwise). If your OS and filesystem do not
meet this requirement, the untracked cache is not for you. Most file
systems on *nix should be fine. On Windows, NTFS is fine while FAT may
not be [1], even though FAT on Linux seems to be fine.

The list of inputs of r_d_r() is in the big comment block in dir.h. In
short, the output of a directory (not counting subdirs) mainly depends
on the stat info of the directory in question, all .gitignore files leading
to it, and the check_only flag when r_d_r() is called recursively. This
patch records all this info (and the output) as r_d_r() runs.

Two hash_sha1_file() calls are required for $GIT_DIR/info/exclude and
core.excludesfile unless their stat data matches. hash_sha1_file() is
only needed when .gitignore files in the worktree are modified;
otherwise their SHA-1 in the index is used (see the previous patch).

We could store stat data for .gitignore files so we don't have to
rehash them if their content differs from the index, but I think
.gitignore files are rarely modified, so that is not worth extra cache data
(and the hashing penalty in read-cache.c:verify_hdr(), as we will be storing
this as an index extension).

The implication is, if you change .gitignore, you had better add it to the
index soon or you lose all the benefit of the untracked cache, because a
modified .gitignore invalidates all subdirs recursively. This is
especially bad for a .gitignore at the root.

This cached output is about untracked files only, not ignored files,
because the number of tracked files is usually small, so the cache
overhead is small, while the number of ignored files could go really high
(e.g. *.o files mixing with source code).

[1] "Description of NTFS date and time stamps for files and folders"
    http://support.microsoft.com/kb/299648

Helped-by: Torsten Bögershausen <tboegi@web.de>
Helped-by: David Turner <dturner@twopensource.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
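
The directory-mtime requirement described above can be probed directly. The sketch below is a hypothetical, self-contained check (not taken from git's sources): it stats a directory, creates a file in it, and compares the mtimes. If they are identical, the filesystem does not give the untracked cache the signal it relies on. git itself ships a more thorough variant of this kind of probe as part of its untracked-cache testing support.

/*
 * Hypothetical probe (not part of dir.c): does this filesystem bump a
 * directory's mtime when an entry is created in it?  Returns 1 if yes,
 * 0 if no, -1 on error.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>

static int dir_mtime_changes_on_create(const char *dirpath)
{
	struct stat before, after;
	char probe[4096];
	int fd;

	if (stat(dirpath, &before) < 0)
		return -1;
	snprintf(probe, sizeof(probe), "%s/.untracked-cache-probe", dirpath);
	sleep(1);	/* mtime granularity may be as coarse as one second */
	fd = open(probe, O_CREAT | O_EXCL | O_WRONLY, 0600);
	if (fd < 0)
		return -1;
	close(fd);
	if (stat(dirpath, &after) < 0) {
		unlink(probe);
		return -1;
	}
	unlink(probe);
	return after.st_mtime != before.st_mtime;
}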

static void add_excludes_from_file_1(struct dir_struct *dir, const char *fname,
				     struct sha1_stat *sha1_stat)
{
	struct exclude_list *el;
	/*
	 * catch setup_standard_excludes() that's called before
	 * dir->untracked is assigned. That function behaves
	 * differently when dir->untracked is non-NULL.
	 */
	if (!dir->untracked)
		dir->unmanaged_exclude_files++;
	el = add_exclude_list(dir, EXC_FILE, fname);
	if (add_excludes(fname, "", 0, el, 0, sha1_stat) < 0)
		die("cannot use %s as an exclude file", fname);
}

void add_excludes_from_file(struct dir_struct *dir, const char *fname)
{
	dir->unmanaged_exclude_files++; /* see validate_untracked_cache() */
	add_excludes_from_file_1(dir, fname, NULL);
}

dir.c::match_basename(): pay attention to the length of string parameters

The function takes two counted strings (<basename, basenamelen> and
<pattern, patternlen>) as parameters, together with prefix (the
length of the prefix in pattern that is to be matched literally
without globbing against the basename) and EXC_* flags that tell it
how to match the pattern against the basename.

However, it did not pay attention to the length of these counted
strings. Update it to do the following:

* When the entire pattern is to be matched literally, the pattern
  matches the basename only when their lengths are the same, and they
  match up to that length.

* When the pattern is "*" followed by a string to be matched
  literally, make sure that basenamelen is equal to or longer than
  the "literal" part of the pattern, and the tail of the basename
  string matches that literal part.

* Otherwise, use the new fnmatch_icase_mem helper to make sure we
  only look at the counted part of the strings. Because these counted
  strings are full strings most of the time, we check for termination
  to avoid unnecessary allocation.

Signed-off-by: Junio C Hamano <gitster@pobox.com>
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>

int match_basename(const char *basename, int basenamelen,
		   const char *pattern, int prefix, int patternlen,
		   unsigned flags)
{
	if (prefix == patternlen) {
		if (patternlen == basenamelen &&
		    !strncmp_icase(pattern, basename, basenamelen))
			return 1;
	} else if (flags & EXC_FLAG_ENDSWITH) {
		/* "*literal" matching against "fooliteral" */
		if (patternlen - 1 <= basenamelen &&
		    !strncmp_icase(pattern + 1,
				   basename + basenamelen - (patternlen - 1),
				   patternlen - 1))
			return 1;
	} else {
		if (fnmatch_icase_mem(pattern, patternlen,
				      basename, basenamelen,
				      0) == 0)
			return 1;
	}
	return 0;
}
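
To make the three branches above concrete, here is a hypothetical set of calls (not part of dir.c). The prefix values are what the pattern parser would plausibly compute as the length of the leading non-wildcard part of each pattern; treat them as assumptions for this illustration.

/*
 * Hypothetical illustration (not part of dir.c) of the three branches
 * of match_basename().  prefix is assumed to be the length of the
 * pattern's leading non-wildcard part.
 */
static void match_basename_examples(void)
{
	/* whole pattern is literal: lengths must be equal, then compare */
	int a = match_basename("Makefile", 8, "Makefile", 8, 8, 0);

	/* "*.o" is '*' plus a literal tail: EXC_FLAG_ENDSWITH tail match */
	int b = match_basename("foo.o", 5, "*.o", 0, 3, EXC_FLAG_ENDSWITH);

	/* anything else goes through fnmatch_icase_mem() */
	int c = match_basename("foo.c", 5, "f?o.[ch]", 1, 8, 0);

	/* all three calls return 1 */
	(void)a; (void)b; (void)c;
}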

int match_pathname(const char *pathname, int pathlen,
		   const char *base, int baselen,
		   const char *pattern, int prefix, int patternlen,
		   unsigned flags)
{
	const char *name;
	int namelen;

	/*
	 * match with FNM_PATHNAME; the pattern has base implicitly
	 * in front of it.
	 */
	if (*pattern == '/') {
		pattern++;
		patternlen--;
		prefix--;
	}

	/*
	 * baselen does not count the trailing slash. base[] may or
	 * may not end with a trailing slash though.
	 */
	if (pathlen < baselen + 1 ||
	    (baselen && pathname[baselen] != '/') ||
	    strncmp_icase(pathname, base, baselen))
		return 0;

	namelen = baselen ? pathlen - baselen - 1 : pathlen;
	name = pathname + pathlen - namelen;

	if (prefix) {
		/*
		 * if the non-wildcard part is longer than the
		 * remaining pathname, surely it cannot match.
		 */
		if (prefix > namelen)
			return 0;

		if (strncmp_icase(pattern, name, prefix))
			return 0;
		pattern += prefix;
		patternlen -= prefix;
		name += prefix;
		namelen -= prefix;

		/*
		 * If the whole pattern did not have a wildcard,
		 * then our prefix match is all we need; we
		 * do not need to call fnmatch at all.
		 */
		if (!patternlen && !namelen)
			return 1;
	}

	return fnmatch_icase_mem(pattern, patternlen,
				 name, namelen,
				 WM_PATHNAME) == 0;
}
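
The interplay of base, prefix, and the leading '/' is easiest to see with a worked call. The sketch below is a hypothetical illustration (not part of dir.c); the pattern is imagined to come from Documentation/.gitignore, and prefix is assumed to be the length of its non-wildcard head.

/*
 * Hypothetical illustration (not part of dir.c): a pattern
 * "/technical/*.txt" read from Documentation/.gitignore, checked
 * against a path deeper in that directory.
 */
static int match_pathname_example(void)
{
	const char *path = "Documentation/technical/api-directory-listing.txt";
	const char *base = "Documentation";	/* no trailing slash, as required */
	const char *pattern = "/technical/*.txt";
	int prefix = 11;			/* strlen("/technical/"), the non-wildcard head */

	/*
	 * The leading '/' is stripped, "Documentation/" is verified as
	 * the path's prefix, "technical/" is compared literally, and
	 * only "*.txt" vs "api-directory-listing.txt" reaches
	 * fnmatch_icase_mem(); the call returns 1.  No EXC_* flags are
	 * needed for this illustration.
	 */
	return match_pathname(path, (int)strlen(path),
			      base, (int)strlen(base),
			      pattern, prefix, (int)strlen(pattern), 0);
}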

/*
 * Scan the given exclude list in reverse to see whether pathname
 * should be ignored.  The first match (i.e. the last on the list), if
 * any, determines the fate.  Returns the exclude_list element which
 * matched, or NULL for undecided.
 */
static struct exclude *last_exclude_matching_from_list(const char *pathname,
						       int pathlen,
						       const char *basename,
						       int *dtype,
						       struct exclude_list *el)
{
	struct exclude *exc = NULL; /* undecided */
	int i;

	if (!el->nr)
		return NULL;	/* undefined */

	for (i = el->nr - 1; 0 <= i; i--) {
		struct exclude *x = el->excludes[i];
		const char *exclude = x->pattern;
		int prefix = x->nowildcardlen;

		if (x->flags & EXC_FLAG_MUSTBEDIR) {
			if (*dtype == DT_UNKNOWN)
				*dtype = get_dtype(NULL, pathname, pathlen);
			if (*dtype != DT_DIR)
				continue;
		}

		if (x->flags & EXC_FLAG_NODIR) {
			if (match_basename(basename,
					   pathlen - (basename - pathname),
					   exclude, prefix, x->patternlen,
					   x->flags)) {
				exc = x;
				break;
			}
			continue;
		}

		assert(x->baselen == 0 || x->base[x->baselen - 1] == '/');
		if (match_pathname(pathname, pathlen,
				   x->base, x->baselen ? x->baselen - 1 : 0,
				   exclude, prefix, x->patternlen, x->flags)) {
			exc = x;
			break;
		}
	}
	return exc;
}

/*
 * Scan the list and let the last match determine the fate.
 * Return 1 for exclude, 0 for include and -1 for undecided.
 */
int is_excluded_from_list(const char *pathname,
			  int pathlen, const char *basename, int *dtype,
			  struct exclude_list *el)
{
	struct exclude *exclude;
	exclude = last_exclude_matching_from_list(pathname, pathlen, basename, dtype, el);
	if (exclude)
		return exclude->flags & EXC_FLAG_NEGATIVE ? 0 : 1;
	return -1; /* undecided */
}
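
A caller has to map this tri-state back onto a yes/no decision. The sketch below is a hypothetical consumer (not part of dir.c): "undecided" simply means no pattern in this particular list spoke up, so other lists, or the default of "not ignored", take over.

/*
 * Hypothetical consumer (not part of dir.c) of the -1/0/1 tri-state
 * returned by is_excluded_from_list().
 */
static int list_says_ignored(const char *path, struct exclude_list *el)
{
	int dtype = DT_UNKNOWN;
	const char *basename = strrchr(path, '/');

	basename = basename ? basename + 1 : path;
	switch (is_excluded_from_list(path, (int)strlen(path), basename, &dtype, el)) {
	case 1:
		return 1;	/* last matching pattern excludes the path */
	case 0:
		return 0;	/* last matching pattern is a negated "!pattern" */
	default:
		return 0;	/* undecided: defer to other lists / treat as not ignored */
	}
}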

static struct exclude *last_exclude_matching_from_lists(struct dir_struct *dir,
		const char *pathname, int pathlen, const char *basename,
		int *dtype_p)
{
	int i, j;
	struct exclude_list_group *group;
	struct exclude *exclude;
	for (i = EXC_CMDL; i <= EXC_FILE; i++) {
		group = &dir->exclude_list_group[i];
		for (j = group->nr - 1; j >= 0; j--) {
			exclude = last_exclude_matching_from_list(
				pathname, pathlen, basename, dtype_p,
				&group->el[j]);
			if (exclude)
				return exclude;
		}
	}
	return NULL;
}
|
|
|
|
|
2013-04-16 03:11:37 +08:00
|
|
|
/*
|
|
|
|
* Loads the per-directory exclude list for the substring of base
|
|
|
|
* which has a char length of baselen.
|
|
|
|
*/
|
|
|
|
static void prep_exclude(struct dir_struct *dir, const char *base, int baselen)
|
|
|
|
{
|
|
|
|
struct exclude_list_group *group;
|
|
|
|
struct exclude_list *el;
|
|
|
|
struct exclude_stack *stk = NULL;
|
untracked cache: record .gitignore information and dir hierarchy
The idea is if we can capture all input and (non-rescursive) output of
read_directory_recursive(), and can verify later that all the input is
the same, then the second r_d_r() should produce the same output as in
the first run.
The requirement for this to work is stat info of a directory MUST
change if an entry is added to or removed from that directory (and
should not change often otherwise). If your OS and filesystem do not
meet this requirement, untracked cache is not for you. Most file
systems on *nix should be fine. On Windows, NTFS is fine while FAT may
not be [1] even though FAT on Linux seems to be fine.
The list of input of r_d_r() is in the big comment block in dir.h. In
short, the output of a directory (not counting subdirs) mainly depends
on stat info of the directory in question, all .gitignore leading to
it and the check_only flag when r_d_r() is called recursively. This
patch records all this info (and the output) as r_d_r() runs.
Two hash_sha1_file() are required for $GIT_DIR/info/exclude and
core.excludesfile unless their stat data matches. hash_sha1_file() is
only needed when .gitignore files in the worktree are modified,
otherwise their SHA-1 in index is used (see the previous patch).
We could store stat data for .gitignore files so we don't have to
rehash them if their content is different from index, but I think
.gitignore files are rarely modified, so not worth extra cache data
(and hashing penalty read-cache.c:verify_hdr(), as we will be storing
this as an index extension).
The implication is, if you change .gitignore, you better add it to the
index soon or you lose all the benefit of untracked cache because a
modified .gitignore invalidates all subdirs recursively. This is
especially bad for .gitignore at root.
This cached output is about untracked files only, not ignored files
because the number of tracked files is usually small, so small cache
overhead, while the number of ignored files could go really high
(e.g. *.o files mixing with source code).
[1] "Description of NTFS date and time stamps for files and folders"
http://support.microsoft.com/kb/299648
Helped-by: Torsten Bögershausen <tboegi@web.de>
Helped-by: David Turner <dturner@twopensource.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-08 18:12:25 +08:00
|
|
|
struct untracked_cache_dir *untracked;
|
2013-04-16 03:11:37 +08:00
|
|
|
int current;
|
|
|
|
|
|
|
|
group = &dir->exclude_list_group[EXC_DIRS];
|
|
|
|
|
2014-07-14 17:47:11 +08:00
|
|
|
/*
|
|
|
|
* Pop the exclude lists from the EXCL_DIRS exclude_list_group
|
2013-04-16 03:11:37 +08:00
|
|
|
* which originate from directories not in the prefix of the
|
2014-07-14 17:47:11 +08:00
|
|
|
* path being checked.
|
|
|
|
*/
|
2013-04-16 03:11:37 +08:00
|
|
|
while ((stk = dir->exclude_stack) != NULL) {
|
|
|
|
if (stk->baselen <= baselen &&
|
2014-07-14 17:50:22 +08:00
|
|
|
!strncmp(dir->basebuf.buf, base, stk->baselen))
|
2013-04-16 03:11:37 +08:00
|
|
|
break;
|
|
|
|
el = &group->el[dir->exclude_stack->exclude_ix];
|
|
|
|
dir->exclude_stack = stk->prev;
|
dir.c: unify is_excluded and is_path_excluded APIs
The is_excluded and is_path_excluded APIs are very similar, except for a
few noteworthy differences:
is_excluded doesn't handle ignored directories, results for paths within
ignored directories are incorrect. This is probably based on the premise
that recursive directory scans should stop at ignored directories, which
is no longer true (in certain cases, read_directory_recursive currently
calls is_excluded *and* is_path_excluded to get correct ignored state).
is_excluded caches parsed .gitignore files of the last directory in struct
dir_struct. If the directory changes, it finds a common parent directory
and is very careful to drop only as much state as necessary. On the other
hand, is_excluded will also read and parse .gitignore files in already
ignored directories, which are completely irrelevant.
is_path_excluded correctly handles ignored directories by checking if any
component in the path is excluded. As it uses is_excluded internally, this
unfortunately forces is_excluded to drop and re-read all .gitignore files,
as there is no common parent directory for the root dir.
is_path_excluded tracks state in a separate struct path_exclude_check,
which is essentially a wrapper of dir_struct with two more fields. However,
as is_path_excluded also modifies dir_struct, it is not possible to e.g.
use multiple path_exclude_check structures with the same dir_struct in
parallel. The additional structure just unnecessarily complicates the API.
Teach is_excluded / prep_exclude about ignored directories: whenever
entering a new directory, first check if the entire directory is excluded.
Remember the excluded state in dir_struct. Don't traverse into already
ignored directories (i.e. don't read irrelevant .gitignore files).
Directories could also be excluded by exclude patterns specified on the
command line or .git/info/exclude, so we cannot simply skip prep_exclude
entirely if there's no .gitignore file name (dir_struct.exclude_per_dir).
Move this check to just before actually reading the file.
is_path_excluded is now equivalent to is_excluded, so we can simply
redirect to it (the public API is cleaned up in the next patch).
The performance impact of the additional ignored check per directory is
hardly noticeable when reading directories recursively (e.g. 'git status').
However, performance of git commands using the is_path_excluded API (e.g.
'git ls-files --cached --ignored --exclude-standard') is greatly improved
as this no longer re-reads .gitignore files on each call.
Here's some performance data from the linux and WebKit repos (best of 10
runs on a Debian Linux on SSD, core.preloadIndex=true):
| ls-files -ci | status | status --ignored
| linux | WebKit | linux | WebKit | linux | WebKit
-------+-------+--------+-------+--------+-------+---------
before | 0.506 | 6.539 | 0.212 | 1.555 | 0.323 | 2.541
after | 0.080 | 1.191 | 0.218 | 1.583 | 0.321 | 2.579
gain | 6.325 | 5.490 | 0.972 | 0.982 | 1.006 | 0.985
Signed-off-by: Karsten Blees <blees@dcon.de>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-04-16 03:12:14 +08:00
|
|
|
dir->exclude = NULL;
|
2014-07-14 17:50:22 +08:00
|
|
|
free((char *)el->src); /* see strbuf_detach() below */
|
2013-04-16 03:11:37 +08:00
|
|
|
clear_exclude_list(el);
|
|
|
|
free(stk);
|
|
|
|
group->nr--;
|
|
|
|
}
|
|
|
|
|
dir.c: unify is_excluded and is_path_excluded APIs
The is_excluded and is_path_excluded APIs are very similar, except for a
few noteworthy differences:
is_excluded doesn't handle ignored directories, results for paths within
ignored directories are incorrect. This is probably based on the premise
that recursive directory scans should stop at ignored directories, which
is no longer true (in certain cases, read_directory_recursive currently
calls is_excluded *and* is_path_excluded to get correct ignored state).
is_excluded caches parsed .gitignore files of the last directory in struct
dir_struct. If the directory changes, it finds a common parent directory
and is very careful to drop only as much state as necessary. On the other
hand, is_excluded will also read and parse .gitignore files in already
ignored directories, which are completely irrelevant.
is_path_excluded correctly handles ignored directories by checking if any
component in the path is excluded. As it uses is_excluded internally, this
unfortunately forces is_excluded to drop and re-read all .gitignore files,
as there is no common parent directory for the root dir.
is_path_excluded tracks state in a separate struct path_exclude_check,
which is essentially a wrapper of dir_struct with two more fields. However,
as is_path_excluded also modifies dir_struct, it is not possible to e.g.
use multiple path_exclude_check structures with the same dir_struct in
parallel. The additional structure just unnecessarily complicates the API.
Teach is_excluded / prep_exclude about ignored directories: whenever
entering a new directory, first check if the entire directory is excluded.
Remember the excluded state in dir_struct. Don't traverse into already
ignored directories (i.e. don't read irrelevant .gitignore files).
Directories could also be excluded by exclude patterns specified on the
command line or .git/info/exclude, so we cannot simply skip prep_exclude
entirely if there's no .gitignore file name (dir_struct.exclude_per_dir).
Move this check to just before actually reading the file.
is_path_excluded is now equivalent to is_excluded, so we can simply
redirect to it (the public API is cleaned up in the next patch).
The performance impact of the additional ignored check per directory is
hardly noticeable when reading directories recursively (e.g. 'git status').
However, performance of git commands using the is_path_excluded API (e.g.
'git ls-files --cached --ignored --exclude-standard') is greatly improved
as this no longer re-reads .gitignore files on each call.
Here's some performance data from the linux and WebKit repos (best of 10
runs on a Debian Linux on SSD, core.preloadIndex=true):
| ls-files -ci | status | status --ignored
| linux | WebKit | linux | WebKit | linux | WebKit
-------+-------+--------+-------+--------+-------+---------
before | 0.506 | 6.539 | 0.212 | 1.555 | 0.323 | 2.541
after | 0.080 | 1.191 | 0.218 | 1.583 | 0.321 | 2.579
gain | 6.325 | 5.490 | 0.972 | 0.982 | 1.006 | 0.985
Signed-off-by: Karsten Blees <blees@dcon.de>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-04-16 03:12:14 +08:00
|
|
|
/* Skip traversing into sub directories if the parent is excluded */
|
|
|
|
if (dir->exclude)
|
|
|
|
return;
|
|
|
|
|
2014-07-14 17:50:22 +08:00
|
|
|
/*
|
|
|
|
* Lazy initialization. All call sites currently just
|
|
|
|
* memset(dir, 0, sizeof(*dir)) before use. Changing all of
|
|
|
|
* them seems lots of work for little benefit.
|
|
|
|
*/
|
|
|
|
if (!dir->basebuf.buf)
|
|
|
|
strbuf_init(&dir->basebuf, PATH_MAX);
|
|
|
|
|
2013-04-16 03:11:37 +08:00
|
|
|
/* Read from the parent directories and push them down. */
|
|
|
|
current = stk ? stk->baselen : -1;
|
2014-07-14 17:50:22 +08:00
|
|
|
strbuf_setlen(&dir->basebuf, current < 0 ? 0 : current);
|
untracked cache: record .gitignore information and dir hierarchy
The idea is if we can capture all input and (non-rescursive) output of
read_directory_recursive(), and can verify later that all the input is
the same, then the second r_d_r() should produce the same output as in
the first run.
The requirement for this to work is stat info of a directory MUST
change if an entry is added to or removed from that directory (and
should not change often otherwise). If your OS and filesystem do not
meet this requirement, untracked cache is not for you. Most file
systems on *nix should be fine. On Windows, NTFS is fine while FAT may
not be [1] even though FAT on Linux seems to be fine.
The list of input of r_d_r() is in the big comment block in dir.h. In
short, the output of a directory (not counting subdirs) mainly depends
on stat info of the directory in question, all .gitignore leading to
it and the check_only flag when r_d_r() is called recursively. This
patch records all this info (and the output) as r_d_r() runs.
Two hash_sha1_file() are required for $GIT_DIR/info/exclude and
core.excludesfile unless their stat data matches. hash_sha1_file() is
only needed when .gitignore files in the worktree are modified,
otherwise their SHA-1 in index is used (see the previous patch).
We could store stat data for .gitignore files so we don't have to
rehash them if their content is different from index, but I think
.gitignore files are rarely modified, so not worth extra cache data
(and hashing penalty read-cache.c:verify_hdr(), as we will be storing
this as an index extension).
The implication is, if you change .gitignore, you better add it to the
index soon or you lose all the benefit of untracked cache because a
modified .gitignore invalidates all subdirs recursively. This is
especially bad for .gitignore at root.
This cached output is about untracked files only, not ignored files
because the number of tracked files is usually small, so small cache
overhead, while the number of ignored files could go really high
(e.g. *.o files mixing with source code).
[1] "Description of NTFS date and time stamps for files and folders"
http://support.microsoft.com/kb/299648
Helped-by: Torsten Bögershausen <tboegi@web.de>
Helped-by: David Turner <dturner@twopensource.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-08 18:12:25 +08:00
|
|
|
if (dir->untracked)
|
|
|
|
untracked = stk ? stk->ucd : dir->untracked->root;
|
|
|
|
else
|
|
|
|
untracked = NULL;
|
|
|
|
|
2013-04-16 03:11:37 +08:00
|
|
|
while (current < baselen) {
|
|
|
|
const char *cp;
|
untracked cache: record .gitignore information and dir hierarchy
The idea is if we can capture all input and (non-rescursive) output of
read_directory_recursive(), and can verify later that all the input is
the same, then the second r_d_r() should produce the same output as in
the first run.
The requirement for this to work is stat info of a directory MUST
change if an entry is added to or removed from that directory (and
should not change often otherwise). If your OS and filesystem do not
meet this requirement, untracked cache is not for you. Most file
systems on *nix should be fine. On Windows, NTFS is fine while FAT may
not be [1] even though FAT on Linux seems to be fine.
The list of input of r_d_r() is in the big comment block in dir.h. In
short, the output of a directory (not counting subdirs) mainly depends
on stat info of the directory in question, all .gitignore leading to
it and the check_only flag when r_d_r() is called recursively. This
patch records all this info (and the output) as r_d_r() runs.
Two hash_sha1_file() are required for $GIT_DIR/info/exclude and
core.excludesfile unless their stat data matches. hash_sha1_file() is
only needed when .gitignore files in the worktree are modified,
otherwise their SHA-1 in index is used (see the previous patch).
We could store stat data for .gitignore files so we don't have to
rehash them if their content is different from index, but I think
.gitignore files are rarely modified, so not worth extra cache data
(and hashing penalty read-cache.c:verify_hdr(), as we will be storing
this as an index extension).
The implication is, if you change .gitignore, you better add it to the
index soon or you lose all the benefit of untracked cache because a
modified .gitignore invalidates all subdirs recursively. This is
especially bad for .gitignore at root.
This cached output is about untracked files only, not ignored files
because the number of tracked files is usually small, so small cache
overhead, while the number of ignored files could go really high
(e.g. *.o files mixing with source code).
[1] "Description of NTFS date and time stamps for files and folders"
http://support.microsoft.com/kb/299648
Helped-by: Torsten Bögershausen <tboegi@web.de>
Helped-by: David Turner <dturner@twopensource.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-08 18:12:25 +08:00
|
|
|
struct sha1_stat sha1_stat;
|
2013-04-16 03:11:37 +08:00
|
|
|
|
2014-10-21 19:38:06 +08:00
|
|
|
stk = xcalloc(1, sizeof(*stk));
|
2013-04-16 03:11:37 +08:00
|
|
|
if (current < 0) {
|
|
|
|
cp = base;
|
|
|
|
current = 0;
|
2014-07-14 17:47:11 +08:00
|
|
|
} else {
|
2013-04-16 03:11:37 +08:00
|
|
|
cp = strchr(base + current + 1, '/');
|
|
|
|
if (!cp)
|
|
|
|
die("oops in prep_exclude");
|
|
|
|
cp++;
|
untracked cache: record .gitignore information and dir hierarchy
The idea is if we can capture all input and (non-rescursive) output of
read_directory_recursive(), and can verify later that all the input is
the same, then the second r_d_r() should produce the same output as in
the first run.
The requirement for this to work is stat info of a directory MUST
change if an entry is added to or removed from that directory (and
should not change often otherwise). If your OS and filesystem do not
meet this requirement, untracked cache is not for you. Most file
systems on *nix should be fine. On Windows, NTFS is fine while FAT may
not be [1] even though FAT on Linux seems to be fine.
The list of input of r_d_r() is in the big comment block in dir.h. In
short, the output of a directory (not counting subdirs) mainly depends
on stat info of the directory in question, all .gitignore leading to
it and the check_only flag when r_d_r() is called recursively. This
patch records all this info (and the output) as r_d_r() runs.
Two hash_sha1_file() are required for $GIT_DIR/info/exclude and
core.excludesfile unless their stat data matches. hash_sha1_file() is
only needed when .gitignore files in the worktree are modified,
otherwise their SHA-1 in index is used (see the previous patch).
We could store stat data for .gitignore files so we don't have to
rehash them if their content is different from index, but I think
.gitignore files are rarely modified, so not worth extra cache data
(and hashing penalty read-cache.c:verify_hdr(), as we will be storing
this as an index extension).
The implication is, if you change .gitignore, you better add it to the
index soon or you lose all the benefit of untracked cache because a
modified .gitignore invalidates all subdirs recursively. This is
especially bad for .gitignore at root.
This cached output is about untracked files only, not ignored files
because the number of tracked files is usually small, so small cache
overhead, while the number of ignored files could go really high
(e.g. *.o files mixing with source code).
[1] "Description of NTFS date and time stamps for files and folders"
http://support.microsoft.com/kb/299648
Helped-by: Torsten Bögershausen <tboegi@web.de>
Helped-by: David Turner <dturner@twopensource.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-08 18:12:25 +08:00
|
|
|
untracked =
|
|
|
|
lookup_untracked(dir->untracked, untracked,
|
|
|
|
base + current,
|
|
|
|
cp - base - current);
|
2013-04-16 03:11:37 +08:00
|
|
|
}
|
|
|
|
stk->prev = dir->exclude_stack;
|
|
|
|
stk->baselen = cp - base;
|
dir.c: unify is_excluded and is_path_excluded APIs
The is_excluded and is_path_excluded APIs are very similar, except for a
few noteworthy differences:
is_excluded doesn't handle ignored directories, results for paths within
ignored directories are incorrect. This is probably based on the premise
that recursive directory scans should stop at ignored directories, which
is no longer true (in certain cases, read_directory_recursive currently
calls is_excluded *and* is_path_excluded to get correct ignored state).
is_excluded caches parsed .gitignore files of the last directory in struct
dir_struct. If the directory changes, it finds a common parent directory
and is very careful to drop only as much state as necessary. On the other
hand, is_excluded will also read and parse .gitignore files in already
ignored directories, which are completely irrelevant.
is_path_excluded correctly handles ignored directories by checking if any
component in the path is excluded. As it uses is_excluded internally, this
unfortunately forces is_excluded to drop and re-read all .gitignore files,
as there is no common parent directory for the root dir.
is_path_excluded tracks state in a separate struct path_exclude_check,
which is essentially a wrapper of dir_struct with two more fields. However,
as is_path_excluded also modifies dir_struct, it is not possible to e.g.
use multiple path_exclude_check structures with the same dir_struct in
parallel. The additional structure just unnecessarily complicates the API.
Teach is_excluded / prep_exclude about ignored directories: whenever
entering a new directory, first check if the entire directory is excluded.
Remember the excluded state in dir_struct. Don't traverse into already
ignored directories (i.e. don't read irrelevant .gitignore files).
Directories could also be excluded by exclude patterns specified on the
command line or .git/info/exclude, so we cannot simply skip prep_exclude
entirely if there's no .gitignore file name (dir_struct.exclude_per_dir).
Move this check to just before actually reading the file.
is_path_excluded is now equivalent to is_excluded, so we can simply
redirect to it (the public API is cleaned up in the next patch).
The performance impact of the additional ignored check per directory is
hardly noticeable when reading directories recursively (e.g. 'git status').
However, performance of git commands using the is_path_excluded API (e.g.
'git ls-files --cached --ignored --exclude-standard') is greatly improved
as this no longer re-reads .gitignore files on each call.
Here's some performance data from the linux and WebKit repos (best of 10
runs on a Debian Linux on SSD, core.preloadIndex=true):
| ls-files -ci | status | status --ignored
| linux | WebKit | linux | WebKit | linux | WebKit
-------+-------+--------+-------+--------+-------+---------
before | 0.506 | 6.539 | 0.212 | 1.555 | 0.323 | 2.541
after | 0.080 | 1.191 | 0.218 | 1.583 | 0.321 | 2.579
gain | 6.325 | 5.490 | 0.972 | 0.982 | 1.006 | 0.985
Signed-off-by: Karsten Blees <blees@dcon.de>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-04-16 03:12:14 +08:00
|
|
|
stk->exclude_ix = group->nr;
|
untracked cache: record .gitignore information and dir hierarchy
The idea is if we can capture all input and (non-rescursive) output of
read_directory_recursive(), and can verify later that all the input is
the same, then the second r_d_r() should produce the same output as in
the first run.
The requirement for this to work is stat info of a directory MUST
change if an entry is added to or removed from that directory (and
should not change often otherwise). If your OS and filesystem do not
meet this requirement, untracked cache is not for you. Most file
systems on *nix should be fine. On Windows, NTFS is fine while FAT may
not be [1] even though FAT on Linux seems to be fine.
The list of input of r_d_r() is in the big comment block in dir.h. In
short, the output of a directory (not counting subdirs) mainly depends
on stat info of the directory in question, all .gitignore leading to
it and the check_only flag when r_d_r() is called recursively. This
patch records all this info (and the output) as r_d_r() runs.
Two hash_sha1_file() are required for $GIT_DIR/info/exclude and
core.excludesfile unless their stat data matches. hash_sha1_file() is
only needed when .gitignore files in the worktree are modified,
otherwise their SHA-1 in index is used (see the previous patch).
We could store stat data for .gitignore files so we don't have to
rehash them if their content is different from index, but I think
.gitignore files are rarely modified, so not worth extra cache data
(and hashing penalty read-cache.c:verify_hdr(), as we will be storing
this as an index extension).
The implication is, if you change .gitignore, you better add it to the
index soon or you lose all the benefit of untracked cache because a
modified .gitignore invalidates all subdirs recursively. This is
especially bad for .gitignore at root.
This cached output is about untracked files only, not ignored files
because the number of tracked files is usually small, so small cache
overhead, while the number of ignored files could go really high
(e.g. *.o files mixing with source code).
[1] "Description of NTFS date and time stamps for files and folders"
http://support.microsoft.com/kb/299648
Helped-by: Torsten Bögershausen <tboegi@web.de>
Helped-by: David Turner <dturner@twopensource.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-08 18:12:25 +08:00
|
|
|
stk->ucd = untracked;
|
dir.c: unify is_excluded and is_path_excluded APIs
The is_excluded and is_path_excluded APIs are very similar, except for a
few noteworthy differences:
is_excluded doesn't handle ignored directories, results for paths within
ignored directories are incorrect. This is probably based on the premise
that recursive directory scans should stop at ignored directories, which
is no longer true (in certain cases, read_directory_recursive currently
calls is_excluded *and* is_path_excluded to get correct ignored state).
is_excluded caches parsed .gitignore files of the last directory in struct
dir_struct. If the directory changes, it finds a common parent directory
and is very careful to drop only as much state as necessary. On the other
hand, is_excluded will also read and parse .gitignore files in already
ignored directories, which are completely irrelevant.
is_path_excluded correctly handles ignored directories by checking if any
component in the path is excluded. As it uses is_excluded internally, this
unfortunately forces is_excluded to drop and re-read all .gitignore files,
as there is no common parent directory for the root dir.
is_path_excluded tracks state in a separate struct path_exclude_check,
which is essentially a wrapper of dir_struct with two more fields. However,
as is_path_excluded also modifies dir_struct, it is not possible to e.g.
use multiple path_exclude_check structures with the same dir_struct in
parallel. The additional structure just unnecessarily complicates the API.
Teach is_excluded / prep_exclude about ignored directories: whenever
entering a new directory, first check if the entire directory is excluded.
Remember the excluded state in dir_struct. Don't traverse into already
ignored directories (i.e. don't read irrelevant .gitignore files).
Directories could also be excluded by exclude patterns specified on the
command line or .git/info/exclude, so we cannot simply skip prep_exclude
entirely if there's no .gitignore file name (dir_struct.exclude_per_dir).
Move this check to just before actually reading the file.
is_path_excluded is now equivalent to is_excluded, so we can simply
redirect to it (the public API is cleaned up in the next patch).
The performance impact of the additional ignored check per directory is
hardly noticeable when reading directories recursively (e.g. 'git status').
However, performance of git commands using the is_path_excluded API (e.g.
'git ls-files --cached --ignored --exclude-standard') is greatly improved
as this no longer re-reads .gitignore files on each call.
Here's some performance data from the linux and WebKit repos (best of 10
runs on a Debian Linux on SSD, core.preloadIndex=true):
| ls-files -ci | status | status --ignored
| linux | WebKit | linux | WebKit | linux | WebKit
-------+-------+--------+-------+--------+-------+---------
before | 0.506 | 6.539 | 0.212 | 1.555 | 0.323 | 2.541
after | 0.080 | 1.191 | 0.218 | 1.583 | 0.321 | 2.579
gain | 6.325 | 5.490 | 0.972 | 0.982 | 1.006 | 0.985
Signed-off-by: Karsten Blees <blees@dcon.de>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-04-16 03:12:14 +08:00
|
|
|
el = add_exclude_list(dir, EXC_DIRS, NULL);
|
2014-07-14 17:50:22 +08:00
|
|
|
strbuf_add(&dir->basebuf, base + current, stk->baselen - current);
|
|
|
|
assert(stk->baselen == dir->basebuf.len);
|
dir.c: unify is_excluded and is_path_excluded APIs
The is_excluded and is_path_excluded APIs are very similar, except for a
few noteworthy differences:
is_excluded doesn't handle ignored directories, results for paths within
ignored directories are incorrect. This is probably based on the premise
that recursive directory scans should stop at ignored directories, which
is no longer true (in certain cases, read_directory_recursive currently
calls is_excluded *and* is_path_excluded to get correct ignored state).
is_excluded caches parsed .gitignore files of the last directory in struct
dir_struct. If the directory changes, it finds a common parent directory
and is very careful to drop only as much state as necessary. On the other
hand, is_excluded will also read and parse .gitignore files in already
ignored directories, which are completely irrelevant.
is_path_excluded correctly handles ignored directories by checking if any
component in the path is excluded. As it uses is_excluded internally, this
unfortunately forces is_excluded to drop and re-read all .gitignore files,
as there is no common parent directory for the root dir.
is_path_excluded tracks state in a separate struct path_exclude_check,
which is essentially a wrapper of dir_struct with two more fields. However,
as is_path_excluded also modifies dir_struct, it is not possible to e.g.
use multiple path_exclude_check structures with the same dir_struct in
parallel. The additional structure just unnecessarily complicates the API.
Teach is_excluded / prep_exclude about ignored directories: whenever
entering a new directory, first check if the entire directory is excluded.
Remember the excluded state in dir_struct. Don't traverse into already
ignored directories (i.e. don't read irrelevant .gitignore files).
Directories could also be excluded by exclude patterns specified on the
command line or .git/info/exclude, so we cannot simply skip prep_exclude
entirely if there's no .gitignore file name (dir_struct.exclude_per_dir).
Move this check to just before actually reading the file.
is_path_excluded is now equivalent to is_excluded, so we can simply
redirect to it (the public API is cleaned up in the next patch).
The performance impact of the additional ignored check per directory is
hardly noticeable when reading directories recursively (e.g. 'git status').
However, performance of git commands using the is_path_excluded API (e.g.
'git ls-files --cached --ignored --exclude-standard') is greatly improved
as this no longer re-reads .gitignore files on each call.
Here's some performance data from the linux and WebKit repos (best of 10
runs on a Debian Linux on SSD, core.preloadIndex=true):
| ls-files -ci | status | status --ignored
| linux | WebKit | linux | WebKit | linux | WebKit
-------+-------+--------+-------+--------+-------+---------
before | 0.506 | 6.539 | 0.212 | 1.555 | 0.323 | 2.541
after | 0.080 | 1.191 | 0.218 | 1.583 | 0.321 | 2.579
gain | 6.325 | 5.490 | 0.972 | 0.982 | 1.006 | 0.985
Signed-off-by: Karsten Blees <blees@dcon.de>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-04-16 03:12:14 +08:00
|
|
|
|
|
|
|
/* Abort if the directory is excluded */
|
|
|
|
if (stk->baselen) {
|
|
|
|
int dt = DT_DIR;
|
2014-07-14 17:50:22 +08:00
|
|
|
dir->basebuf.buf[stk->baselen - 1] = 0;
|
dir.c: unify is_excluded and is_path_excluded APIs
The is_excluded and is_path_excluded APIs are very similar, except for a
few noteworthy differences:
is_excluded doesn't handle ignored directories, results for paths within
ignored directories are incorrect. This is probably based on the premise
that recursive directory scans should stop at ignored directories, which
is no longer true (in certain cases, read_directory_recursive currently
calls is_excluded *and* is_path_excluded to get correct ignored state).
is_excluded caches parsed .gitignore files of the last directory in struct
dir_struct. If the directory changes, it finds a common parent directory
and is very careful to drop only as much state as necessary. On the other
hand, is_excluded will also read and parse .gitignore files in already
ignored directories, which are completely irrelevant.
is_path_excluded correctly handles ignored directories by checking if any
component in the path is excluded. As it uses is_excluded internally, this
unfortunately forces is_excluded to drop and re-read all .gitignore files,
as there is no common parent directory for the root dir.
is_path_excluded tracks state in a separate struct path_exclude_check,
which is essentially a wrapper of dir_struct with two more fields. However,
as is_path_excluded also modifies dir_struct, it is not possible to e.g.
use multiple path_exclude_check structures with the same dir_struct in
parallel. The additional structure just unnecessarily complicates the API.
Teach is_excluded / prep_exclude about ignored directories: whenever
entering a new directory, first check if the entire directory is excluded.
Remember the excluded state in dir_struct. Don't traverse into already
ignored directories (i.e. don't read irrelevant .gitignore files).
Directories could also be excluded by exclude patterns specified on the
command line or .git/info/exclude, so we cannot simply skip prep_exclude
entirely if there's no .gitignore file name (dir_struct.exclude_per_dir).
Move this check to just before actually reading the file.
is_path_excluded is now equivalent to is_excluded, so we can simply
redirect to it (the public API is cleaned up in the next patch).
The performance impact of the additional ignored check per directory is
hardly noticeable when reading directories recursively (e.g. 'git status').
However, performance of git commands using the is_path_excluded API (e.g.
'git ls-files --cached --ignored --exclude-standard') is greatly improved
as this no longer re-reads .gitignore files on each call.
Here's some performance data from the linux and WebKit repos (best of 10
runs on a Debian Linux on SSD, core.preloadIndex=true):
| ls-files -ci | status | status --ignored
| linux | WebKit | linux | WebKit | linux | WebKit
-------+-------+--------+-------+--------+-------+---------
before | 0.506 | 6.539 | 0.212 | 1.555 | 0.323 | 2.541
after | 0.080 | 1.191 | 0.218 | 1.583 | 0.321 | 2.579
gain | 6.325 | 5.490 | 0.972 | 0.982 | 1.006 | 0.985
Signed-off-by: Karsten Blees <blees@dcon.de>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-04-16 03:12:14 +08:00
			dir->exclude = last_exclude_matching_from_lists(dir,
				dir->basebuf.buf, stk->baselen - 1,
				dir->basebuf.buf + current, &dt);
			dir->basebuf.buf[stk->baselen - 1] = '/';
			if (dir->exclude &&
			    dir->exclude->flags & EXC_FLAG_NEGATIVE)
				dir->exclude = NULL;
			if (dir->exclude) {
				dir->exclude_stack = stk;
				return;
			}
		}

		/* Try to read per-directory file */
untracked cache: record .gitignore information and dir hierarchy
The idea is that if we can capture all input and (non-recursive) output of
read_directory_recursive(), and can verify later that all the input is
the same, then the second r_d_r() should produce the same output as in
the first run.
The requirement for this to work is stat info of a directory MUST
change if an entry is added to or removed from that directory (and
should not change often otherwise). If your OS and filesystem do not
meet this requirement, untracked cache is not for you. Most file
systems on *nix should be fine. On Windows, NTFS is fine while FAT may
not be [1] even though FAT on Linux seems to be fine.
The list of inputs to r_d_r() is in the big comment block in dir.h. In
short, the output of a directory (not counting subdirs) mainly depends
on stat info of the directory in question, all .gitignore leading to
it and the check_only flag when r_d_r() is called recursively. This
patch records all this info (and the output) as r_d_r() runs.
Two hash_sha1_file() calls are required for $GIT_DIR/info/exclude and
core.excludesfile unless their stat data matches. hash_sha1_file() is
only needed when .gitignore files in the worktree are modified,
otherwise their SHA-1 in index is used (see the previous patch).
We could store stat data for .gitignore files so we don't have to
rehash them if their content is different from index, but I think
.gitignore files are rarely modified, so not worth extra cache data
(and the hashing penalty in read-cache.c:verify_hdr(), as we will be storing
this as an index extension).
The implication is, if you change .gitignore, you better add it to the
index soon or you lose all the benefit of untracked cache because a
modified .gitignore invalidates all subdirs recursively. This is
especially bad for .gitignore at root.
This cached output is about untracked files only, not ignored files
because the number of tracked files is usually small, so small cache
overhead, while the number of ignored files could go really high
(e.g. *.o files mixing with source code).
[1] "Description of NTFS date and time stamps for files and folders"
http://support.microsoft.com/kb/299648
Helped-by: Torsten Bögershausen <tboegi@web.de>
Helped-by: David Turner <dturner@twopensource.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-08 18:12:25 +08:00
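The reuse rule described above can be summarized in a short, self-contained sketch. This is not git's data structures or its valid_cached_dir(); the struct below and its fields are assumptions made purely for illustration: cached output for a directory is reusable only when the directory's stat data and the hash of the governing .gitignore input both still match what was recorded.

#include <string.h>
#include <time.h>
#include <sys/stat.h>

struct cached_dir_record {
	time_t mtime;			/* directory mtime recorded at the first r_d_r() run */
	unsigned char exclude_sha1[20];	/* recorded hash of the governing .gitignore input */
};

static int cached_record_usable(const struct cached_dir_record *rec,
				const struct stat *st_now,
				const unsigned char *exclude_sha1_now)
{
	if (rec->mtime != st_now->st_mtime)
		return 0;	/* an entry was added or removed (or the FS cannot tell) */
	if (memcmp(rec->exclude_sha1, exclude_sha1_now, 20))
		return 0;	/* the ignore rules leading to this directory changed */
	return 1;		/* all recorded inputs match: the recorded output is reusable */
}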
		hashclr(sha1_stat.sha1);
		sha1_stat.valid = 0;
		if (dir->exclude_per_dir &&
		    /*
		     * If we know that no files have been added in
		     * this directory (i.e. valid_cached_dir() has
		     * been executed and set untracked->valid) ..
		     */
		    (!untracked || !untracked->valid ||
		     /*
		      * .. and .gitignore does not exist before
		      * (i.e. null exclude_sha1). Then we can skip
		      * loading .gitignore, which would result in
		      * ENOENT anyway.
		      */
		     !is_null_sha1(untracked->exclude_sha1))) {
			/*
			 * dir->basebuf gets reused by the traversal, but we
			 * need fname to remain unchanged to ensure the src
			 * member of each struct exclude correctly
			 * back-references its source file.  Other invocations
			 * of add_exclude_list provide stable strings, so we
			 * strbuf_detach() and free() here in the caller.
			 */
			struct strbuf sb = STRBUF_INIT;
			strbuf_addbuf(&sb, &dir->basebuf);
			strbuf_addstr(&sb, dir->exclude_per_dir);
			el->src = strbuf_detach(&sb, NULL);
			add_excludes(el->src, el->src, stk->baselen, el, 1,
				     untracked ? &sha1_stat : NULL);
		}
		/*
		 * NEEDSWORK: when untracked cache is enabled, prep_exclude()
		 * will first be called in valid_cached_dir() then maybe many
		 * times more in last_exclude_matching(). When the cache is
		 * used, last_exclude_matching() will not be called and
		 * reading .gitignore content will be a waste.
		 *
		 * So when it's called by valid_cached_dir() and we can get
		 * .gitignore SHA-1 from the index (i.e. .gitignore is not
		 * modified on work tree), we could delay reading the
		 * .gitignore content until we absolutely need it in
		 * last_exclude_matching(). Be careful about ignore rule
		 * order, though, if you do that.
		 */
		if (untracked &&
		    hashcmp(sha1_stat.sha1, untracked->exclude_sha1)) {
			invalidate_gitignore(dir->untracked, untracked);
			hashcpy(untracked->exclude_sha1, sha1_stat.sha1);
		}
		dir->exclude_stack = stk;
		current = stk->baselen;
	}
	strbuf_setlen(&dir->basebuf, baselen);
}

/*
 * Loads the exclude lists for the directory containing pathname, then
 * scans all exclude lists to determine whether pathname is excluded.
 * Returns the exclude_list element which matched, or NULL for
 * undecided.
 */
struct exclude *last_exclude_matching(struct dir_struct *dir,
				      const char *pathname,
				      int *dtype_p)
{
	int pathlen = strlen(pathname);
	const char *basename = strrchr(pathname, '/');
	basename = (basename) ? basename+1 : pathname;

	prep_exclude(dir, pathname, basename-pathname);

	if (dir->exclude)
		return dir->exclude;

	return last_exclude_matching_from_lists(dir, pathname, pathlen,
			basename, dtype_p);
}

/*
 * Loads the exclude lists for the directory containing pathname, then
 * scans all exclude lists to determine whether pathname is excluded.
 * Returns 1 if true, otherwise 0.
 */
int is_excluded(struct dir_struct *dir, const char *pathname, int *dtype_p)
{
	struct exclude *exclude =
		last_exclude_matching(dir, pathname, dtype_p);
	if (exclude)
		return exclude->flags & EXC_FLAG_NEGATIVE ? 0 : 1;
	return 0;
}

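A hedged caller-side sketch of the API above: one dir_struct can be queried for arbitrary paths, including paths inside ignored directories, without any extra wrapper state. setup_standard_excludes() is assumed from dir.h, and the snippet presumes the usual "cache.h"/"dir.h" environment of this file.

/* Hypothetical helper: print which of the given paths the ignore rules hide. */
static void report_ignored_paths(struct dir_struct *dir, const char **paths, int nr)
{
	int i;

	setup_standard_excludes(dir);	/* .gitignore, info/exclude, core.excludesfile */

	for (i = 0; i < nr; i++) {
		int dtype = DT_UNKNOWN;
		/*
		 * prep_exclude() re-reads only the per-directory .gitignore
		 * files whose directories changed since the previous call,
		 * so querying many paths in sorted order stays cheap.
		 */
		if (is_excluded(dir, paths[i], &dtype))
			printf("%s\n", paths[i]);
	}
}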
static struct dir_entry *dir_entry_new(const char *pathname, int len)
{
	struct dir_entry *ent;

	FLEX_ALLOC_MEM(ent, name, pathname, len);
	ent->len = len;
	return ent;
}

static struct dir_entry *dir_add_name(struct dir_struct *dir, const char *pathname, int len)
{
	if (cache_file_exists(pathname, len, ignore_case))
		return NULL;

	ALLOC_GROW(dir->entries, dir->nr+1, dir->alloc);
	return dir->entries[dir->nr++] = dir_entry_new(pathname, len);
}

struct dir_entry *dir_add_ignored(struct dir_struct *dir, const char *pathname, int len)
{
	if (!cache_name_is_other(pathname, len))
		return NULL;

	ALLOC_GROW(dir->ignored, dir->ignored_nr+1, dir->ignored_alloc);
	return dir->ignored[dir->ignored_nr++] = dir_entry_new(pathname, len);
}

enum exist_status {
	index_nonexistent = 0,
	index_directory,
	index_gitdir
};

/*
 * Do not use the alphabetically sorted index to look up
 * the directory name; instead, use the case insensitive
 * directory hash.
 */
static enum exist_status directory_exists_in_index_icase(const char *dirname, int len)
{
	struct cache_entry *ce;

	if (cache_dir_exists(dirname, len))
		return index_directory;

	ce = cache_file_exists(dirname, len, ignore_case);
	if (ce && S_ISGITLINK(ce->ce_mode))
		return index_gitdir;

	return index_nonexistent;
}

/*
 * The index sorts alphabetically by entry name, which
 * means that a gitlink sorts as '\0' at the end, while
 * a directory (which is defined not as an entry, but as
 * the files it contains) will sort with the '/' at the
 * end.
 */
static enum exist_status directory_exists_in_index(const char *dirname, int len)
{
	int pos;

	if (ignore_case)
		return directory_exists_in_index_icase(dirname, len);

	pos = cache_name_pos(dirname, len);
	if (pos < 0)
		pos = -pos-1;
	while (pos < active_nr) {
Convert "struct cache_entry *" to "const ..." wherever possible
I attempted to make index_state->cache[] a "const struct cache_entry **"
to find out how existing entries in index are modified and where. The
question I have is what do we do if we really need to keep track of on-disk
changes in the index. The result is
- diff-lib.c: setting CE_UPTODATE
- name-hash.c: setting CE_HASHED
- preload-index.c, read-cache.c, unpack-trees.c and
builtin/update-index: obvious
- entry.c: write_entry() may refresh the checked out entry via
fill_stat_cache_info(). This causes "non-const struct cache_entry
*" in builtin/apply.c, builtin/checkout-index.c and
builtin/checkout.c
- builtin/ls-files.c: --with-tree changes stagemask and may set
CE_UPDATE
Of these, write_entry() and its call sites are probably most
interesting because it modifies on-disk info. But this is stat info
and can be retrieved via refresh, at least for porcelain
commands. Other just uses ce_flags for local purposes.
So, keeping track of "dirty" entries is just a matter of setting a
flag in index modification functions exposed by read-cache.c. Except
unpack-trees, the rest of the code base does not do anything funny
behind read-cache's back.
The actual patch is less valuable than the summary above. But if
anyone wants to re-identify the above sites, applying this patch and then
this:
diff --git a/cache.h b/cache.h
index 430d021..1692891 100644
--- a/cache.h
+++ b/cache.h
@@ -267,7 +267,7 @@ static inline unsigned int canon_mode(unsigned int mode)
#define cache_entry_size(len) (offsetof(struct cache_entry,name) + (len) + 1)
struct index_state {
- struct cache_entry **cache;
+ const struct cache_entry **cache;
unsigned int version;
unsigned int cache_nr, cache_alloc, cache_changed;
struct string_list *resolve_undo;
will help quickly identify them without bogus warnings.
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-07-09 23:29:00 +08:00
		const struct cache_entry *ce = active_cache[pos++];
		unsigned char endchar;

		if (strncmp(ce->name, dirname, len))
			break;
		endchar = ce->name[len];
		if (endchar > '/')
			break;
		if (endchar == '/')
			return index_directory;
		if (!endchar && S_ISGITLINK(ce->ce_mode))
			return index_gitdir;
	}
	return index_nonexistent;
}

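A worked illustration of the ordering the loop above relies on, using hypothetical index entries for dirname "sub" (len == 3); each line shows what happens when that entry is the first one reached:

/*
 *   "sub"        endchar '\0'  ->  index_gitdir if it is a gitlink,
 *                                  otherwise keep scanning
 *   "sub.c"      endchar '.'   ->  '.' sorts before '/': keep scanning
 *   "sub/file"   endchar '/'   ->  a tracked file below "sub/": index_directory
 *   "subsystem"  endchar 's'   ->  's' sorts after '/': nothing below "sub/"
 *                                  can follow, so index_nonexistent
 */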
/*
 * When we find a directory when traversing the filesystem, we
 * have three distinct cases:
 *
 *  - ignore it
 *  - see it as a directory
 *  - recurse into it
 *
 * and which one we choose depends on a combination of existing
 * git index contents and the flags passed into the directory
 * traversal routine.
 *
 * Case 1: If we *already* have entries in the index under that
 * directory name, we always recurse into the directory to see
 * all the files.
 *
 * Case 2: If we *already* have that directory name as a gitlink,
 * we always continue to see it as a gitlink, regardless of whether
 * there is an actual git directory there or not (it might not
 * be checked out as a subproject!)
 *
 * Case 3: if we didn't have it in the index previously, we
 * have a few sub-cases:
 *
 *  (a) if "show_other_directories" is true, we show it as
 *      just a directory, unless "hide_empty_directories" is
 *      also true, in which case we need to check if it contains any
 *      untracked and / or ignored files.
 *  (b) if it looks like a git directory, and we don't have
 *      'no_gitlinks' set we treat it as a gitlink, and show it
 *      as a directory.
 *  (c) otherwise, we recurse into it.
 */
static enum path_treatment treat_directory(struct dir_struct *dir,
	struct untracked_cache_dir *untracked,
	const char *dirname, int len, int baselen, int exclude,
	const struct path_simplify *simplify)
{
	/* The "len-1" is to strip the final '/' */
	switch (directory_exists_in_index(dirname, len-1)) {
	case index_directory:
		return path_recurse;

	case index_gitdir:
		return path_none;

	case index_nonexistent:
		if (dir->flags & DIR_SHOW_OTHER_DIRECTORIES)
			break;
		if (!(dir->flags & DIR_NO_GITLINKS)) {
			unsigned char sha1[20];
			if (resolve_gitlink_ref(dirname, "HEAD", sha1) == 0)
				return path_untracked;
		}
		return path_recurse;
	}

	/* This is the "show_other_directories" case */

	if (!(dir->flags & DIR_HIDE_EMPTY_DIRECTORIES))
		return exclude ? path_excluded : path_untracked;

	untracked = lookup_untracked(dir->untracked, untracked,
				     dirname + baselen, len - baselen);
	return read_directory_recursive(dir, dirname, len,
					untracked, 1, simplify);
}

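For context, a hedged sketch of how a caller typically wires up the flags consulted by treat_directory() before starting a traversal. This is not the actual builtin code; the read_directory() call shape and setup_standard_excludes() are assumptions based on dir.h of this era.

static void print_untracked(const struct pathspec *pathspec)
{
	struct dir_struct dir;
	int i;

	memset(&dir, 0, sizeof(dir));
	/* show untracked directories as single entries, but skip empty ones */
	dir.flags |= DIR_SHOW_OTHER_DIRECTORIES | DIR_HIDE_EMPTY_DIRECTORIES;
	setup_standard_excludes(&dir);

	read_directory(&dir, "", 0, pathspec);

	for (i = 0; i < dir.nr; i++)
		printf("%s\n", dir.entries[i]->name);
}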
/*
 * This is an inexact early pruning of any recursive directory
 * reading - if the path cannot possibly be in the pathspec,
 * return true, and we'll skip it early.
 */
static int simplify_away(const char *path, int pathlen, const struct path_simplify *simplify)
{
	if (simplify) {
		for (;;) {
			const char *match = simplify->path;
			int len = simplify->len;

			if (!match)
				break;
			if (len > pathlen)
				len = pathlen;
			if (!memcmp(path, match, len))
				return 0;
			simplify++;
		}
		return 1;
	}
	return 0;
}

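A short illustration of the pruning above, assuming a hypothetical NULL-terminated simplify array s built from the literal pathspec prefixes "Documentation/" and "t/":

/*
 *   simplify_away("t/helper", 8, s)  -> 0  ("t/" matches: keep reading this path)
 *   simplify_away("builtin", 7, s)   -> 1  (no prefix matches: prune it early)
 *   simplify_away("Doc", 3, s)       -> 0  (len is clamped to pathlen, so a path
 *                                           that is still a partial prefix of a
 *                                           pathspec element cannot be pruned)
 */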
/*
 * This function tells us whether an excluded path matches a
 * list of "interesting" pathspecs. That is, whether a path matched
 * by any of the pathspecs could possibly be ignored by excluding
 * the specified path. This can happen if:
 *
 *   1. the path is mentioned explicitly in the pathspec
 *
 *   2. the path is a directory prefix of some element in the
 *      pathspec
 */
static int exclude_matches_pathspec(const char *path, int len,
		const struct path_simplify *simplify)
builtin-add: simplify (and increase accuracy of) exclude handling
Previously, the code would always set up the excludes, and then manually
pick through the pathspec we were given, assuming that non-added but
existing paths were just ignored. This was mostly correct, but would
erroneously mark a totally empty directory as 'ignored'.
Instead, we now use the collect_ignored option of dir_struct, which
unambiguously tells us whether a path was ignored. This simplifies the
code, and means empty directories are now just not mentioned at all.
Furthermore, we now conditionally ask dir_struct to respect excludes,
depending on whether the '-f' flag has been set. This means we don't have
to pick through the result, checking for an 'ignored' flag; ignored entries
were either added or not in the first place.
We can safely get rid of the special 'ignored' flags to dir_entry, which
were not used anywhere else.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Jonas Fonseca <fonseca@diku.dk>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2007-06-13 05:42:14 +08:00
|
|
|
{
|
|
|
|
if (simplify) {
|
|
|
|
for (; simplify->path; simplify++) {
|
|
|
|
if (len == simplify->len
|
|
|
|
&& !memcmp(path, simplify->path, len))
|
|
|
|
return 1;
|
2010-03-11 15:15:43 +08:00
|
|
|
if (len < simplify->len
|
|
|
|
&& simplify->path[len] == '/'
|
|
|
|
&& !memcmp(path, simplify->path, len))
|
|
|
|
return 1;
|
2007-06-13 05:42:14 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
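exclude_matches_pathspec() above answers the inverse question: would excluding this path hide something the pathspec asked for? A small standalone sketch of the same two checks (exact mention, or directory prefix of a pathspec element) follows; path_interesting and its arguments are invented names, not git API:

/*
 * Standalone sketch of the two conditions above: an excluded path
 * "matters" if it is named exactly by a pathspec entry, or if it is a
 * directory prefix of one ("sub" vs "sub/file.c").
 */
#include <stdio.h>
#include <string.h>

static int path_interesting(const char *path, size_t len,
                            const char **pathspec, size_t nr)
{
    size_t i;
    for (i = 0; i < nr; i++) {
        size_t plen = strlen(pathspec[i]);
        if (plen == len && !memcmp(path, pathspec[i], len))
            return 1;   /* case 1: mentioned explicitly */
        if (plen > len && pathspec[i][len] == '/' &&
            !memcmp(path, pathspec[i], len))
            return 1;   /* case 2: directory prefix of an element */
    }
    return 0;
}

int main(void)
{
    const char *spec[] = { "sub/file.c", "README" };
    printf("%d\n", path_interesting("sub", 3, spec, 2));    /* 1 */
    printf("%d\n", path_interesting("README", 6, spec, 2)); /* 1 */
    printf("%d\n", path_interesting("other", 5, spec, 2));  /* 0 */
    return 0;
}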
2009-07-10 04:14:28 +08:00
|
|
|
static int get_index_dtype(const char *path, int len)
|
|
|
|
{
|
|
|
|
int pos;
|
Convert "struct cache_entry *" to "const ..." wherever possible
I attempted to make index_state->cache[] a "const struct cache_entry **"
to find out how existing entries in index are modified and where. The
question I have is what do we do if we really need to keep track of on-disk
changes in the index. The result is
- diff-lib.c: setting CE_UPTODATE
- name-hash.c: setting CE_HASHED
- preload-index.c, read-cache.c, unpack-trees.c and
builtin/update-index: obvious
- entry.c: write_entry() may refresh the checked out entry via
fill_stat_cache_info(). This causes "non-const struct cache_entry
*" in builtin/apply.c, builtin/checkout-index.c and
builtin/checkout.c
- builtin/ls-files.c: --with-tree changes stagemask and may set
CE_UPDATE
Of these, write_entry() and its call sites are probably most
interesting because it modifies on-disk info. But this is stat info
and can be retrieved via refresh, at least for porcelain
commands. Other just uses ce_flags for local purposes.
So, keeping track of "dirty" entries is just a matter of setting a
flag in index modification functions exposed by read-cache.c. Except
unpack-trees, the rest of the code base does not do anything funny
behind read-cache's back.
The actual patch is less valuable than the summary above. But if
anyone wants to re-identify the above sites, applying this patch, then
this:
diff --git a/cache.h b/cache.h
index 430d021..1692891 100644
--- a/cache.h
+++ b/cache.h
@@ -267,7 +267,7 @@ static inline unsigned int canon_mode(unsigned int mode)
#define cache_entry_size(len) (offsetof(struct cache_entry,name) + (len) + 1)
struct index_state {
- struct cache_entry **cache;
+ const struct cache_entry **cache;
unsigned int version;
unsigned int cache_nr, cache_alloc, cache_changed;
struct string_list *resolve_undo;
will help quickly identify them without bogus warnings.
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-07-09 23:29:00 +08:00
|
|
|
const struct cache_entry *ce;
|
2009-07-10 04:14:28 +08:00
|
|
|
|
2013-09-17 15:06:15 +08:00
|
|
|
ce = cache_file_exists(path, len, 0);
|
2009-07-10 04:14:28 +08:00
|
|
|
if (ce) {
|
|
|
|
if (!ce_uptodate(ce))
|
|
|
|
return DT_UNKNOWN;
|
|
|
|
if (S_ISGITLINK(ce->ce_mode))
|
|
|
|
return DT_DIR;
|
|
|
|
/*
|
|
|
|
* Nobody actually cares about the
|
|
|
|
* difference between DT_LNK and DT_REG
|
|
|
|
*/
|
|
|
|
return DT_REG;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Try to look it up as a directory */
|
|
|
|
pos = cache_name_pos(path, len);
|
|
|
|
if (pos >= 0)
|
|
|
|
return DT_UNKNOWN;
|
|
|
|
pos = -pos-1;
|
|
|
|
while (pos < active_nr) {
|
|
|
|
ce = active_cache[pos++];
|
|
|
|
if (strncmp(ce->name, path, len))
|
|
|
|
break;
|
|
|
|
if (ce->name[len] > '/')
|
|
|
|
break;
|
|
|
|
if (ce->name[len] < '/')
|
|
|
|
continue;
|
|
|
|
if (!ce_uptodate(ce))
|
|
|
|
break; /* continue? */
|
|
|
|
return DT_DIR;
|
|
|
|
}
|
|
|
|
return DT_UNKNOWN;
|
|
|
|
}
|
|
|
|
|
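get_index_dtype() above leans on the fact that index entries are sorted by name: if the path itself is not an entry but some entry begins with "path/", the path must be a directory. A simplified, standalone sketch of that check (a linear scan instead of the bisection the real code uses; is_dir_in_list is an invented name) could look like:

/*
 * Standalone sketch: in a list of index-like names, a path counts as a
 * directory if it is not an entry itself but some entry is "path/...".
 */
#include <stdio.h>
#include <string.h>

static int is_dir_in_list(const char *path, const char **names, size_t nr)
{
    size_t len = strlen(path), i;
    for (i = 0; i < nr; i++) {
        if (!strcmp(names[i], path))
            return 0;       /* exact entry, not a directory */
        if (!strncmp(names[i], path, len) && names[i][len] == '/')
            return 1;       /* an entry under "path/" exists */
    }
    return 0;
}

int main(void)
{
    const char *names[] = { "dir.c", "t/t0001.sh", "t/t0002.sh" };
    printf("%d\n", is_dir_in_list("t", names, 3));     /* 1 */
    printf("%d\n", is_dir_in_list("dir.c", names, 3)); /* 0 */
    return 0;
}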
2009-07-09 10:31:49 +08:00
|
|
|
static int get_dtype(struct dirent *de, const char *path, int len)
|
Fix directory scanner to correctly ignore files without d_type
On Fri, 19 Oct 2007, Todd T. Fries wrote:
> If DT_UNKNOWN exists, then we have to do a stat() of some form to
> find out the right type.
That happened in the case of a pathname that was ignored, and we did
not ask for "dir->show_ignored". That test used to be *together*
with the "DTYPE(de) != DT_DIR", but splitting the two tests up
means that we can do that (common) test before we even bother to
calculate the real dtype.
Of course, that optimization only matters for systems that don't
have, or don't fill in DTYPE properly.
I also clarified the real relationship between "exclude" and
"dir->show_ignored". It used to do
if (exclude != dir->show_ignored) {
..
which wasn't exactly obvious, because it triggers for two different
cases:
- the path is marked excluded, but we are not interested in ignored
files: ignore it
- the path is *not* excluded, but we *are* interested in ignored
files: ignore it unless it's a directory, in which case we might
have ignored files inside the directory and need to recurse
into it).
so this splits them into those two cases, since the first case
doesn't even care about the type.
I also made the DT_UNKNOWN case a separate helper function,
and added some commentary to the cases.
Linus
Signed-off-by: Shawn O. Pearce <spearce@spearce.org>
2007-10-20 01:59:22 +08:00
|
|
|
{
|
2008-02-01 12:23:25 +08:00
|
|
|
int dtype = de ? DTYPE(de) : DT_UNKNOWN;
|
2007-10-20 01:59:22 +08:00
|
|
|
struct stat st;
|
|
|
|
|
|
|
|
if (dtype != DT_UNKNOWN)
|
|
|
|
return dtype;
|
2009-07-10 04:14:28 +08:00
|
|
|
dtype = get_index_dtype(path, len);
|
|
|
|
if (dtype != DT_UNKNOWN)
|
|
|
|
return dtype;
|
|
|
|
if (lstat(path, &st))
|
2007-10-20 01:59:22 +08:00
|
|
|
return dtype;
|
|
|
|
if (S_ISREG(st.st_mode))
|
|
|
|
return DT_REG;
|
|
|
|
if (S_ISDIR(st.st_mode))
|
|
|
|
return DT_DIR;
|
|
|
|
if (S_ISLNK(st.st_mode))
|
|
|
|
return DT_LNK;
|
|
|
|
return dtype;
|
|
|
|
}
|
|
|
|
|
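The fallback order above is: trust the d_type the filesystem reported, then consult the index, and lstat() only as a last resort. A standalone sketch of just the filesystem part (skipping the index lookup) might look like the following; note that the DT_* constants are not available or not filled in on every platform, which is exactly why the fallback exists:

/*
 * Standalone sketch of the d_type fallback: use the dirent type when
 * the filesystem provides it, otherwise lstat() the path and derive
 * the type from st_mode.
 */
#include <stdio.h>
#include <sys/stat.h>
#include <dirent.h>

static int resolve_dtype(int dtype, const char *path)
{
    struct stat st;

    if (dtype != DT_UNKNOWN)
        return dtype;       /* filesystem filled it in */
    if (lstat(path, &st))
        return DT_UNKNOWN;  /* cannot tell */
    if (S_ISREG(st.st_mode))
        return DT_REG;
    if (S_ISDIR(st.st_mode))
        return DT_DIR;
    if (S_ISLNK(st.st_mode))
        return DT_LNK;
    return DT_UNKNOWN;
}

int main(void)
{
    printf("%d\n", resolve_dtype(DT_UNKNOWN, ".") == DT_DIR);  /* 1 */
    return 0;
}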
2010-01-09 12:56:16 +08:00
|
|
|
static enum path_treatment treat_one_path(struct dir_struct *dir,
|
untracked cache: record .gitignore information and dir hierarchy
The idea is that if we can capture all input and (non-recursive) output of
read_directory_recursive(), and can verify later that all the input is
the same, then the second r_d_r() should produce the same output as in
the first run.
The requirement for this to work is stat info of a directory MUST
change if an entry is added to or removed from that directory (and
should not change often otherwise). If your OS and filesystem do not
meet this requirement, untracked cache is not for you. Most file
systems on *nix should be fine. On Windows, NTFS is fine while FAT may
not be [1] even though FAT on Linux seems to be fine.
The list of input of r_d_r() is in the big comment block in dir.h. In
short, the output of a directory (not counting subdirs) mainly depends
on stat info of the directory in question, all .gitignore leading to
it and the check_only flag when r_d_r() is called recursively. This
patch records all this info (and the output) as r_d_r() runs.
Two hash_sha1_file() are required for $GIT_DIR/info/exclude and
core.excludesfile unless their stat data matches. hash_sha1_file() is
only needed when .gitignore files in the worktree are modified,
otherwise their SHA-1 in index is used (see the previous patch).
We could store stat data for .gitignore files so we don't have to
rehash them if their content is different from index, but I think
.gitignore files are rarely modified, so not worth extra cache data
(and hashing penalty read-cache.c:verify_hdr(), as we will be storing
this as an index extension).
The implication is, if you change .gitignore, you better add it to the
index soon or you lose all the benefit of untracked cache because a
modified .gitignore invalidates all subdirs recursively. This is
especially bad for .gitignore at root.
This cached output is about untracked files only, not ignored files
because the number of tracked files is usually small, so small cache
overhead, while the number of ignored files could go really high
(e.g. *.o files mixing with source code).
[1] "Description of NTFS date and time stamps for files and folders"
http://support.microsoft.com/kb/299648
Helped-by: Torsten Bögershausen <tboegi@web.de>
Helped-by: David Turner <dturner@twopensource.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-08 18:12:25 +08:00
|
|
|
struct untracked_cache_dir *untracked,
|
2012-05-01 19:25:24 +08:00
|
|
|
struct strbuf *path,
|
2015-08-19 21:01:25 +08:00
|
|
|
int baselen,
|
2010-01-09 12:56:16 +08:00
|
|
|
const struct path_simplify *simplify,
|
|
|
|
int dtype, struct dirent *de)
|
2010-01-09 11:14:07 +08:00
|
|
|
{
|
2013-04-16 03:13:35 +08:00
|
|
|
int exclude;
|
2013-09-17 15:06:15 +08:00
|
|
|
int has_path_in_index = !!cache_file_exists(path->buf, path->len, ignore_case);
|
ls-files -k: a directory only can be killed if the index has a non-directory
"ls-files -o" and "ls-files -k" both traverse the working tree down
to find either all untracked paths or those that will be "killed"
(removed from the working tree to make room) when the paths recorded
in the index are checked out. It is necessary to traverse the
working tree fully when enumerating all the "other" paths, but when
we are only interested in "killed" paths, we can take advantage of
the fact that paths that do not overlap with entries in the index
can never be killed.
The treat_one_path() helper function, which is called during the
recursive traversal, is the ideal place to implement an
optimization.
When we are looking at a directory P in the working tree, there are
three cases:
(1) P exists in the index. Everything inside the directory P in
the working tree needs to go when P is checked out from the
index.
(2) P does not exist in the index, but there is P/Q in the index.
We know P will stay a directory when we check out the contents
of the index, but we do not know yet if there is a directory
P/Q in the working tree to be killed, so we need to recurse.
(3) P does not exist in the index, and there is no P/Q in the index
to require P to be a directory, either. Only in this case, we
know that everything inside P will not be killed without
recursing.
Note that this helper is called by treat_leading_path() that decides
if we need to traverse only subdirectories of a single common
leading directory, which is essential for this optimization to be
correct. This caller checks each level of the leading path
component from shallower directory to deeper ones, and that is what
allows us to only check if the path appears in the index. If the
call to treat_one_path() weren't there, given a path P/Q/R, the real
traversal may start from directory P/Q/R, even when the index
records P as a regular file, and we would end up having to check if
any leading subpath in P/Q/R, e.g. P, appears in the index.
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-08-16 03:13:46 +08:00
|
|
|
|
2013-04-16 03:13:35 +08:00
|
|
|
if (dtype == DT_UNKNOWN)
|
|
|
|
dtype = get_dtype(de, path->buf, path->len);
|
|
|
|
|
|
|
|
/* Always exclude indexed files */
|
2013-08-16 03:13:46 +08:00
|
|
|
if (dtype != DT_DIR && has_path_in_index)
|
dir.c: git-status --ignored: don't scan the work tree three times
'git-status --ignored' recursively scans directories up to three times:
1. To collect untracked files.
2. To collect ignored files.
3. When collecting ignored files, to check that an untracked directory
that potentially contains ignored files doesn't also contain untracked
files (i.e. isn't already listed as untracked).
Let's get rid of case 3 first.
Currently, read_directory_recursive returns a boolean whether a directory
contains the requested files or not (actually, it returns the number of
files, but no caller actually needs that), and DIR_SHOW_IGNORED specifies
what we're looking for.
To be able to test for both untracked and ignored files in a single scan,
we need to return a bit more info, and the result must be independent of
the DIR_SHOW_IGNORED flag.
Reuse the path_treatment enum as return value of read_directory_recursive.
Split path_handled in two separate values path_excluded and path_untracked
that don't change their meaning with the DIR_SHOW_IGNORED flag. We don't
need an extra value path_untracked_and_excluded, as directories with both
untracked and ignored files should be listed as untracked.
Rename path_ignored to path_none for clarity (i.e. "don't treat that path"
in contrast to "the path is ignored and should be treated according to
DIR_SHOW_IGNORED").
Replace enum directory_treatment with path_treatment. That's just another
enum with the same meaning, no need to translate back and forth.
In treat_directory, get rid of the extra read_directory_recursive call and
all the DIR_SHOW_IGNORED-specific code.
In read_directory_recursive, decide whether to dir_add_name path_excluded
or path_untracked paths based on the DIR_SHOW_IGNORED flag.
The return value of read_directory_recursive is the maximum path_treatment
of all files and sub-directories. In the check_only case, abort when we've
reached the most significant value (path_untracked).
Signed-off-by: Karsten Blees <blees@dcon.de>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-04-16 03:14:22 +08:00
|
|
|
return path_none;
|
2013-04-16 03:13:35 +08:00
|
|
|
|
2013-08-16 03:13:46 +08:00
|
|
|
/*
|
|
|
|
* When we are looking at a directory P in the working tree,
|
|
|
|
* there are three cases:
|
|
|
|
*
|
|
|
|
* (1) P exists in the index. Everything inside the directory P in
|
|
|
|
* the working tree needs to go when P is checked out from the
|
|
|
|
* index.
|
|
|
|
*
|
|
|
|
* (2) P does not exist in the index, but there is P/Q in the index.
|
|
|
|
* We know P will stay a directory when we check out the contents
|
|
|
|
* of the index, but we do not know yet if there is a directory
|
|
|
|
* P/Q in the working tree to be killed, so we need to recurse.
|
|
|
|
*
|
|
|
|
* (3) P does not exist in the index, and there is no P/Q in the index
|
|
|
|
* to require P to be a directory, either. Only in this case, we
|
|
|
|
* know that everything inside P will not be killed without
|
|
|
|
* recursing.
|
|
|
|
*/
|
|
|
|
if ((dir->flags & DIR_COLLECT_KILLED_ONLY) &&
|
|
|
|
(dtype == DT_DIR) &&
|
2013-09-17 15:06:17 +08:00
|
|
|
!has_path_in_index &&
|
|
|
|
(directory_exists_in_index(path->buf, path->len) == index_nonexistent))
|
|
|
|
return path_none;
|
2013-04-16 03:13:35 +08:00
|
|
|
|
|
|
|
exclude = is_excluded(dir, path->buf, &dtype);
|
2010-01-09 11:14:07 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Excluded? If we don't explicitly want to show
|
|
|
|
* ignored files, ignore it
|
|
|
|
*/
|
2013-04-16 03:15:03 +08:00
|
|
|
if (exclude && !(dir->flags & (DIR_SHOW_IGNORED|DIR_SHOW_IGNORED_TOO)))
|
2013-04-16 03:14:22 +08:00
|
|
|
return path_excluded;
|
2010-01-09 11:14:07 +08:00
|
|
|
|
|
|
|
switch (dtype) {
|
|
|
|
default:
|
2013-04-16 03:14:22 +08:00
|
|
|
return path_none;
|
2010-01-09 11:14:07 +08:00
|
|
|
case DT_DIR:
|
2012-05-01 19:25:24 +08:00
|
|
|
strbuf_addch(path, '/');
|
2015-08-19 21:01:25 +08:00
|
|
|
return treat_directory(dir, untracked, path->buf, path->len,
|
|
|
|
baselen, exclude, simplify);
|
2010-01-09 11:14:07 +08:00
|
|
|
case DT_REG:
|
|
|
|
case DT_LNK:
|
2013-04-16 03:14:22 +08:00
|
|
|
return exclude ? path_excluded : path_untracked;
|
2010-01-09 11:14:07 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
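Taken together, treat_one_path() classifies each directory entry: tracked non-directories are skipped, excluded paths are dropped unless ignored files were requested, directories are handed to treat_directory() for possible recursion, and everything else is untracked. A loose, simplified model of that decision (not git's code; classify and its flags are invented for illustration) is sketched below:

/*
 * Simplified model of the classification above. The real function
 * also consults the untracked cache and treat_directory().
 */
#include <stdio.h>

enum treatment { T_NONE, T_RECURSE, T_EXCLUDED, T_UNTRACKED };

static enum treatment classify(int is_dir, int in_index, int excluded,
                               int want_ignored)
{
    if (!is_dir && in_index)
        return T_NONE;          /* tracked file: nothing to report */
    if (excluded && !want_ignored)
        return T_EXCLUDED;      /* ignored and caller does not care */
    if (is_dir)
        return T_RECURSE;       /* descend into the directory */
    return excluded ? T_EXCLUDED : T_UNTRACKED;
}

int main(void)
{
    printf("%d\n", classify(0, 0, 0, 0) == T_UNTRACKED); /* 1 */
    printf("%d\n", classify(0, 1, 0, 0) == T_NONE);      /* 1 */
    return 0;
}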
untracked cache: record/validate dir mtime and reuse cached output
The main readdir loop in read_directory_recursive() is replaced with a
new one that checks whether the cached results of a directory are still valid.
If a file is added or removed from the index, the containing directory
is invalidated (but not its subdirs). If a directory's mtime is changed,
the same happens. If a .gitignore is updated, the containing directory
and all subdirs are invalidated recursively. If dir_struct#flags or
other conditions change, the cache is ignored.
If a directory is invalidated, we opendir/readdir/closedir and run the
exclude machinery on that directory listing as usual. If untracked
cache is also enabled, we'll update the cache along the way. If a
directory is validated, we simply pull the untracked listing out from
the cache. The cache also records the list of direct subdirs that we
have to recurse in. Fully excluded directories are seen as "untracked
files".
In the best case when no dirs are invalidated, read_directory()
becomes a series of
stat(dir), open(.gitignore), fstat(), read(), close() and optionally
hash_sha1_file()
For comparison, standard read_directory() is a sequence of
opendir(), readdir(), open(.gitignore), fstat(), read(), close(), the
expensive last_exclude_matching() and closedir().
We already try not to open(.gitignore) if we know it does not exist,
so open/fstat/read/close sequence does not apply to every
directory. The sequence could be reduced further, as noted in
prep_exclude() in another patch. So in theory, the entire best-case
read_directory sequence could be reduced to a series of stat() and
nothing else.
This is not a silver bullet approach. When you compile a C file, for
example, the old .o file is removed and a new one with the same name
created, effectively invalidating the containing directory's cache
(but not its subdirectories). If your build process touches every
directory, this cache adds extra overhead for nothing, so it's a good
idea to separate generated files from tracked files. Editors may use
the same strategy for saving files. And of course you're out of luck
running your repo on an unsupported filesystem and/or operating system.
Helped-by: Eric Sunshine <sunshine@sunshineco.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-08 18:12:29 +08:00
|
|
|
static enum path_treatment treat_path_fast(struct dir_struct *dir,
|
|
|
|
struct untracked_cache_dir *untracked,
|
|
|
|
struct cached_dir *cdir,
|
|
|
|
struct strbuf *path,
|
|
|
|
int baselen,
|
|
|
|
const struct path_simplify *simplify)
|
|
|
|
{
|
|
|
|
strbuf_setlen(path, baselen);
|
|
|
|
if (!cdir->ucd) {
|
|
|
|
strbuf_addstr(path, cdir->file);
|
|
|
|
return path_untracked;
|
|
|
|
}
|
|
|
|
strbuf_addstr(path, cdir->ucd->name);
|
|
|
|
/* treat_one_path() does this before it calls treat_directory() */
|
use strbuf_complete to conditionally append slash
When working with paths in strbufs, we frequently want to
ensure that a directory contains a trailing slash before
appending to it. We can shorten this code (and make the
intent more obvious) by calling strbuf_complete.
Most of these cases are trivially identical conversions, but
there are two things to note:
- in a few cases we did not check that the strbuf is
non-empty (which would lead to an out-of-bounds memory
access). These were generally not triggerable in
practice, either from earlier assertions, or typically
because we would have just fed the strbuf to opendir(),
which would choke on an empty path.
- in a few cases we indexed the buffer with "original_len"
or similar, rather than the current sb->len, and it is
not immediately obvious from the diff that they are the
same. In all of these cases, I manually verified that
the strbuf does not change between the assignment and
the strbuf_complete call.
This does not convert cases which look like:
if (sb->len && !is_dir_sep(sb->buf[sb->len - 1]))
strbuf_addch(sb, '/');
as those are obviously semantically different. Some of these
cases arguably should be doing that, but that is out of
scope for this change, which aims purely for cleanup with no
behavior change (and at least it will make such sites easier
to find and examine in the future, as we can grep for
strbuf_complete).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-09-25 05:08:35 +08:00
|
|
|
strbuf_complete(path, '/');
|
2015-03-08 18:12:29 +08:00
|
|
|
if (cdir->ucd->check_only)
|
|
|
|
/*
|
|
|
|
* check_only is set as a result of treat_directory() getting
|
|
|
|
* to its bottom. Verify again the same set of directories
|
|
|
|
* with check_only set.
|
|
|
|
*/
|
|
|
|
return read_directory_recursive(dir, path->buf, path->len,
|
|
|
|
cdir->ucd, 1, simplify);
|
|
|
|
/*
|
|
|
|
* We get path_recurse in the first run when
|
|
|
|
* directory_exists_in_index() returns index_nonexistent. We
|
|
|
|
* are sure that new changes in the index do not impact the
|
|
|
|
* outcome. Return now.
|
|
|
|
*/
|
|
|
|
return path_recurse;
|
|
|
|
}
|
|
|
|
|
2010-01-09 12:56:16 +08:00
|
|
|
static enum path_treatment treat_path(struct dir_struct *dir,
|
2015-03-08 18:12:25 +08:00
|
|
|
struct untracked_cache_dir *untracked,
|
2015-03-08 18:12:28 +08:00
|
|
|
struct cached_dir *cdir,
|
2012-05-01 19:25:24 +08:00
|
|
|
struct strbuf *path,
|
2010-01-09 12:56:16 +08:00
|
|
|
int baselen,
|
2012-05-01 19:25:24 +08:00
|
|
|
const struct path_simplify *simplify)
|
2010-01-09 12:56:16 +08:00
|
|
|
{
|
|
|
|
int dtype;
|
2015-03-08 18:12:28 +08:00
|
|
|
struct dirent *de = cdir->de;
|
2010-01-09 12:56:16 +08:00
|
|
|
|
2015-03-08 18:12:29 +08:00
|
|
|
if (!de)
|
|
|
|
return treat_path_fast(dir, untracked, cdir, path,
|
|
|
|
baselen, simplify);
|
2010-01-09 12:56:16 +08:00
|
|
|
if (is_dot_or_dotdot(de->d_name) || !strcmp(de->d_name, ".git"))
|
2013-04-16 03:14:22 +08:00
|
|
|
return path_none;
|
2012-05-01 19:25:24 +08:00
|
|
|
strbuf_setlen(path, baselen);
|
|
|
|
strbuf_addstr(path, de->d_name);
|
|
|
|
if (simplify_away(path->buf, path->len, simplify))
|
2013-04-16 03:14:22 +08:00
|
|
|
return path_none;
|
2010-01-09 12:56:16 +08:00
|
|
|
|
|
|
|
dtype = DTYPE(de);
|
2015-08-19 21:01:25 +08:00
|
|
|
return treat_one_path(dir, untracked, path, baselen, simplify, dtype, de);
|
2015-03-08 18:12:25 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void add_untracked(struct untracked_cache_dir *dir, const char *name)
|
|
|
|
{
|
|
|
|
if (!dir)
|
|
|
|
return;
|
|
|
|
ALLOC_GROW(dir->untracked, dir->untracked_nr + 1,
|
|
|
|
dir->untracked_alloc);
|
|
|
|
dir->untracked[dir->untracked_nr++] = xstrdup(name);
|
2010-01-09 12:56:16 +08:00
|
|
|
}
|
|
|
|
|
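add_untracked() above appends a copied name to a growable array via git's internal ALLOC_GROW macro. A standalone sketch of the same grow-on-demand pattern using plain realloc() (add_name and struct name_list are invented names) follows:

/*
 * Standalone sketch of a grow-on-demand string array, doubling the
 * allocation when it runs out of room, as ALLOC_GROW does internally.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct name_list {
    char **names;
    size_t nr, alloc;
};

static void add_name(struct name_list *list, const char *name)
{
    if (list->nr + 1 > list->alloc) {
        list->alloc = list->alloc ? list->alloc * 2 : 8;
        list->names = realloc(list->names,
                              list->alloc * sizeof(*list->names));
        if (!list->names)
            abort();        /* out of memory: give up */
    }
    list->names[list->nr++] = strdup(name);
}

int main(void)
{
    struct name_list list = { NULL, 0, 0 };
    add_name(&list, "untracked-file.txt");
    printf("%zu: %s\n", list.nr, list.names[0]);
    return 0;
}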
2015-03-08 18:12:29 +08:00
|
|
|
static int valid_cached_dir(struct dir_struct *dir,
|
|
|
|
struct untracked_cache_dir *untracked,
|
|
|
|
struct strbuf *path,
|
|
|
|
int check_only)
|
|
|
|
{
|
|
|
|
struct stat st;
|
|
|
|
|
|
|
|
if (!untracked)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (stat(path->len ? path->buf : ".", &st)) {
|
|
|
|
invalidate_directory(dir->untracked, untracked);
|
|
|
|
memset(&untracked->stat_data, 0, sizeof(untracked->stat_data));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
if (!untracked->valid ||
|
2015-03-08 18:12:37 +08:00
|
|
|
match_stat_data_racy(&the_index, &untracked->stat_data, &st)) {
|
2015-03-08 18:12:29 +08:00
|
|
|
if (untracked->valid)
|
|
|
|
invalidate_directory(dir->untracked, untracked);
|
|
|
|
fill_stat_data(&untracked->stat_data, &st);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (untracked->check_only != !!check_only) {
|
|
|
|
invalidate_directory(dir->untracked, untracked);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* prep_exclude will be called eventually on this directory,
|
|
|
|
* but it's called much later in last_exclude_matching(). We
|
|
|
|
* need it now to determine the validity of the cache for this
|
|
|
|
* path. The next calls will be nearly no-op, the way
|
|
|
|
* prep_exclude() is designed.
|
|
|
|
*/
|
|
|
|
if (path->len && path->buf[path->len - 1] != '/') {
|
|
|
|
strbuf_addch(path, '/');
|
|
|
|
prep_exclude(dir, path->buf, path->len);
|
|
|
|
strbuf_setlen(path, path->len - 1);
|
|
|
|
} else
|
|
|
|
prep_exclude(dir, path->buf, path->len);
|
|
|
|
|
|
|
|
/* hopefully prep_exclude() hasn't invalidated this entry... */
|
|
|
|
return untracked->valid;
|
|
|
|
}
|
|
|
|
|
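valid_cached_dir() above decides whether a cached listing can be reused by comparing fresh stat() data of the directory against the data saved when the cache was filled. A standalone sketch of that validity test (cache_still_valid and struct cached_stat are invented names; the real code compares more fields via match_stat_data_racy()) might look like:

/*
 * Standalone sketch of the validity test: the cached listing of a
 * directory is reusable only if a fresh stat() still matches the stat
 * data recorded when the cache entry was created.
 */
#include <stdio.h>
#include <sys/stat.h>

struct cached_stat {
    time_t mtime;
    ino_t  ino;
    dev_t  dev;
};

static int cache_still_valid(const char *dir, const struct cached_stat *saved)
{
    struct stat st;

    if (stat(dir, &st))
        return 0;           /* directory gone: cache is invalid */
    return st.st_mtime == saved->mtime &&
           st.st_ino == saved->ino &&
           st.st_dev == saved->dev;
}

int main(void)
{
    struct stat st;
    struct cached_stat saved;

    if (stat(".", &st))
        return 1;
    saved.mtime = st.st_mtime;
    saved.ino = st.st_ino;
    saved.dev = st.st_dev;
    printf("%d\n", cache_still_valid(".", &saved)); /* 1: unchanged */
    return 0;
}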
2015-03-08 18:12:28 +08:00
|
|
|
static int open_cached_dir(struct cached_dir *cdir,
|
|
|
|
struct dir_struct *dir,
|
|
|
|
struct untracked_cache_dir *untracked,
|
|
|
|
struct strbuf *path,
|
|
|
|
int check_only)
|
|
|
|
{
|
|
|
|
memset(cdir, 0, sizeof(*cdir));
|
|
|
|
cdir->untracked = untracked;
|
2015-03-08 18:12:29 +08:00
|
|
|
if (valid_cached_dir(dir, untracked, path, check_only))
|
|
|
|
return 0;
|
2015-03-08 18:12:28 +08:00
|
|
|
cdir->fdir = opendir(path->len ? path->buf : ".");
|
untracked cache: record/validate dir mtime and reuse cached output
2015-03-08 18:12:29 +08:00
|
|
|
if (dir->untracked)
|
|
|
|
dir->untracked->dir_opened++;
|
2015-03-08 18:12:28 +08:00
|
|
|
if (!cdir->fdir)
|
|
|
|
return -1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int read_cached_dir(struct cached_dir *cdir)
|
|
|
|
{
|
|
|
|
if (cdir->fdir) {
|
|
|
|
cdir->de = readdir(cdir->fdir);
|
|
|
|
if (!cdir->de)
|
|
|
|
return -1;
|
|
|
|
return 0;
|
|
|
|
}
|
untracked cache: record/validate dir mtime and reuse cached output
2015-03-08 18:12:29 +08:00
|
|
|
while (cdir->nr_dirs < cdir->untracked->dirs_nr) {
|
|
|
|
struct untracked_cache_dir *d = cdir->untracked->dirs[cdir->nr_dirs];
|
2015-03-08 18:12:30 +08:00
|
|
|
if (!d->recurse) {
|
|
|
|
cdir->nr_dirs++;
|
|
|
|
continue;
|
|
|
|
}
|
untracked cache: record/validate dir mtime and reuse cached output
2015-03-08 18:12:29 +08:00
|
|
|
cdir->ucd = d;
|
|
|
|
cdir->nr_dirs++;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
cdir->ucd = NULL;
|
|
|
|
if (cdir->nr_files < cdir->untracked->untracked_nr) {
|
|
|
|
struct untracked_cache_dir *d = cdir->untracked;
|
|
|
|
cdir->file = d->untracked[cdir->nr_files++];
|
|
|
|
return 0;
|
|
|
|
}
|
2015-03-08 18:12:28 +08:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void close_cached_dir(struct cached_dir *cdir)
|
|
|
|
{
|
|
|
|
if (cdir->fdir)
|
|
|
|
closedir(cdir->fdir);
|
untracked cache: record/validate dir mtime and reuse cached output
2015-03-08 18:12:29 +08:00
|
|
|
/*
|
|
|
|
* We have gone through this directory and found no untracked
|
|
|
|
* entries. Mark it valid.
|
|
|
|
*/
|
2015-03-08 18:12:30 +08:00
|
|
|
if (cdir->untracked) {
|
untracked cache: record/validate dir mtime and reuse cached output
2015-03-08 18:12:29 +08:00
|
|
|
cdir->untracked->valid = 1;
|
2015-03-08 18:12:30 +08:00
|
|
|
cdir->untracked->recurse = 1;
|
|
|
|
}
|
2010-01-09 12:56:16 +08:00
|
|
|
}
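Taken together, open_cached_dir(), read_cached_dir() and
close_cached_dir() give read_directory_recursive() one uniform
open/read/close loop, whether the entries come from readdir() or from
the untracked cache. Below is a self-contained analogue of that calling
pattern, reduced to the filesystem branch only (the cached branch,
git's real structs and treat_path() are omitted; the mini_* names are
made up for illustration):

#include <dirent.h>
#include <stdio.h>
#include <string.h>

struct mini_cached_dir {
	DIR *fdir;
	struct dirent *de;
};

static int mini_open(struct mini_cached_dir *cdir, const char *path)
{
	memset(cdir, 0, sizeof(*cdir));
	cdir->fdir = opendir(*path ? path : ".");
	return cdir->fdir ? 0 : -1;
}

static int mini_read(struct mini_cached_dir *cdir)
{
	if (!cdir->fdir)
		return -1;
	cdir->de = readdir(cdir->fdir);
	return cdir->de ? 0 : -1;
}

static void mini_close(struct mini_cached_dir *cdir)
{
	if (cdir->fdir)
		closedir(cdir->fdir);
}

int main(void)
{
	struct mini_cached_dir cdir;

	if (mini_open(&cdir, "."))
		return 1;
	while (!mini_read(&cdir))
		printf("entry: %s\n", cdir.de->d_name);	/* treat_path() would decide here */
	mini_close(&cdir);
	return 0;
}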
|
|
|
|
|
2006-05-17 10:02:14 +08:00
|
|
|
/*
|
|
|
|
* Read a directory tree. We currently ignore anything but
|
|
|
|
* directories, regular files and symlinks. That's because git
|
|
|
|
* doesn't handle them at all yet. Maybe that will change some
|
|
|
|
* day.
|
|
|
|
*
|
|
|
|
* Also, we ignore the name ".git" (even if it is not a directory).
|
|
|
|
* That likely will not change.
|
dir.c: git-status --ignored: don't scan the work tree three times
2013-04-16 03:14:22 +08:00
|
|
|
*
|
|
|
|
* Returns the most significant path_treatment value encountered in the scan.
|
2006-05-17 10:02:14 +08:00
|
|
|
*/
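The "most significant path_treatment" wording relies on the ordering of
the enum values (declared earlier in dir.c and not shown in this
excerpt), so that a plain integer maximum picks the strongest result;
that is what the "if (state > dir_state) dir_state = state;"
accumulation in the loop below depends on. A sketch of that assumption:

/* Sketch of the ordering assumed by "most significant": later values
 * win when accumulated with a plain maximum. The exact member list is
 * an assumption here; the real enum lives earlier in dir.c. */
enum sketch_path_treatment {
	sketch_path_none = 0,	/* nothing to report for this path */
	sketch_path_recurse,	/* descend into this directory */
	sketch_path_excluded,	/* matched an ignore rule */
	sketch_path_untracked	/* untracked: the strongest result */
};

static enum sketch_path_treatment
accumulate(enum sketch_path_treatment dir_state,
	   enum sketch_path_treatment state)
{
	return state > dir_state ? state : dir_state;	/* keep the maximum */
}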
|
dir.c: git-status --ignored: don't scan the work tree three times
2013-04-16 03:14:22 +08:00
|
|
|
static enum path_treatment read_directory_recursive(struct dir_struct *dir,
|
2010-01-09 11:14:07 +08:00
|
|
|
const char *base, int baselen,
|
untracked cache: record .gitignore information and dir hierarchy
The idea is that if we can capture all input and (non-recursive) output
of read_directory_recursive(), and can later verify that all the input
is the same, then a second r_d_r() run should produce the same output
as the first run.
The requirement for this to work is that the stat info of a directory MUST
change if an entry is added to or removed from that directory (and
should not change often otherwise). If your OS and filesystem do not
meet this requirement, untracked cache is not for you. Most file
systems on *nix should be fine. On Windows, NTFS is fine while FAT may
not be [1] even though FAT on Linux seems to be fine.
The list of inputs to r_d_r() is in the big comment block in dir.h. In
short, the output of a directory (not counting subdirs) mainly depends
on the stat info of the directory in question, all the .gitignore files
leading to it, and the check_only flag when r_d_r() is called
recursively. This patch records all this info (and the output) as
r_d_r() runs.
Two hash_sha1_file() calls are required for $GIT_DIR/info/exclude and
core.excludesfile unless their stat data matches. hash_sha1_file() is
only needed when .gitignore files in the worktree are modified;
otherwise their SHA-1 from the index is used (see the previous patch).
We could store stat data for .gitignore files so we don't have to
rehash them when their content differs from the index, but .gitignore
files are rarely modified, so that is not worth the extra cache data
(and the hashing penalty in read-cache.c:verify_hdr(), as we will be
storing this as an index extension).
The implication is that if you change a .gitignore, you had better add
it to the index soon, or you lose all the benefit of the untracked
cache, because a modified .gitignore invalidates all subdirs
recursively. This is especially bad for a .gitignore at the root.
This cached output covers untracked files only, not ignored files: the
number of untracked files is usually small, so the cache overhead stays
small, while the number of ignored files can grow very large (e.g. *.o
files mixed in with source code).
[1] "Description of NTFS date and time stamps for files and folders"
http://support.microsoft.com/kb/299648
Helped-by: Torsten Bögershausen <tboegi@web.de>
Helped-by: David Turner <dturner@twopensource.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-08 18:12:25 +08:00
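As a loose illustration of "capture all input" from the message above
(hypothetical types and field names, not the real untracked-cache
layout): a directory's cached output stays usable only while its
recorded inputs are unchanged, i.e. the directory's own stat data, a
hash covering the .gitignore data that applies to it, and the
check_only flag:

#include <string.h>
#include <sys/stat.h>

/* Hypothetical record of the inputs that determine one directory's
 * untracked listing. */
struct sketch_dir_inputs {
	struct stat dir_stat;		/* stat data of the directory itself */
	unsigned char exclude_hash[20];	/* e.g. SHA-1 over the relevant .gitignore data */
	int check_only;			/* flag r_d_r() was called with */
};

/* The cached output may be reused only if every recorded input matches. */
static int inputs_unchanged(const struct sketch_dir_inputs *recorded,
			    const struct stat *dir_now,
			    const unsigned char *exclude_hash_now,
			    int check_only_now)
{
	return recorded->dir_stat.st_mtime == dir_now->st_mtime &&
	       recorded->dir_stat.st_size == dir_now->st_size &&
	       !memcmp(recorded->exclude_hash, exclude_hash_now, 20) &&
	       recorded->check_only == !!check_only_now;
}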
|
|
|
struct untracked_cache_dir *untracked, int check_only,
|
2010-01-09 11:14:07 +08:00
|
|
|
const struct path_simplify *simplify)
|
2006-05-17 10:02:14 +08:00
|
|
|
{
|
2015-03-08 18:12:28 +08:00
|
|
|
struct cached_dir cdir;
|
dir.c: git-status --ignored: don't scan the work tree three times
2013-04-16 03:14:22 +08:00
|
|
|
enum path_treatment state, subdir_state, dir_state = path_none;
|
2012-05-09 00:43:40 +08:00
|
|
|
struct strbuf path = STRBUF_INIT;
|
2006-05-17 10:02:14 +08:00
|
|
|
|
2012-05-09 00:43:40 +08:00
|
|
|
strbuf_add(&path, base, baselen);
|
2011-10-24 14:36:11 +08:00
|
|
|
|
2015-03-08 18:12:28 +08:00
|
|
|
if (open_cached_dir(&cdir, dir, untracked, &path, check_only))
|
2012-05-11 22:53:07 +08:00
|
|
|
goto out;
|
|
|
|
|
untracked cache: record .gitignore information and dir hierarchy
2015-03-08 18:12:25 +08:00
|
|
|
if (untracked)
|
|
|
|
untracked->check_only = !!check_only;
|
|
|
|
|
2015-03-08 18:12:28 +08:00
|
|
|
while (!read_cached_dir(&cdir)) {
|
dir.c: git-status --ignored: don't scan the work tree three times
2013-04-16 03:14:22 +08:00
|
|
|
/* check how the file or directory should be treated */
|
2015-03-08 18:12:28 +08:00
|
|
|
state = treat_path(dir, untracked, &cdir, &path, baselen, simplify);
|
untracked cache: record .gitignore information and dir hierarchy
2015-03-08 18:12:25 +08:00
|
|
|
|
dir.c: git-status --ignored: don't scan the work tree three times
2013-04-16 03:14:22 +08:00
|
|
|
if (state > dir_state)
|
|
|
|
dir_state = state;
|
|
|
|
|
|
|
|
/* recurse into subdir if instructed by treat_path */
|
|
|
|
if (state == path_recurse) {
|
untracked cache: record .gitignore information and dir hierarchy
2015-03-08 18:12:25 +08:00
|
|
|
struct untracked_cache_dir *ud;
|
|
|
|
ud = lookup_untracked(dir->untracked, untracked,
|
|
|
|
path.buf + baselen,
|
|
|
|
path.len - baselen);
|
|
|
|
subdir_state =
|
|
|
|
read_directory_recursive(dir, path.buf, path.len,
|
|
|
|
ud, check_only, simplify);
|
dir.c: git-status --ignored: don't scan the work tree three times
2013-04-16 03:14:22 +08:00
|
|
|
if (subdir_state > dir_state)
|
|
|
|
dir_state = subdir_state;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (check_only) {
|
|
|
|
/* abort early if maximum state has been reached */
|
untracked cache: record .gitignore information and dir hierarchy
2015-03-08 18:12:25 +08:00
|
|
|
if (dir_state == path_untracked) {
|
untracked cache: record/validate dir mtime and reuse cached output
2015-03-08 18:12:29 +08:00
|
|
|
if (cdir.fdir)
|
untracked cache: record .gitignore information and dir hierarchy
2015-03-08 18:12:25 +08:00
|
|
|
add_untracked(untracked, path.buf + baselen);
|
dir.c: git-status --ignored: don't scan the work tree three times
2013-04-16 03:14:22 +08:00
|
|
|
break;
|
untracked cache: record .gitignore information and dir hierarchy
2015-03-08 18:12:25 +08:00
|
|
|
}
|
dir.c: git-status --ignored: don't scan the work tree three times
2013-04-16 03:14:22 +08:00
|
|
|
/* skip the dir_add_* part */
|
2011-10-24 14:36:11 +08:00
|
|
|
continue;
|
2006-05-17 10:02:14 +08:00
|
|
|
}
|
dir.c: git-status --ignored: don't scan the work tree three times
2013-04-16 03:14:22 +08:00
|
|
|
|
|
|
|
/* add the path to the appropriate result list */
|
|
|
|
switch (state) {
|
|
|
|
case path_excluded:
|
|
|
|
if (dir->flags & DIR_SHOW_IGNORED)
|
|
|
|
dir_add_name(dir, path.buf, path.len);
|
2013-04-16 03:15:03 +08:00
|
|
|
else if ((dir->flags & DIR_SHOW_IGNORED_TOO) ||
|
|
|
|
((dir->flags & DIR_COLLECT_IGNORED) &&
|
|
|
|
exclude_matches_pathspec(path.buf, path.len,
|
|
|
|
simplify)))
|
|
|
|
dir_add_ignored(dir, path.buf, path.len);
|
dir.c: git-status --ignored: don't scan the work tree three times
2013-04-16 03:14:22 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
case path_untracked:
|
untracked cache: record .gitignore information and dir hierarchy
The idea is that if we can capture all input and (non-recursive) output of
read_directory_recursive(), and can verify later that all the input is
the same, then the second r_d_r() should produce the same output as in
the first run.
The requirement for this to work is stat info of a directory MUST
change if an entry is added to or removed from that directory (and
should not change often otherwise). If your OS and filesystem do not
meet this requirement, untracked cache is not for you. Most file
systems on *nix should be fine. On Windows, NTFS is fine while FAT may
not be [1] even though FAT on Linux seems to be fine.
The list of input of r_d_r() is in the big comment block in dir.h. In
short, the output of a directory (not counting subdirs) mainly depends
on stat info of the directory in question, all .gitignore files leading to
it and the check_only flag when r_d_r() is called recursively. This
patch records all this info (and the output) as r_d_r() runs.
Two hash_sha1_file() are required for $GIT_DIR/info/exclude and
core.excludesfile unless their stat data matches. hash_sha1_file() is
only needed when .gitignore files in the worktree are modified,
otherwise their SHA-1 in index is used (see the previous patch).
We could store stat data for .gitignore files so we don't have to
rehash them if their content is different from index, but I think
.gitignore files are rarely modified, so not worth extra cache data
(and the hashing penalty in read-cache.c:verify_hdr(), as we will be storing
this as an index extension).
The implication is, if you change .gitignore, you better add it to the
index soon or you lose all the benefit of untracked cache because a
modified .gitignore invalidates all subdirs recursively. This is
especially bad for .gitignore at root.
This cached output is about untracked files only, not ignored files
because the number of tracked files is usually small, so small cache
overhead, while the number of ignored files could go really high
(e.g. *.o files mixing with source code).
[1] "Description of NTFS date and time stamps for files and folders"
http://support.microsoft.com/kb/299648
Helped-by: Torsten Bögershausen <tboegi@web.de>
Helped-by: David Turner <dturner@twopensource.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-08 18:12:25 +08:00
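A minimal sketch of the validation idea, assuming git's match_stat_data() helper from cache.h and a simplified per-directory record (the real layout lives in struct untracked_cache_dir in dir.h and also covers the .gitignore chain):

struct cached_listing {
	struct stat_data stat;	/* directory stat data at the time of the scan */
	int check_only;		/* flag r_d_r() was called with */
	/* ... the recorded untracked entries would follow here ... */
};

/*
 * The cached output may only be reused if nothing that influenced the
 * first read_directory_recursive() run has changed since it was recorded.
 */
static int listing_still_valid(const struct cached_listing *c,
			       struct stat *st, int check_only)
{
	return c->check_only == check_only &&
	       !match_stat_data(&c->stat, st);	/* 0 means "unchanged" */
}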
|
|
|
if (dir->flags & DIR_SHOW_IGNORED)
|
|
|
|
break;
|
|
|
|
dir_add_name(dir, path.buf, path.len);
|
untracked cache: record/validate dir mtime and reuse cached output
The main readdir loop in read_directory_recursive() is replaced with a
new one that checks if cached results of a directory is still valid.
If a file is added or removed from the index, the containing directory
is invalidated (but not its subdirs). If directory's mtime is changed,
the same happens. If a .gitignore is updated, the containing directory
and all subdirs are invalidated recursively. If dir_struct#flags or
other conditions change, the cache is ignored.
If a directory is invalidated, we opendir/readdir/closedir and run the
exclude machinery on that directory listing as usual. If untracked
cache is also enabled, we'll update the cache along the way. If a
directory is validated, we simply pull the untracked listing out from
the cache. The cache also records the list of direct subdirs that we
have to recurse in. Fully excluded directories are seen as "untracked
files".
In the best case when no dirs are invalidated, read_directory()
becomes a series of
stat(dir), open(.gitignore), fstat(), read(), close() and optionally
hash_sha1_file()
For comparison, standard read_directory() is a sequence of
opendir(), readdir(), open(.gitignore), fstat(), read(), close(), the
expensive last_exclude_matching() and closedir().
We already try not to open(.gitignore) if we know it does not exist,
so open/fstat/read/close sequence does not apply to every
directory. The sequence could be reduced further, as noted in
prep_exclude() in another patch. So in theory, the entire best-case
read_directory sequence could be reduced to a series of stat() and
nothing else.
This is not a silver bullet approach. When you compile a C file, for
example, the old .o file is removed and a new one with the same name
created, effectively invalidating the containing directory's cache
(but not its subdirectories). If your build process touches every
directory, this cache adds extra overhead for nothing, so it's a good
idea to separate generated files from tracked files. Editors may use
the same strategy for saving files. And of course you're out of luck
running your repo on an unsupported filesystem and/or operating system.
Helped-by: Eric Sunshine <sunshine@sunshineco.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-08 18:12:29 +08:00
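The invalidation rules can be illustrated with a rough sketch (hypothetical struct and helper names; the real code marks entries in struct untracked_cache_dir instead):

struct cached_dir_node {
	int valid;
	int nr_subdirs;
	struct cached_dir_node **subdirs;
};

/* An index or mtime change invalidates only the containing directory. */
static void invalidate_one(struct cached_dir_node *d)
{
	d->valid = 0;
}

/* A .gitignore change invalidates the directory and every subdirectory. */
static void invalidate_recursively(struct cached_dir_node *d)
{
	int i;

	d->valid = 0;
	for (i = 0; i < d->nr_subdirs; i++)
		invalidate_recursively(d->subdirs[i]);
}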
|
|
|
if (cdir.fdir)
|
untracked cache: record .gitignore information and dir hierarchy
2015-03-08 18:12:25 +08:00
|
|
|
add_untracked(untracked, path.buf + baselen);
|
2012-05-11 22:53:07 +08:00
|
|
|
break;
|
dir.c: git-status --ignored: don't scan the work tree three times
2013-04-16 03:14:22 +08:00
|
|
|
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
2006-05-17 10:02:14 +08:00
|
|
|
}
|
2015-03-08 18:12:28 +08:00
|
|
|
close_cached_dir(&cdir);
|
2012-05-11 22:53:07 +08:00
|
|
|
out:
|
2012-05-09 00:43:40 +08:00
|
|
|
strbuf_release(&path);
|
2006-05-17 10:02:14 +08:00
|
|
|
|
dir.c: git-status --ignored: don't scan the work tree three times
2013-04-16 03:14:22 +08:00
|
|
|
return dir_state;
|
2006-05-17 10:02:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int cmp_name(const void *p1, const void *p2)
|
|
|
|
{
|
|
|
|
const struct dir_entry *e1 = *(const struct dir_entry **)p1;
|
|
|
|
const struct dir_entry *e2 = *(const struct dir_entry **)p2;
|
|
|
|
|
2014-06-20 10:06:44 +08:00
|
|
|
return name_compare(e1->name, e1->len, e2->name, e2->len);
|
2006-05-17 10:02:14 +08:00
|
|
|
}
|
|
|
|
|
Optimize directory listing with pathspec limiter.
The way things are set up, you can now pass a "pathspec" to the
"read_directory()" function. If you pass NULL, it acts exactly
like it used to do (read everything). If you pass a non-NULL
pointer, it will simplify it into a "these are the prefixes
without any special characters", and stop any readdir() early if
the path in question doesn't match any of the prefixes.
NOTE! This does *not* obviate the need for the caller to do the *exact*
pathspec match later. It's a first-level filter on "read_directory()", but
it does not do the full pathspec thing. Maybe it should. But in the
meantime, builtin-add.c really does need to do first
read_directory(dir, .., pathspec);
if (pathspec)
prune_directory(dir, pathspec, baselen);
ie the "prune_directory()" part will do the *exact* pathspec pruning,
while the "read_directory()" will use the pathspec just to do some quick
high-level pruning of the directories it will recurse into.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Junio C Hamano <junkio@cox.net>
2007-03-31 11:39:30 +08:00
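The early pruning boils down to a prefix test against the wildcard-free leading part of each pathspec, stored in the struct path_simplify array defined near the top of this file. A rough sketch of the test (could_match_prefix is an invented name; dir.c's simplify_away() implements the same check with the opposite return sense):

/*
 * Return 1 if "path" could still match one of the simplified prefixes,
 * 0 if the whole path can be skipped without calling readdir() on it.
 */
static int could_match_prefix(const char *path, int pathlen,
			      const struct path_simplify *simplify)
{
	if (!simplify)
		return 1;	/* no pathspec: everything is interesting */

	for (; simplify->path; simplify++) {
		int len = simplify->len;

		if (len > pathlen)
			len = pathlen;
		if (!memcmp(path, simplify->path, len))
			return 1;
	}
	return 0;
}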
|
|
|
static struct path_simplify *create_simplify(const char **pathspec)
|
|
|
|
{
|
|
|
|
int nr, alloc = 0;
|
|
|
|
struct path_simplify *simplify = NULL;
|
|
|
|
|
|
|
|
if (!pathspec)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
for (nr = 0 ; ; nr++) {
|
|
|
|
const char *match;
|
2014-03-04 06:31:58 +08:00
|
|
|
ALLOC_GROW(simplify, nr + 1, alloc);
|
Optimize directory listing with pathspec limiter.
2007-03-31 11:39:30 +08:00
|
|
|
match = *pathspec++;
|
|
|
|
if (!match)
|
|
|
|
break;
|
|
|
|
simplify[nr].path = match;
|
|
|
|
simplify[nr].len = simple_length(match);
|
|
|
|
}
|
|
|
|
simplify[nr].path = NULL;
|
|
|
|
simplify[nr].len = 0;
|
|
|
|
return simplify;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void free_simplify(struct path_simplify *simplify)
|
|
|
|
{
|
Avoid unnecessary "if-before-free" tests.
This change removes all obvious useless if-before-free tests.
E.g., it replaces code like this:
if (some_expression)
free (some_expression);
with the now-equivalent:
free (some_expression);
It is equivalent not just because POSIX has required free(NULL)
to work for a long time, but simply because it has worked for
so long that no reasonable porting target fails the test.
Here's some evidence from nearly 1.5 years ago:
http://www.winehq.org/pipermail/wine-patches/2006-October/031544.html
FYI, the change below was prepared by running the following:
git ls-files -z | xargs -0 \
perl -0x3b -pi -e \
's/\bif\s*\(\s*(\S+?)(?:\s*!=\s*NULL)?\s*\)\s+(free\s*\(\s*\1\s*\))/$2/s'
Note however, that it doesn't handle brace-enclosed blocks like
"if (x) { free (x); }". But that's ok, since there were none like
that in git sources.
Beware: if you do use the above snippet, note that it can
produce syntactically invalid C code. That happens when the
affected "if"-statement has a matching "else".
E.g., it would transform this
if (x)
free (x);
else
foo ();
into this:
free (x);
else
foo ();
There were none of those here, either.
If you're interested in automating detection of the useless
tests, you might like the useless-if-before-free script in gnulib:
[it *does* detect brace-enclosed free statements, and has a --name=S
option to make it detect free-like functions with different names]
http://git.sv.gnu.org/gitweb/?p=gnulib.git;a=blob;f=build-aux/useless-if-before-free
Addendum:
Remove one more (in imap-send.c), spotted by Jean-Luc Herren <jlh@gmx.ch>.
Signed-off-by: Jim Meyering <meyering@redhat.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2008-02-01 01:26:32 +08:00
|
|
|
free(simplify);
|
Optimize directory listing with pathspec limiter.
2007-03-31 11:39:30 +08:00
|
|
|
}
|
|
|
|
|
2010-01-09 15:05:41 +08:00
|
|
|
static int treat_leading_path(struct dir_struct *dir,
|
|
|
|
const char *path, int len,
|
|
|
|
const struct path_simplify *simplify)
|
|
|
|
{
|
2012-05-01 19:25:24 +08:00
|
|
|
struct strbuf sb = STRBUF_INIT;
|
|
|
|
int baselen, rc = 0;
|
2010-01-09 15:05:41 +08:00
|
|
|
const char *cp;
|
2013-04-16 03:09:25 +08:00
|
|
|
int old_flags = dir->flags;
|
2010-01-09 15:05:41 +08:00
|
|
|
|
|
|
|
while (len && path[len - 1] == '/')
|
|
|
|
len--;
|
|
|
|
if (!len)
|
|
|
|
return 1;
|
|
|
|
baselen = 0;
|
2013-04-16 03:09:25 +08:00
|
|
|
dir->flags &= ~DIR_SHOW_OTHER_DIRECTORIES;
|
2010-01-09 15:05:41 +08:00
|
|
|
while (1) {
|
|
|
|
cp = path + baselen + !!baselen;
|
|
|
|
cp = memchr(cp, '/', path + len - cp);
|
|
|
|
if (!cp)
|
|
|
|
baselen = len;
|
|
|
|
else
|
|
|
|
baselen = cp - path;
|
2012-05-01 19:25:24 +08:00
|
|
|
strbuf_setlen(&sb, 0);
|
|
|
|
strbuf_add(&sb, path, baselen);
|
|
|
|
if (!is_directory(sb.buf))
|
|
|
|
break;
|
|
|
|
if (simplify_away(sb.buf, sb.len, simplify))
|
|
|
|
break;
|
2015-08-19 21:01:25 +08:00
|
|
|
if (treat_one_path(dir, NULL, &sb, baselen, simplify,
|
dir.c: git-status --ignored: don't scan the work tree three times
2013-04-16 03:14:22 +08:00
|
|
|
DT_DIR, NULL) == path_none)
|
2012-05-01 19:25:24 +08:00
|
|
|
break; /* do not recurse into it */
|
|
|
|
if (len <= baselen) {
|
|
|
|
rc = 1;
|
|
|
|
break; /* finished checking */
|
|
|
|
}
|
2010-01-09 15:05:41 +08:00
|
|
|
}
|
2012-05-01 19:25:24 +08:00
|
|
|
strbuf_release(&sb);
|
2013-04-16 03:09:25 +08:00
|
|
|
dir->flags = old_flags;
|
2012-05-01 19:25:24 +08:00
|
|
|
return rc;
|
2010-01-09 15:05:41 +08:00
|
|
|
}
|
|
|
|
|
2015-03-08 18:12:46 +08:00
|
|
|
static const char *get_ident_string(void)
|
|
|
|
{
|
|
|
|
static struct strbuf sb = STRBUF_INIT;
|
|
|
|
struct utsname uts;
|
|
|
|
|
|
|
|
if (sb.len)
|
|
|
|
return sb.buf;
|
2015-07-18 01:09:41 +08:00
|
|
|
if (uname(&uts) < 0)
|
2015-03-08 18:12:46 +08:00
|
|
|
die_errno(_("failed to get kernel name and information"));
|
2016-01-24 23:28:21 +08:00
|
|
|
strbuf_addf(&sb, "Location %s, system %s", get_git_work_tree(),
|
|
|
|
uts.sysname);
|
2015-03-08 18:12:46 +08:00
|
|
|
return sb.buf;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ident_in_untracked(const struct untracked_cache *uc)
|
|
|
|
{
|
2016-01-24 23:28:21 +08:00
|
|
|
/*
|
|
|
|
* Previous git versions may have saved many NUL separated
|
|
|
|
* strings in the "ident" field, but it is insane to manage
|
|
|
|
* many locations, so just take care of the first one.
|
|
|
|
*/
|
2015-03-08 18:12:46 +08:00
|
|
|
|
2016-01-24 23:28:21 +08:00
|
|
|
return !strcmp(uc->ident.buf, get_ident_string());
|
2015-03-08 18:12:46 +08:00
|
|
|
}
|
|
|
|
|
2016-01-24 23:28:21 +08:00
|
|
|
static void set_untracked_ident(struct untracked_cache *uc)
|
2015-03-08 18:12:46 +08:00
|
|
|
{
|
2016-01-24 23:28:21 +08:00
|
|
|
strbuf_reset(&uc->ident);
|
2015-03-08 18:12:46 +08:00
|
|
|
strbuf_addstr(&uc->ident, get_ident_string());
|
2016-01-24 23:28:21 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* This strbuf used to contain a list of NUL separated
|
|
|
|
* strings, so save NUL too for backward compatibility.
|
|
|
|
*/
|
2015-03-08 18:12:46 +08:00
|
|
|
strbuf_addch(&uc->ident, 0);
|
|
|
|
}
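For illustration, with a hypothetical worktree the stored ident would look like this; the trailing NUL byte is the backward-compatibility terminator mentioned in the comment above:

/*
 * Example ident (hypothetical worktree path, Linux kernel name):
 *
 *	"Location /home/alice/repo, system Linux" followed by a NUL byte
 */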
|
|
|
|
|
2016-01-24 23:28:19 +08:00
|
|
|
static void new_untracked_cache(struct index_state *istate)
|
|
|
|
{
|
|
|
|
struct untracked_cache *uc = xcalloc(1, sizeof(*uc));
|
|
|
|
strbuf_init(&uc->ident, 100);
|
|
|
|
uc->exclude_per_dir = ".gitignore";
|
|
|
|
/* should be the same flags used by git-status */
|
|
|
|
uc->dir_flags = DIR_SHOW_OTHER_DIRECTORIES | DIR_HIDE_EMPTY_DIRECTORIES;
|
2016-01-24 23:28:21 +08:00
|
|
|
set_untracked_ident(uc);
|
2016-01-24 23:28:19 +08:00
|
|
|
istate->untracked = uc;
|
2016-01-24 23:28:21 +08:00
|
|
|
istate->cache_changed |= UNTRACKED_CHANGED;
|
2016-01-24 23:28:19 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void add_untracked_cache(struct index_state *istate)
|
|
|
|
{
|
|
|
|
if (!istate->untracked) {
|
|
|
|
new_untracked_cache(istate);
|
2016-01-24 23:28:21 +08:00
|
|
|
} else {
|
|
|
|
if (!ident_in_untracked(istate->untracked)) {
|
|
|
|
free_untracked_cache(istate->untracked);
|
|
|
|
new_untracked_cache(istate);
|
|
|
|
}
|
|
|
|
}
|
2016-01-24 23:28:19 +08:00
|
|
|
}
|
|
|
|
|
2016-01-24 23:28:20 +08:00
|
|
|
void remove_untracked_cache(struct index_state *istate)
|
|
|
|
{
|
|
|
|
if (istate->untracked) {
|
|
|
|
free_untracked_cache(istate->untracked);
|
|
|
|
istate->untracked = NULL;
|
|
|
|
istate->cache_changed |= UNTRACKED_CHANGED;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-03-08 18:12:26 +08:00
|
|
|
static struct untracked_cache_dir *validate_untracked_cache(struct dir_struct *dir,
|
|
|
|
int base_len,
|
|
|
|
const struct pathspec *pathspec)
|
|
|
|
{
|
|
|
|
struct untracked_cache_dir *root;
|
|
|
|
|
2015-03-08 18:12:40 +08:00
|
|
|
if (!dir->untracked || getenv("GIT_DISABLE_UNTRACKED_CACHE"))
|
2015-03-08 18:12:26 +08:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We only support $GIT_DIR/info/exclude and core.excludesfile
|
|
|
|
* as the global ignore rule files. Any other additions
|
|
|
|
* (e.g. from command line) invalidate the cache. This
|
|
|
|
* condition also catches running setup_standard_excludes()
|
|
|
|
* before setting dir->untracked!
|
|
|
|
*/
|
|
|
|
if (dir->unmanaged_exclude_files)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Optimize for the main use case only: whole-tree git
|
|
|
|
* status. More work involved in treat_leading_path() if we
|
|
|
|
* use cache on just a subset of the worktree. pathspec
|
|
|
|
* support could make the matter even worse.
|
|
|
|
*/
|
|
|
|
if (base_len || (pathspec && pathspec->nr))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/* Different set of flags may produce different results */
|
|
|
|
if (dir->flags != dir->untracked->dir_flags ||
|
|
|
|
/*
|
|
|
|
* See treat_directory(), case index_nonexistent. Without
|
|
|
|
* this flag, we may need to also cache .git file content
|
|
|
|
* for the resolve_gitlink_ref() call, which we don't.
|
|
|
|
*/
|
|
|
|
!(dir->flags & DIR_SHOW_OTHER_DIRECTORIES) ||
|
|
|
|
/* We don't support collecting ignore files */
|
|
|
|
(dir->flags & (DIR_SHOW_IGNORED | DIR_SHOW_IGNORED_TOO |
|
|
|
|
DIR_COLLECT_IGNORED)))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we use .gitignore in the cache and now you change it to
|
|
|
|
* .gitexclude, everything will go wrong.
|
|
|
|
*/
|
|
|
|
if (dir->exclude_per_dir != dir->untracked->exclude_per_dir &&
|
|
|
|
strcmp(dir->exclude_per_dir, dir->untracked->exclude_per_dir))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* EXC_CMDL is not considered in the cache. If people set it,
|
|
|
|
* skip the cache.
|
|
|
|
*/
|
|
|
|
if (dir->exclude_list_group[EXC_CMDL].nr)
|
|
|
|
return NULL;
|
|
|
|
|
2015-03-08 18:12:46 +08:00
|
|
|
if (!ident_in_untracked(dir->untracked)) {
|
2016-01-24 23:28:21 +08:00
|
|
|
warning(_("Untracked cache is disabled on this system or location."));
|
2015-03-08 18:12:46 +08:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2015-03-08 18:12:26 +08:00
|
|
|
if (!dir->untracked->root) {
|
|
|
|
const int len = sizeof(*dir->untracked->root);
|
|
|
|
dir->untracked->root = xmalloc(len);
|
|
|
|
memset(dir->untracked->root, 0, len);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Validate $GIT_DIR/info/exclude and core.excludesfile */
|
|
|
|
root = dir->untracked->root;
|
|
|
|
if (hashcmp(dir->ss_info_exclude.sha1,
|
|
|
|
dir->untracked->ss_info_exclude.sha1)) {
|
|
|
|
invalidate_gitignore(dir->untracked, root);
|
|
|
|
dir->untracked->ss_info_exclude = dir->ss_info_exclude;
|
|
|
|
}
|
|
|
|
if (hashcmp(dir->ss_excludes_file.sha1,
|
|
|
|
dir->untracked->ss_excludes_file.sha1)) {
|
|
|
|
invalidate_gitignore(dir->untracked, root);
|
|
|
|
dir->untracked->ss_excludes_file = dir->ss_excludes_file;
|
|
|
|
}
|
2015-03-08 18:12:30 +08:00
|
|
|
|
|
|
|
/* Make sure this directory is not dropped out at saving phase */
|
|
|
|
root->recurse = 1;
|
2015-03-08 18:12:26 +08:00
|
|
|
return root;
|
|
|
|
}
|
|
|
|
|
2013-07-14 16:35:55 +08:00
|
|
|
int read_directory(struct dir_struct *dir, const char *path, int len, const struct pathspec *pathspec)
|
Optimize directory listing with pathspec limiter.
2007-03-31 11:39:30 +08:00
|
|
|
{
|
2008-08-04 15:52:37 +08:00
|
|
|
struct path_simplify *simplify;
|
2015-03-08 18:12:26 +08:00
|
|
|
struct untracked_cache_dir *untracked;
|
2006-05-17 10:46:16 +08:00
|
|
|
|
2013-07-14 16:35:55 +08:00
|
|
|
/*
|
|
|
|
* Check out create_simplify()
|
|
|
|
*/
|
|
|
|
if (pathspec)
|
2013-07-14 16:36:06 +08:00
|
|
|
GUARD_PATHSPEC(pathspec,
|
|
|
|
PATHSPEC_FROMTOP |
|
|
|
|
PATHSPEC_MAXDEPTH |
|
2013-07-14 16:36:08 +08:00
|
|
|
PATHSPEC_LITERAL |
|
2013-07-14 16:36:09 +08:00
|
|
|
PATHSPEC_GLOB |
|
2013-12-06 15:30:48 +08:00
|
|
|
PATHSPEC_ICASE |
|
|
|
|
PATHSPEC_EXCLUDE);
|
2013-07-14 16:35:55 +08:00
|
|
|
|
2009-07-09 10:24:39 +08:00
|
|
|
if (has_symlink_leading_path(path, len))
|
2008-08-04 15:52:37 +08:00
|
|
|
return dir->nr;
|
|
|
|
|
2013-12-06 15:30:48 +08:00
|
|
|
/*
|
|
|
|
* exclude patterns are treated like positive ones in
|
|
|
|
* create_simplify. Usually exclude patterns should be a
|
|
|
|
* subset of positive ones, which has no impact on
|
|
|
|
* create_simplify().
|
|
|
|
*/
|
2013-07-14 16:36:02 +08:00
|
|
|
simplify = create_simplify(pathspec ? pathspec->_raw : NULL);
|
2015-03-08 18:12:26 +08:00
|
|
|
untracked = validate_untracked_cache(dir, len, pathspec);
|
|
|
|
if (!untracked)
|
|
|
|
/*
|
|
|
|
* make sure untracked cache code path is disabled,
|
|
|
|
* e.g. prep_exclude()
|
|
|
|
*/
|
|
|
|
dir->untracked = NULL;
|
2010-01-09 15:05:41 +08:00
|
|
|
if (!len || treat_leading_path(dir, path, len, simplify))
|
2015-03-08 18:12:26 +08:00
|
|
|
read_directory_recursive(dir, path, len, untracked, 0, simplify);
|
Optimize directory listing with pathspec limiter.
2007-03-31 11:39:30 +08:00
|
|
|
free_simplify(simplify);
|
2006-05-17 10:02:14 +08:00
|
|
|
qsort(dir->entries, dir->nr, sizeof(struct dir_entry *), cmp_name);
|
2007-06-11 21:39:50 +08:00
|
|
|
qsort(dir->ignored, dir->ignored_nr, sizeof(struct dir_entry *), cmp_name);
|
2015-03-08 18:12:38 +08:00
|
|
|
if (dir->untracked) {
|
|
|
|
static struct trace_key trace_untracked_stats = TRACE_KEY_INIT(UNTRACKED_STATS);
|
|
|
|
trace_printf_key(&trace_untracked_stats,
|
|
|
|
"node creation: %u\n"
|
|
|
|
"gitignore invalidation: %u\n"
|
|
|
|
"directory invalidation: %u\n"
|
|
|
|
"opendir: %u\n",
|
|
|
|
dir->untracked->dir_created,
|
|
|
|
dir->untracked->gitignore_invalidated,
|
|
|
|
dir->untracked->dir_invalidated,
|
|
|
|
dir->untracked->dir_opened);
|
2015-03-08 18:12:39 +08:00
|
|
|
if (dir->untracked == the_index.untracked &&
|
|
|
|
(dir->untracked->dir_opened ||
|
|
|
|
dir->untracked->gitignore_invalidated ||
|
|
|
|
dir->untracked->dir_invalidated))
|
|
|
|
the_index.cache_changed |= UNTRACKED_CHANGED;
|
|
|
|
if (dir->untracked != the_index.untracked) {
|
|
|
|
free(dir->untracked);
|
|
|
|
dir->untracked = NULL;
|
|
|
|
}
|
2015-03-08 18:12:38 +08:00
|
|
|
}
|
2006-05-17 10:02:14 +08:00
|
|
|
return dir->nr;
|
|
|
|
}
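For context, a typical caller follows roughly this pattern (a hedged sketch of the common builtin usage, not copied from any one command; error handling omitted):

static void list_untracked(const struct pathspec *pathspec)
{
	struct dir_struct dir;
	int i;

	memset(&dir, 0, sizeof(dir));
	dir.flags = DIR_SHOW_OTHER_DIRECTORIES | DIR_HIDE_EMPTY_DIRECTORIES;
	/* if dir.untracked is used, set it before this call,
	 * see validate_untracked_cache() above */
	setup_standard_excludes(&dir);
	read_directory(&dir, "", 0, pathspec);	/* pathspec may be NULL */

	for (i = 0; i < dir.nr; i++)
		printf("untracked: %s\n", dir.entries[i]->name);

	clear_directory(&dir);
}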
|
2006-09-08 16:05:34 +08:00
|
|
|
|
2007-11-29 17:11:46 +08:00
|
|
|
int file_exists(const char *f)
|
2006-09-08 16:05:34 +08:00
|
|
|
{
|
2007-11-29 17:11:46 +08:00
|
|
|
struct stat sb;
|
2007-11-18 17:58:16 +08:00
|
|
|
return lstat(f, &sb) == 0;
|
2006-09-08 16:05:34 +08:00
|
|
|
}
|
2007-08-01 08:29:17 +08:00
|
|
|
|
2015-09-29 00:12:18 +08:00
|
|
|
static int cmp_icase(char a, char b)
|
|
|
|
{
|
|
|
|
if (a == b)
|
|
|
|
return 0;
|
|
|
|
if (ignore_case)
|
|
|
|
return toupper(a) - toupper(b);
|
|
|
|
return a - b;
|
|
|
|
}
|
|
|
|
|
2007-08-01 08:29:17 +08:00
|
|
|
/*
|
2011-03-26 17:04:24 +08:00
|
|
|
* Given two normalized paths (a trailing slash is ok), if subdir is
|
|
|
|
* outside dir, return -1. Otherwise return the offset in subdir that
|
|
|
|
* can be used as relative path to dir.
|
2007-08-01 08:29:17 +08:00
|
|
|
*/
|
2011-03-26 17:04:24 +08:00
|
|
|
int dir_inside_of(const char *subdir, const char *dir)
|
2007-08-01 08:29:17 +08:00
|
|
|
{
|
2011-03-26 17:04:24 +08:00
|
|
|
int offset = 0;
|
2007-08-01 08:29:17 +08:00
|
|
|
|
2011-03-26 17:04:24 +08:00
|
|
|
assert(dir && subdir && *dir && *subdir);
|
2007-08-01 08:29:17 +08:00
|
|
|
|
2015-09-29 00:12:18 +08:00
|
|
|
while (*dir && *subdir && !cmp_icase(*dir, *subdir)) {
|
2007-08-01 08:29:17 +08:00
|
|
|
dir++;
|
2011-03-26 17:04:24 +08:00
|
|
|
subdir++;
|
|
|
|
offset++;
|
2010-05-22 19:13:05 +08:00
|
|
|
}
|
2011-03-26 17:04:24 +08:00
|
|
|
|
|
|
|
/* hel[p]/me vs hel[l]/yeah */
|
|
|
|
if (*dir && *subdir)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (!*subdir)
|
|
|
|
return !*dir ? offset : -1; /* same dir */
|
|
|
|
|
|
|
|
/* foo/[b]ar vs foo/[] */
|
|
|
|
if (is_dir_sep(dir[-1]))
|
|
|
|
return is_dir_sep(subdir[-1]) ? offset : -1;
|
|
|
|
|
|
|
|
/* foo[/]bar vs foo[] */
|
|
|
|
return is_dir_sep(*subdir) ? offset + 1 : -1;
|
2007-08-01 08:29:17 +08:00
|
|
|
}
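A few worked examples; the return values follow directly from the loop and the trailing checks above:

/*
 *   dir_inside_of("a/b/c", "a/b")  ->  4   ("c" is the path relative to "a/b")
 *   dir_inside_of("a/b",   "a/b")  ->  3   (same directory, empty relative path)
 *   dir_inside_of("a/bc",  "a/b")  -> -1   (not inside "a/b")
 *   dir_inside_of("a",     "a/b")  -> -1   (parent of "a/b", not inside it)
 */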
|
|
|
|
|
|
|
|
int is_inside_dir(const char *dir)
|
|
|
|
{
|
2014-07-29 02:30:39 +08:00
|
|
|
char *cwd;
|
|
|
|
int rc;
|
|
|
|
|
2011-03-26 17:04:25 +08:00
|
|
|
if (!dir)
|
|
|
|
return 0;
|
2014-07-29 02:30:39 +08:00
|
|
|
|
|
|
|
cwd = xgetcwd();
|
|
|
|
rc = (dir_inside_of(cwd, dir) >= 0);
|
|
|
|
free(cwd);
|
|
|
|
return rc;
|
2007-08-01 08:29:17 +08:00
|
|
|
}
|
2007-09-28 23:28:54 +08:00
|
|
|
|
2009-01-11 20:19:12 +08:00
|
|
|
int is_empty_dir(const char *path)
|
|
|
|
{
|
|
|
|
DIR *dir = opendir(path);
|
|
|
|
struct dirent *e;
|
|
|
|
int ret = 1;
|
|
|
|
|
|
|
|
if (!dir)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
while ((e = readdir(dir)) != NULL)
|
|
|
|
if (!is_dot_or_dotdot(e->d_name)) {
|
|
|
|
ret = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
closedir(dir);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2012-03-15 16:04:12 +08:00
|
|
|
static int remove_dir_recurse(struct strbuf *path, int flag, int *kept_up)
|
2007-09-28 23:28:54 +08:00
|
|
|
{
|
2009-07-01 06:33:45 +08:00
|
|
|
DIR *dir;
|
2007-09-28 23:28:54 +08:00
|
|
|
struct dirent *e;
|
2012-03-15 16:04:12 +08:00
|
|
|
int ret = 0, original_len = path->len, len, kept_down = 0;
|
2009-07-01 06:33:45 +08:00
|
|
|
int only_empty = (flag & REMOVE_DIR_EMPTY_ONLY);
|
2012-03-15 22:58:54 +08:00
|
|
|
int keep_toplevel = (flag & REMOVE_DIR_KEEP_TOPLEVEL);
|
2009-07-01 06:33:45 +08:00
|
|
|
unsigned char submodule_head[20];
|
2007-09-28 23:28:54 +08:00
|
|
|
|
2009-07-01 06:33:45 +08:00
|
|
|
if ((flag & REMOVE_DIR_KEEP_NESTED_GIT) &&
|
2012-03-15 16:04:12 +08:00
|
|
|
!resolve_gitlink_ref(path->buf, "HEAD", submodule_head)) {
|
2009-07-01 06:33:45 +08:00
|
|
|
/* Do not descend and nuke a nested git work tree. */
|
2012-03-15 16:04:12 +08:00
|
|
|
if (kept_up)
|
|
|
|
*kept_up = 1;
|
2009-07-01 06:33:45 +08:00
|
|
|
return 0;
|
2012-03-15 16:04:12 +08:00
|
|
|
}
|
2009-07-01 06:33:45 +08:00
|
|
|
|
2012-03-15 16:04:12 +08:00
|
|
|
flag &= ~REMOVE_DIR_KEEP_TOPLEVEL;
|
2009-07-01 06:33:45 +08:00
|
|
|
dir = opendir(path->buf);
|
2012-03-15 22:58:54 +08:00
|
|
|
if (!dir) {
|
2014-01-19 06:48:57 +08:00
|
|
|
if (errno == ENOENT)
|
|
|
|
return keep_toplevel ? -1 : 0;
|
|
|
|
else if (errno == EACCES && !keep_toplevel)
|
2014-01-19 06:48:56 +08:00
|
|
|
/*
|
|
|
|
* An empty dir could be removable even if it
|
|
|
|
* is unreadable:
|
|
|
|
*/
|
2012-03-15 22:58:54 +08:00
|
|
|
return rmdir(path->buf);
|
|
|
|
else
|
|
|
|
return -1;
|
|
|
|
}
|
use strbuf_complete to conditionally append slash
When working with paths in strbufs, we frequently want to
ensure that a directory contains a trailing slash before
appending to it. We can shorten this code (and make the
intent more obvious) by calling strbuf_complete.
Most of these cases are trivially identical conversions, but
there are two things to note:
- in a few cases we did not check that the strbuf is
non-empty (which would lead to an out-of-bounds memory
access). These were generally not triggerable in
practice, either from earlier assertions, or typically
because we would have just fed the strbuf to opendir(),
which would choke on an empty path.
- in a few cases we indexed the buffer with "original_len"
or similar, rather than the current sb->len, and it is
not immediately obvious from the diff that they are the
same. In all of these cases, I manually verified that
the strbuf does not change between the assignment and
the strbuf_complete call.
This does not convert cases which look like:
if (sb->len && !is_dir_sep(sb->buf[sb->len - 1]))
strbuf_addch(sb, '/');
as those are obviously semantically different. Some of these
cases arguably should be doing that, but that is out of
scope for this change, which aims purely for cleanup with no
behavior change (and at least it will make such sites easier
to find and examine in the future, as we can grep for
strbuf_complete).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-09-25 05:08:35 +08:00
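For reference, the shape of the conversion (a sketch; strbuf_complete() appends the character only when the buffer is non-empty and does not already end with it):

/* before */
if (path->len && path->buf[path->len - 1] != '/')
	strbuf_addch(path, '/');

/* after */
strbuf_complete(path, '/');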
|
|
|
strbuf_complete(path, '/');
|
2007-09-28 23:28:54 +08:00
|
|
|
|
|
|
|
len = path->len;
|
|
|
|
while ((e = readdir(dir)) != NULL) {
|
|
|
|
struct stat st;
|
2009-01-10 20:07:50 +08:00
|
|
|
if (is_dot_or_dotdot(e->d_name))
|
|
|
|
continue;
|
2007-09-28 23:28:54 +08:00
|
|
|
|
|
|
|
strbuf_setlen(path, len);
|
|
|
|
strbuf_addstr(path, e->d_name);
|
2014-01-19 06:48:57 +08:00
|
|
|
if (lstat(path->buf, &st)) {
|
|
|
|
if (errno == ENOENT)
|
|
|
|
/*
|
|
|
|
* file disappeared, which is what we
|
|
|
|
* wanted anyway
|
|
|
|
*/
|
|
|
|
continue;
|
|
|
|
/* fall thru */
|
|
|
|
} else if (S_ISDIR(st.st_mode)) {
|
2012-03-15 16:04:12 +08:00
|
|
|
if (!remove_dir_recurse(path, flag, &kept_down))
|
2007-09-28 23:28:54 +08:00
|
|
|
continue; /* happy */
|
2014-01-19 06:48:57 +08:00
|
|
|
} else if (!only_empty &&
|
|
|
|
(!unlink(path->buf) || errno == ENOENT)) {
|
2007-09-28 23:28:54 +08:00
|
|
|
continue; /* happy, too */
|
2014-01-19 06:48:57 +08:00
|
|
|
}
|
2007-09-28 23:28:54 +08:00
|
|
|
|
|
|
|
/* path too long, stat fails, or non-directory still exists */
|
|
|
|
ret = -1;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
closedir(dir);
|
|
|
|
|
|
|
|
strbuf_setlen(path, original_len);
|
2012-03-15 16:04:12 +08:00
|
|
|
if (!ret && !keep_toplevel && !kept_down)
|
2014-01-19 06:48:57 +08:00
|
|
|
ret = (!rmdir(path->buf) || errno == ENOENT) ? 0 : -1;
|
2012-03-15 16:04:12 +08:00
|
|
|
else if (kept_up)
|
|
|
|
/*
|
|
|
|
* report to the upper level that it is not an error that we
|
|
|
|
* did not rmdir() our directory.
|
|
|
|
*/
|
|
|
|
*kept_up = !ret;
|
2007-09-28 23:28:54 +08:00
|
|
|
return ret;
|
|
|
|
}
|
core.excludesfile clean-up
There are inconsistencies in the way commands currently handle
the core.excludesfile configuration variable. The problem is
the variable is too new to be noticed by anything other than
git-add and git-status.
* git-ls-files does not notice any of the "ignore" files by
default, as it predates the standardized set of ignore files.
The calling scripts established the convention to use
.git/info/exclude, .gitignore, and later core.excludesfile.
* git-add and git-status know about it because they call
add_excludes_from_file() directly with their own notion of
which standard set of ignore files to use. This is just a
stupid duplication of code that needs to be updated every time
the definition of the standard set of ignore files is
changed.
* git-read-tree takes --exclude-per-directory=<gitignore>,
not because the flexibility was needed. Again, this was
because the option predates the standardization of the ignore
files.
* git-merge-recursive uses hardcoded per-directory .gitignore
and nothing else. git-clean (scripted version) does not
honor core.* because its call to underlying ls-files does not
know about it. git-clean in C (parked in 'pu') doesn't either.
We probably could change git-ls-files to use the standard set
when no excludes are specified on the command line and ignore
processing was asked, or something like that, but that will be a
change in semantics and might break people's scripts in a subtle
way. I am somewhat reluctant to make such a change.
On the other hand, I think it makes perfect sense to fix
git-read-tree, git-merge-recursive and git-clean to follow the
same rule as other commands. I do not think of a valid use case
to give an exclude-per-directory that is nonstandard to
read-tree command, outside a "negative" test in the t1004 test
script.
This patch is the first step to untangle this mess.
The next step would be to teach read-tree, merge-recursive and
clean (in C) to use setup_standard_excludes().
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2007-11-14 16:05:00 +08:00
|
|
|
|
2012-03-15 16:04:12 +08:00
|
|
|
int remove_dir_recursively(struct strbuf *path, int flag)
|
|
|
|
{
|
|
|
|
return remove_dir_recurse(path, flag, NULL);
|
|
|
|
}
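A small usage sketch (the path and flag choice are illustrative only):

static int remove_build_output(void)
{
	struct strbuf path = STRBUF_INIT;
	int ret;

	strbuf_addstr(&path, "some/build/output");	/* hypothetical path */
	ret = remove_dir_recursively(&path, REMOVE_DIR_KEEP_NESTED_GIT);
	if (ret)
		warning("failed to remove '%s'", path.buf);
	strbuf_release(&path);
	return ret;
}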
|
|
|
|
|
memoize common git-path "constant" files
One of the most common uses of git_path() is to pass a
constant, like git_path("MERGE_MSG"). This has two
drawbacks:
1. The return value is a static buffer, and the lifetime
is dependent on other calls to git_path, etc.
2. There's no compile-time checking of the pathname. This
is OK for a one-off (after all, we have to spell it
correctly at least once), but many of these constant
strings appear throughout the code.
This patch introduces a series of functions to "memoize"
these strings, which are essentially globals for the
lifetime of the program. We compute the value once, take
ownership of the buffer, and return the cached value for
subsequent calls. cache.h provides a helper macro for
defining these functions as one-liners, and defines a few
common ones for global use.
Using a macro is a little bit gross, but it does nicely
document the purpose of the functions. If we need to touch
them all later (e.g., because we learned how to change the
git_dir variable at runtime, and need to invalidate all of
the stored values), it will be much easier to have the
complete list.
Note that the shared-global functions have separate, manual
declarations. We could do something clever with the macros
(e.g., expand it to a declaration in some places, and a
declaration _and_ a definition in path.c). But there aren't
that many, and it's probably better to stay away from
too-magical macros.
Likewise, if we abandon the C preprocessor in favor of
generating these with a script, we could get much fancier.
E.g., normalizing "FOO/BAR-BAZ" into "git_path_foo_bar_baz".
But the small amount of saved typing is probably not worth
the resulting confusion to readers who want to grep for the
function's definition.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-08-10 17:38:57 +08:00
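The macro expands, roughly, to a one-shot initializer around git_pathdup() (sketched from the description above; the canonical definition lives in cache.h):

#define GIT_PATH_FUNC(func, filename) \
	const char *func(void) \
	{ \
		static char *ret; \
		if (!ret) \
			ret = git_pathdup(filename); \
		return ret; \
	}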
|
|
|
static GIT_PATH_FUNC(git_path_info_exclude, "info/exclude")
|
|
|
|
|
core.excludesfile clean-up
2007-11-14 16:05:00 +08:00
|
|
|
void setup_standard_excludes(struct dir_struct *dir)
|
|
|
|
{
|
|
|
|
const char *path;
|
|
|
|
|
|
|
|
dir->exclude_per_dir = ".gitignore";
|
2015-04-23 05:31:49 +08:00
|
|
|
|
|
|
|
/* core.excludesfile defaulting to $XDG_CONFIG_HOME/git/ignore */
|
2015-05-06 16:01:00 +08:00
|
|
|
if (!excludes_file)
|
|
|
|
excludes_file = xdg_config_home("ignore");
|
config: allow inaccessible configuration under $HOME
The changes v1.7.12.1~2^2~4 (config: warn on inaccessible files,
2012-08-21) and v1.8.1.1~22^2~2 (config: treat user and xdg config
permission problems as errors, 2012-10-13) were intended to prevent
important configuration (think "[transfer] fsckobjects") from being
ignored when the configuration is unintentionally unreadable (for
example with EIO on a flaky filesystem, or with ENOMEM due to a DoS
attack). Usually ~/.gitconfig and ~/.config/git are readable by the
current user, and if they aren't then it would be easy to fix those
permissions, so the damage from adding this check should have been
minimal.
Unfortunately the access() check often trips when git is being run as
a server. A daemon (such as inetd or git-daemon) starts as "root",
creates a listening socket, and then drops privileges, meaning that
when git commands are invoked they cannot access $HOME and die with
fatal: unable to access '/root/.config/git/config': Permission denied
Any patch to fix this would have one of three problems:
1. We annoy sysadmins who need to take an extra step to handle HOME
when dropping privileges (the current behavior, or any other
proposal that they have to opt into).
2. We annoy sysadmins who want to set HOME when dropping privileges,
either by making what they want to do impossible, or making them
set an extra variable or option to accomplish what used to work
(e.g., a patch to git-daemon to set HOME when --user is passed).
3. We loosen the check, so some cases which might be noteworthy are
not caught.
This patch is of type (3).
Treat user and xdg configuration that are inaccessible due to
permissions (EACCES) as though no user configuration was provided at
all.
An alternative method would be to check if $HOME is readable, but that
would not help in cases where the user who dropped privileges had a
globally readable HOME with only .config or .gitconfig being private.
This does not change the behavior when /etc/gitconfig or .git/config
is unreadable (since those are more serious configuration errors),
nor when ~/.gitconfig or ~/.config/git is unreadable due to problems
other than permissions.
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Improved-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-04-13 05:03:18 +08:00
|
|
|
if (excludes_file && !access_or_warn(excludes_file, R_OK, 0))
|
2015-05-27 04:24:45 +08:00
|
|
|
add_excludes_from_file_1(dir, excludes_file,
|
|
|
|
dir->untracked ? &dir->ss_excludes_file : NULL);
|
2015-04-23 05:31:49 +08:00
|
|
|
|
|
|
|
/* per repository user preference */
|
memoize common git-path "constant" files
2015-08-10 17:38:57 +08:00
|
|
|
path = git_path_info_exclude();
|
2015-04-23 05:31:49 +08:00
|
|
|
if (!access_or_warn(path, R_OK, 0))
|
untracked cache: record .gitignore information and dir hierarchy
2015-03-08 18:12:25 +08:00
|
|
|
add_excludes_from_file_1(dir, path,
|
|
|
|
dir->untracked ? &dir->ss_info_exclude : NULL);
|
core.excludesfile clean-up
There are inconsistencies in the way commands currently handle
the core.excludesfile configuration variable. The problem is
the variable is too new to be noticed by anything other than
git-add and git-status.
* git-ls-files does not notice any of the "ignore" files by
default, as it predates the standardized set of ignore files.
The calling scripts established the convention to use
.git/info/exclude, .gitignore, and later core.excludesfile.
* git-add and git-status know about it because they call
add_excludes_from_file() directly with their own notion of
which standard set of ignore files to use. This is just a
stupid duplication of code that need to be updated every time
the definition of the standard set of ignore files is
changed.
* git-read-tree takes --exclude-per-directory=<gitignore>,
not because the flexibility was needed. Again, this was
because the option predates the standardization of the ignore
files.
* git-merge-recursive uses hardcoded per-directory .gitignore
and nothing else. git-clean (scripted version) does not
honor core.* because its call to underlying ls-files does not
know about it. git-clean in C (parked in 'pu') doesn't either.
We probably could change git-ls-files to use the standard set
when no excludes are specified on the command line and ignore
processing is asked for, or something like that, but that would be
a change in semantics and might break people's scripts in a subtle
way. I am somewhat reluctant to make such a change.
On the other hand, I think it makes perfect sense to fix
git-read-tree, git-merge-recursive and git-clean to follow the
same rule as other commands. I cannot think of a valid use case
for giving a nonstandard exclude-per-directory to the read-tree
command, outside a "negative" test in the t1004 test
script.
This patch is the first step to untangle this mess.
The next step would be to teach read-tree, merge-recursive and
clean (in C) to use setup_standard_excludes().
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2007-11-14 16:05:00 +08:00
|
|
|
}
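The clean-up described above funnels callers through setup_standard_excludes(). As a hedged sketch of how a converted builtin might use it (the helper name below is made up, and git's usual headers such as cache.h and dir.h are assumed; the flag shown is only an example):

static void setup_like_other_commands(struct dir_struct *dir)
{
	memset(dir, 0, sizeof(*dir));
	dir->flags |= DIR_SHOW_OTHER_DIRECTORIES;	/* example flag only */
	/* pulls in .git/info/exclude, core.excludesfile and per-dir .gitignore */
	setup_standard_excludes(dir);
}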
|
2008-09-27 06:56:46 +08:00
|
|
|
|
|
|
|
int remove_path(const char *name)
|
|
|
|
{
|
|
|
|
char *slash;
|
|
|
|
|
2013-04-05 03:03:35 +08:00
|
|
|
if (unlink(name) && errno != ENOENT && errno != ENOTDIR)
|
2008-09-27 06:56:46 +08:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
slash = strrchr(name, '/');
|
|
|
|
if (slash) {
|
|
|
|
char *dirs = xstrdup(name);
|
|
|
|
slash = dirs + (slash - name);
|
|
|
|
do {
|
|
|
|
*slash = '\0';
|
2010-02-19 13:57:21 +08:00
|
|
|
} while (rmdir(dirs) == 0 && (slash = strrchr(dirs, '/')));
|
2008-09-27 06:56:46 +08:00
|
|
|
free(dirs);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
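A hedged usage note for remove_path() above (the path is made up, and die_errno() is assumed to be available as elsewhere in git): removing the file also rmdir()s any leading directories that become empty as a result.

	if (remove_path("a/b/c/file"))
		die_errno("cannot remove a/b/c/file");
	/* "a/b/c", "a/b" and "a" are gone too if they ended up empty */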
|
|
|
|
|
2013-01-07 00:58:05 +08:00
|
|
|
/*
|
|
|
|
* Frees memory within dir which was allocated for exclude lists and
|
|
|
|
* the exclude_stack. Does not free dir itself.
|
|
|
|
*/
|
|
|
|
void clear_directory(struct dir_struct *dir)
|
|
|
|
{
|
|
|
|
int i, j;
|
|
|
|
struct exclude_list_group *group;
|
|
|
|
struct exclude_list *el;
|
|
|
|
struct exclude_stack *stk;
|
|
|
|
|
|
|
|
for (i = EXC_CMDL; i <= EXC_FILE; i++) {
|
|
|
|
group = &dir->exclude_list_group[i];
|
|
|
|
for (j = 0; j < group->nr; j++) {
|
|
|
|
el = &group->el[j];
|
|
|
|
if (i == EXC_DIRS)
|
|
|
|
free((char *)el->src);
|
|
|
|
clear_exclude_list(el);
|
|
|
|
}
|
|
|
|
free(group->el);
|
|
|
|
}
|
|
|
|
|
|
|
|
stk = dir->exclude_stack;
|
|
|
|
while (stk) {
|
|
|
|
struct exclude_stack *prev = stk->prev;
|
|
|
|
free(stk);
|
|
|
|
stk = prev;
|
|
|
|
}
|
2014-07-14 17:50:22 +08:00
|
|
|
strbuf_release(&dir->basebuf);
|
2013-01-07 00:58:05 +08:00
|
|
|
}
|
2015-03-08 18:12:33 +08:00
|
|
|
|
|
|
|
struct ondisk_untracked_cache {
|
|
|
|
struct stat_data info_exclude_stat;
|
|
|
|
struct stat_data excludes_file_stat;
|
|
|
|
uint32_t dir_flags;
|
|
|
|
unsigned char info_exclude_sha1[20];
|
|
|
|
unsigned char excludes_file_sha1[20];
|
|
|
|
char exclude_per_dir[FLEX_ARRAY];
|
|
|
|
};
|
|
|
|
|
|
|
|
#define ouc_size(len) (offsetof(struct ondisk_untracked_cache, exclude_per_dir) + len + 1)
|
|
|
|
|
|
|
|
struct write_data {
|
|
|
|
int index; /* number of written untracked_cache_dir */
|
|
|
|
struct ewah_bitmap *check_only; /* from untracked_cache_dir */
|
|
|
|
struct ewah_bitmap *valid; /* from untracked_cache_dir */
|
|
|
|
struct ewah_bitmap *sha1_valid; /* set if exclude_sha1 is not null */
|
|
|
|
struct strbuf out;
|
|
|
|
struct strbuf sb_stat;
|
|
|
|
struct strbuf sb_sha1;
|
|
|
|
};
|
|
|
|
|
|
|
|
static void stat_data_to_disk(struct stat_data *to, const struct stat_data *from)
|
|
|
|
{
|
|
|
|
to->sd_ctime.sec = htonl(from->sd_ctime.sec);
|
|
|
|
to->sd_ctime.nsec = htonl(from->sd_ctime.nsec);
|
|
|
|
to->sd_mtime.sec = htonl(from->sd_mtime.sec);
|
|
|
|
to->sd_mtime.nsec = htonl(from->sd_mtime.nsec);
|
|
|
|
to->sd_dev = htonl(from->sd_dev);
|
|
|
|
to->sd_ino = htonl(from->sd_ino);
|
|
|
|
to->sd_uid = htonl(from->sd_uid);
|
|
|
|
to->sd_gid = htonl(from->sd_gid);
|
|
|
|
to->sd_size = htonl(from->sd_size);
|
|
|
|
}
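One note on the helper above: the fields are converted with htonl(), so the on-disk stat data is big-endian; stat_data_from_disk() further down reads the same bytes back with get_be32(), which keeps writer and reader in agreement regardless of host byte order.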
|
|
|
|
|
|
|
|
static void write_one_dir(struct untracked_cache_dir *untracked,
|
|
|
|
struct write_data *wd)
|
|
|
|
{
|
|
|
|
struct stat_data stat_data;
|
|
|
|
struct strbuf *out = &wd->out;
|
|
|
|
unsigned char intbuf[16];
|
|
|
|
unsigned int intlen, value;
|
|
|
|
int i = wd->index++;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* untracked_nr should be reset whenever valid is clear, but
|
|
|
|
* for safety..
|
|
|
|
*/
|
|
|
|
if (!untracked->valid) {
|
|
|
|
untracked->untracked_nr = 0;
|
|
|
|
untracked->check_only = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (untracked->check_only)
|
|
|
|
ewah_set(wd->check_only, i);
|
|
|
|
if (untracked->valid) {
|
|
|
|
ewah_set(wd->valid, i);
|
|
|
|
stat_data_to_disk(&stat_data, &untracked->stat_data);
|
|
|
|
strbuf_add(&wd->sb_stat, &stat_data, sizeof(stat_data));
|
|
|
|
}
|
|
|
|
if (!is_null_sha1(untracked->exclude_sha1)) {
|
|
|
|
ewah_set(wd->sha1_valid, i);
|
|
|
|
strbuf_add(&wd->sb_sha1, untracked->exclude_sha1, 20);
|
|
|
|
}
|
|
|
|
|
|
|
|
intlen = encode_varint(untracked->untracked_nr, intbuf);
|
|
|
|
strbuf_add(out, intbuf, intlen);
|
|
|
|
|
|
|
|
/* skip non-recurse directories */
|
|
|
|
for (i = 0, value = 0; i < untracked->dirs_nr; i++)
|
|
|
|
if (untracked->dirs[i]->recurse)
|
|
|
|
value++;
|
|
|
|
intlen = encode_varint(value, intbuf);
|
|
|
|
strbuf_add(out, intbuf, intlen);
|
|
|
|
|
|
|
|
strbuf_add(out, untracked->name, strlen(untracked->name) + 1);
|
|
|
|
|
|
|
|
for (i = 0; i < untracked->untracked_nr; i++)
|
|
|
|
strbuf_add(out, untracked->untracked[i],
|
|
|
|
strlen(untracked->untracked[i]) + 1);
|
|
|
|
|
|
|
|
for (i = 0; i < untracked->dirs_nr; i++)
|
|
|
|
if (untracked->dirs[i]->recurse)
|
|
|
|
write_one_dir(untracked->dirs[i], wd);
|
|
|
|
}
|
|
|
|
|
|
|
|
void write_untracked_extension(struct strbuf *out, struct untracked_cache *untracked)
|
|
|
|
{
|
|
|
|
struct ondisk_untracked_cache *ouc;
|
|
|
|
struct write_data wd;
|
|
|
|
unsigned char varbuf[16];
|
2016-02-23 06:44:42 +08:00
|
|
|
int varint_len;
|
|
|
|
size_t len = strlen(untracked->exclude_per_dir);
|
|
|
|
|
|
|
|
FLEX_ALLOC_MEM(ouc, exclude_per_dir, untracked->exclude_per_dir, len);
|
2015-03-08 18:12:33 +08:00
|
|
|
stat_data_to_disk(&ouc->info_exclude_stat, &untracked->ss_info_exclude.stat);
|
|
|
|
stat_data_to_disk(&ouc->excludes_file_stat, &untracked->ss_excludes_file.stat);
|
|
|
|
hashcpy(ouc->info_exclude_sha1, untracked->ss_info_exclude.sha1);
|
|
|
|
hashcpy(ouc->excludes_file_sha1, untracked->ss_excludes_file.sha1);
|
|
|
|
ouc->dir_flags = htonl(untracked->dir_flags);
|
2015-03-08 18:12:46 +08:00
|
|
|
|
|
|
|
varint_len = encode_varint(untracked->ident.len, varbuf);
|
|
|
|
strbuf_add(out, varbuf, varint_len);
|
|
|
|
strbuf_add(out, untracked->ident.buf, untracked->ident.len);
|
|
|
|
|
2015-03-08 18:12:33 +08:00
|
|
|
strbuf_add(out, ouc, ouc_size(len));
|
|
|
|
free(ouc);
|
|
|
|
ouc = NULL;
|
|
|
|
|
|
|
|
if (!untracked->root) {
|
|
|
|
varint_len = encode_varint(0, varbuf);
|
|
|
|
strbuf_add(out, varbuf, varint_len);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
wd.index = 0;
|
|
|
|
wd.check_only = ewah_new();
|
|
|
|
wd.valid = ewah_new();
|
|
|
|
wd.sha1_valid = ewah_new();
|
|
|
|
strbuf_init(&wd.out, 1024);
|
|
|
|
strbuf_init(&wd.sb_stat, 1024);
|
|
|
|
strbuf_init(&wd.sb_sha1, 1024);
|
|
|
|
write_one_dir(untracked->root, &wd);
|
|
|
|
|
|
|
|
varint_len = encode_varint(wd.index, varbuf);
|
|
|
|
strbuf_add(out, varbuf, varint_len);
|
|
|
|
strbuf_addbuf(out, &wd.out);
|
|
|
|
ewah_serialize_strbuf(wd.valid, out);
|
|
|
|
ewah_serialize_strbuf(wd.check_only, out);
|
|
|
|
ewah_serialize_strbuf(wd.sha1_valid, out);
|
|
|
|
strbuf_addbuf(out, &wd.sb_stat);
|
|
|
|
strbuf_addbuf(out, &wd.sb_sha1);
|
|
|
|
strbuf_addch(out, '\0'); /* safe guard for string lists */
|
|
|
|
|
|
|
|
ewah_free(wd.valid);
|
|
|
|
ewah_free(wd.check_only);
|
|
|
|
ewah_free(wd.sha1_valid);
|
|
|
|
strbuf_release(&wd.out);
|
|
|
|
strbuf_release(&wd.sb_stat);
|
|
|
|
strbuf_release(&wd.sb_sha1);
|
|
|
|
}
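For reference, a summary of the byte layout produced by write_untracked_extension() above, as read directly from the code (read_untracked_extension() below consumes the same layout):

/*
 * Serialized untracked-cache extension, in order:
 *
 *   varint   length of the ident string
 *   ident    that many bytes
 *   struct ondisk_untracked_cache (ouc_size(len) bytes): stat data and
 *            SHA-1 for $GIT_DIR/info/exclude and core.excludesfile,
 *            dir_flags, NUL-terminated exclude_per_dir name
 *   varint   number of directory records; 0 means there is no root and
 *            the extension ends here
 *   records  one per directory, in pre-order (see write_one_dir):
 *            varint untracked_nr, varint number of recursed subdirs,
 *            NUL-terminated directory name, then untracked_nr
 *            NUL-terminated untracked names
 *   ewah     three bitmaps indexed by record order: valid, check_only,
 *            sha1_valid
 *   stat     one stat_data per directory whose valid bit is set
 *   sha1     20 bytes per directory whose sha1_valid bit is set
 *   NUL      one trailing safeguard byte
 */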
|
2015-03-08 18:12:34 +08:00
|
|
|
|
|
|
|
static void free_untracked(struct untracked_cache_dir *ucd)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
if (!ucd)
|
|
|
|
return;
|
|
|
|
for (i = 0; i < ucd->dirs_nr; i++)
|
|
|
|
free_untracked(ucd->dirs[i]);
|
|
|
|
for (i = 0; i < ucd->untracked_nr; i++)
|
|
|
|
free(ucd->untracked[i]);
|
|
|
|
free(ucd->untracked);
|
|
|
|
free(ucd->dirs);
|
|
|
|
free(ucd);
|
|
|
|
}
|
|
|
|
|
|
|
|
void free_untracked_cache(struct untracked_cache *uc)
|
|
|
|
{
|
|
|
|
if (uc)
|
|
|
|
free_untracked(uc->root);
|
|
|
|
free(uc);
|
|
|
|
}
|
|
|
|
|
|
|
|
struct read_data {
|
|
|
|
int index;
|
|
|
|
struct untracked_cache_dir **ucd;
|
|
|
|
struct ewah_bitmap *check_only;
|
|
|
|
struct ewah_bitmap *valid;
|
|
|
|
struct ewah_bitmap *sha1_valid;
|
|
|
|
const unsigned char *data;
|
|
|
|
const unsigned char *end;
|
|
|
|
};
|
|
|
|
|
|
|
|
static void stat_data_from_disk(struct stat_data *to, const struct stat_data *from)
|
|
|
|
{
|
|
|
|
to->sd_ctime.sec = get_be32(&from->sd_ctime.sec);
|
|
|
|
to->sd_ctime.nsec = get_be32(&from->sd_ctime.nsec);
|
|
|
|
to->sd_mtime.sec = get_be32(&from->sd_mtime.sec);
|
|
|
|
to->sd_mtime.nsec = get_be32(&from->sd_mtime.nsec);
|
|
|
|
to->sd_dev = get_be32(&from->sd_dev);
|
|
|
|
to->sd_ino = get_be32(&from->sd_ino);
|
|
|
|
to->sd_uid = get_be32(&from->sd_uid);
|
|
|
|
to->sd_gid = get_be32(&from->sd_gid);
|
|
|
|
to->sd_size = get_be32(&from->sd_size);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int read_one_dir(struct untracked_cache_dir **untracked_,
|
|
|
|
struct read_data *rd)
|
|
|
|
{
|
|
|
|
struct untracked_cache_dir ud, *untracked;
|
|
|
|
const unsigned char *next, *data = rd->data, *end = rd->end;
|
|
|
|
unsigned int value;
|
|
|
|
int i, len;
|
|
|
|
|
|
|
|
memset(&ud, 0, sizeof(ud));
|
|
|
|
|
|
|
|
next = data;
|
|
|
|
value = decode_varint(&next);
|
|
|
|
if (next > end)
|
|
|
|
return -1;
|
|
|
|
ud.recurse = 1;
|
|
|
|
ud.untracked_alloc = value;
|
|
|
|
ud.untracked_nr = value;
|
|
|
|
if (ud.untracked_nr)
|
2016-02-23 06:44:25 +08:00
|
|
|
ALLOC_ARRAY(ud.untracked, ud.untracked_nr);
|
2015-03-08 18:12:34 +08:00
|
|
|
data = next;
|
|
|
|
|
|
|
|
next = data;
|
|
|
|
ud.dirs_alloc = ud.dirs_nr = decode_varint(&next);
|
|
|
|
if (next > end)
|
|
|
|
return -1;
|
2016-02-23 06:44:25 +08:00
|
|
|
ALLOC_ARRAY(ud.dirs, ud.dirs_nr);
|
2015-03-08 18:12:34 +08:00
|
|
|
data = next;
|
|
|
|
|
|
|
|
len = strlen((const char *)data);
|
|
|
|
next = data + len + 1;
|
|
|
|
if (next > rd->end)
|
|
|
|
return -1;
|
2016-02-23 06:44:35 +08:00
|
|
|
*untracked_ = untracked = xmalloc(st_add(sizeof(*untracked), len));
|
2015-03-08 18:12:34 +08:00
|
|
|
memcpy(untracked, &ud, sizeof(ud));
|
|
|
|
memcpy(untracked->name, data, len + 1);
|
|
|
|
data = next;
|
|
|
|
|
|
|
|
for (i = 0; i < untracked->untracked_nr; i++) {
|
|
|
|
len = strlen((const char *)data);
|
|
|
|
next = data + len + 1;
|
|
|
|
if (next > rd->end)
|
|
|
|
return -1;
|
|
|
|
untracked->untracked[i] = xstrdup((const char*)data);
|
|
|
|
data = next;
|
|
|
|
}
|
|
|
|
|
|
|
|
rd->ucd[rd->index++] = untracked;
|
|
|
|
rd->data = data;
|
|
|
|
|
|
|
|
for (i = 0; i < untracked->dirs_nr; i++) {
|
|
|
|
len = read_one_dir(untracked->dirs + i, rd);
|
|
|
|
if (len < 0)
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void set_check_only(size_t pos, void *cb)
|
|
|
|
{
|
|
|
|
struct read_data *rd = cb;
|
|
|
|
struct untracked_cache_dir *ud = rd->ucd[pos];
|
|
|
|
ud->check_only = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void read_stat(size_t pos, void *cb)
|
|
|
|
{
|
|
|
|
struct read_data *rd = cb;
|
|
|
|
struct untracked_cache_dir *ud = rd->ucd[pos];
|
|
|
|
if (rd->data + sizeof(struct stat_data) > rd->end) {
|
|
|
|
rd->data = rd->end + 1;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
stat_data_from_disk(&ud->stat_data, (struct stat_data *)rd->data);
|
|
|
|
rd->data += sizeof(struct stat_data);
|
|
|
|
ud->valid = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void read_sha1(size_t pos, void *cb)
|
|
|
|
{
|
|
|
|
struct read_data *rd = cb;
|
|
|
|
struct untracked_cache_dir *ud = rd->ucd[pos];
|
|
|
|
if (rd->data + 20 > rd->end) {
|
|
|
|
rd->data = rd->end + 1;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
hashcpy(ud->exclude_sha1, rd->data);
|
|
|
|
rd->data += 20;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void load_sha1_stat(struct sha1_stat *sha1_stat,
|
|
|
|
const struct stat_data *stat,
|
|
|
|
const unsigned char *sha1)
|
|
|
|
{
|
|
|
|
stat_data_from_disk(&sha1_stat->stat, stat);
|
|
|
|
hashcpy(sha1_stat->sha1, sha1);
|
|
|
|
sha1_stat->valid = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct untracked_cache *read_untracked_extension(const void *data, unsigned long sz)
|
|
|
|
{
|
|
|
|
const struct ondisk_untracked_cache *ouc;
|
|
|
|
struct untracked_cache *uc;
|
|
|
|
struct read_data rd;
|
|
|
|
const unsigned char *next = data, *end = (const unsigned char *)data + sz;
|
2015-03-08 18:12:46 +08:00
|
|
|
const char *ident;
|
|
|
|
int ident_len, len;
|
2015-03-08 18:12:34 +08:00
|
|
|
|
|
|
|
if (sz <= 1 || end[-1] != '\0')
|
|
|
|
return NULL;
|
|
|
|
end--;
|
|
|
|
|
2015-03-08 18:12:46 +08:00
|
|
|
ident_len = decode_varint(&next);
|
|
|
|
if (next + ident_len > end)
|
|
|
|
return NULL;
|
|
|
|
ident = (const char *)next;
|
|
|
|
next += ident_len;
|
|
|
|
|
2015-03-08 18:12:34 +08:00
|
|
|
ouc = (const struct ondisk_untracked_cache *)next;
|
|
|
|
if (next + ouc_size(0) > end)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
uc = xcalloc(1, sizeof(*uc));
|
2015-03-08 18:12:46 +08:00
|
|
|
strbuf_init(&uc->ident, ident_len);
|
|
|
|
strbuf_add(&uc->ident, ident, ident_len);
|
2015-03-08 18:12:34 +08:00
|
|
|
load_sha1_stat(&uc->ss_info_exclude, &ouc->info_exclude_stat,
|
|
|
|
ouc->info_exclude_sha1);
|
|
|
|
load_sha1_stat(&uc->ss_excludes_file, &ouc->excludes_file_stat,
|
|
|
|
ouc->excludes_file_sha1);
|
|
|
|
uc->dir_flags = get_be32(&ouc->dir_flags);
|
|
|
|
uc->exclude_per_dir = xstrdup(ouc->exclude_per_dir);
|
|
|
|
/* NUL after exclude_per_dir is covered by sizeof(*ouc) */
|
|
|
|
next += ouc_size(strlen(ouc->exclude_per_dir));
|
|
|
|
if (next >= end)
|
|
|
|
goto done2;
|
|
|
|
|
|
|
|
len = decode_varint(&next);
|
|
|
|
if (next > end || len == 0)
|
|
|
|
goto done2;
|
|
|
|
|
|
|
|
rd.valid = ewah_new();
|
|
|
|
rd.check_only = ewah_new();
|
|
|
|
rd.sha1_valid = ewah_new();
|
|
|
|
rd.data = next;
|
|
|
|
rd.end = end;
|
|
|
|
rd.index = 0;
|
2016-02-23 06:44:25 +08:00
|
|
|
ALLOC_ARRAY(rd.ucd, len);
|
2015-03-08 18:12:34 +08:00
|
|
|
|
|
|
|
if (read_one_dir(&uc->root, &rd) || rd.index != len)
|
|
|
|
goto done;
|
|
|
|
|
|
|
|
next = rd.data;
|
|
|
|
len = ewah_read_mmap(rd.valid, next, end - next);
|
|
|
|
if (len < 0)
|
|
|
|
goto done;
|
|
|
|
|
|
|
|
next += len;
|
|
|
|
len = ewah_read_mmap(rd.check_only, next, end - next);
|
|
|
|
if (len < 0)
|
|
|
|
goto done;
|
|
|
|
|
|
|
|
next += len;
|
|
|
|
len = ewah_read_mmap(rd.sha1_valid, next, end - next);
|
|
|
|
if (len < 0)
|
|
|
|
goto done;
|
|
|
|
|
|
|
|
ewah_each_bit(rd.check_only, set_check_only, &rd);
|
|
|
|
rd.data = next + len;
|
|
|
|
ewah_each_bit(rd.valid, read_stat, &rd);
|
|
|
|
ewah_each_bit(rd.sha1_valid, read_sha1, &rd);
|
|
|
|
next = rd.data;
|
|
|
|
|
|
|
|
done:
|
|
|
|
free(rd.ucd);
|
|
|
|
ewah_free(rd.valid);
|
|
|
|
ewah_free(rd.check_only);
|
|
|
|
ewah_free(rd.sha1_valid);
|
|
|
|
done2:
|
|
|
|
if (next != end) {
|
|
|
|
free_untracked_cache(uc);
|
|
|
|
uc = NULL;
|
|
|
|
}
|
|
|
|
return uc;
|
|
|
|
}
|
2015-03-08 18:12:35 +08:00
|
|
|
|
2015-08-19 21:01:26 +08:00
|
|
|
static void invalidate_one_directory(struct untracked_cache *uc,
|
|
|
|
struct untracked_cache_dir *ucd)
|
|
|
|
{
|
|
|
|
uc->dir_invalidated++;
|
|
|
|
ucd->valid = 0;
|
|
|
|
ucd->untracked_nr = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Normally when an entry is added or removed from a directory,
|
|
|
|
* invalidating that directory is enough. No need to touch its
|
|
|
|
* ancestors. When a directory is shown as "foo/bar/" in git-status
|
|
|
|
* however, deleting or adding an entry may have cascading effect.
|
|
|
|
*
|
|
|
|
* Say the "foo/bar/file" has become untracked, we need to tell the
|
|
|
|
* untracked_cache_dir of "foo" that "bar/" is not an untracked
|
|
|
|
* directory any more (because "bar" is managed by foo as an untracked
|
|
|
|
* "file").
|
|
|
|
*
|
|
|
|
* Similarly, if "foo/bar/file" moves from untracked to tracked and it
|
|
|
|
* was the last untracked entry in the entire "foo", we should show
|
|
|
|
* "foo/" instead. Which means we have to invalidate past "bar" up to
|
|
|
|
* "foo".
|
|
|
|
*
|
|
|
|
* This function traverses all directories from root to leaf. If there
|
|
|
|
* is a chance of one of the above cases happening, we invalidate back
|
|
|
|
* to root. Otherwise we just invalidate the leaf. There may be a more
|
|
|
|
* sophisticated way than checking for SHOW_OTHER_DIRECTORIES to
|
|
|
|
* detect these cases and avoid unnecessary invalidation, for example,
|
|
|
|
* checking for the untracked entry named "bar/" in "foo", but for now
|
|
|
|
* stick to something safe and simple.
|
|
|
|
*/
|
|
|
|
static int invalidate_one_component(struct untracked_cache *uc,
|
|
|
|
struct untracked_cache_dir *dir,
|
|
|
|
const char *path, int len)
|
|
|
|
{
|
|
|
|
const char *rest = strchr(path, '/');
|
|
|
|
|
|
|
|
if (rest) {
|
|
|
|
int component_len = rest - path;
|
|
|
|
struct untracked_cache_dir *d =
|
|
|
|
lookup_untracked(uc, dir, path, component_len);
|
|
|
|
int ret =
|
|
|
|
invalidate_one_component(uc, d, rest + 1,
|
|
|
|
len - (component_len + 1));
|
|
|
|
if (ret)
|
|
|
|
invalidate_one_directory(uc, dir);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
invalidate_one_directory(uc, dir);
|
|
|
|
return uc->dir_flags & DIR_SHOW_OTHER_DIRECTORIES;
|
|
|
|
}
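A short trace of the recursion above, using the "foo/bar/file" example from the comment (illustrative only):

/*
 * untracked_cache_invalidate_path(istate, "foo/bar/file") leads to:
 *
 *   invalidate_one_component(uc, root, "foo/bar/file")  recurse into "foo"
 *   invalidate_one_component(uc, foo,  "bar/file")      recurse into "bar"
 *   invalidate_one_component(uc, bar,  "file")          leaf: invalidate the
 *       ucd for "foo/bar", return uc->dir_flags & DIR_SHOW_OTHER_DIRECTORIES
 *
 * If DIR_SHOW_OTHER_DIRECTORIES is set, the non-zero return value then
 * invalidates "foo" and the root on the way back up; otherwise only
 * "foo/bar" is invalidated.
 */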
|
|
|
|
|
2015-03-08 18:12:35 +08:00
|
|
|
void untracked_cache_invalidate_path(struct index_state *istate,
|
|
|
|
const char *path)
|
|
|
|
{
|
|
|
|
if (!istate->untracked || !istate->untracked->root)
|
|
|
|
return;
|
2015-08-19 21:01:26 +08:00
|
|
|
invalidate_one_component(istate->untracked, istate->untracked->root,
|
|
|
|
path, strlen(path));
|
2015-03-08 18:12:35 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void untracked_cache_remove_from_index(struct index_state *istate,
|
|
|
|
const char *path)
|
|
|
|
{
|
|
|
|
untracked_cache_invalidate_path(istate, path);
|
|
|
|
}
|
|
|
|
|
|
|
|
void untracked_cache_add_to_index(struct index_state *istate,
|
|
|
|
const char *path)
|
|
|
|
{
|
|
|
|
untracked_cache_invalidate_path(istate, path);
|
|
|
|
}
|