mirror of
https://github.com/git/git.git
synced 2024-11-25 19:04:18 +08:00
125fd98434
The rule has always been that a cache entry that is ce_uptodate(ce) means that we already have checked the work tree entity and we know there is no change in the work tree compared to the index, and nobody should have to double check. Note that false ce_uptodate(ce) does not mean it is known to be dirty---it only means we don't know if it is clean. There are a few codepaths (refresh-index and preload-index are among them) that mark a cache entry as up-to-date based solely on the return value from ie_match_stat(); this function uses lstat() to see if the work tree entity has been touched, and for a submodule entry, if its HEAD points at the same commit as the commit recorded in the index of the superproject (a submodule that is not even cloned is considered clean). A submodule is no longer considered unmodified merely because its HEAD matches the index of the superproject these days, in order to prevent people from forgetting to commit in the submodule and updating the superproject index with the new submodule commit, before committing the state in the superproject. However, the patch to do so didn't update the codepath that marks cache entries up-to-date based on the updated definition and instead worked around it by saying "we don't trust the return value of ce_uptodate() for submodules." This makes ce_uptodate() trustworthy again by not marking submodule entries up-to-date. The next step _could_ be to introduce a few "in-core" flag bits to cache_entry structure to record "this entry is _known_ to be dirty", call is_submodule_modified() from ie_match_stat(), and use these new bits to avoid running this rather expensive check more than once, but that can be a separate patch. Signed-off-by: Junio C Hamano <gitster@pobox.com>
107 lines
2.3 KiB
C
107 lines
2.3 KiB
C
/*
|
|
* Copyright (C) 2008 Linus Torvalds
|
|
*/
|
|
#include "cache.h"
|
|
|
|
#ifdef NO_PTHREADS
|
|
/*
 * Fallback for builds without pthread support: preloading the index
 * is purely a performance optimization (warming lstat data in
 * parallel), so when threads are unavailable we simply do nothing.
 */
static void preload_index(struct index_state *index, const char **pathspec)
{
	; /* nothing */
}
|
|
#else
|
|
|
|
#include <pthread.h>
|
|
|
|
/*
|
|
* Mostly randomly chosen maximum thread counts: we
|
|
* cap the parallelism to 20 threads, and we want
|
|
* to have at least 500 lstat's per thread for it to
|
|
* be worth starting a thread.
|
|
*/
|
|
#define MAX_PARALLEL (20)
|
|
#define THREAD_COST (500)
|
|
|
|
/*
 * Per-thread work description: each worker thread stats the slice of
 * index entries [offset, offset + nr), limited to the given pathspec.
 */
struct thread_data {
	pthread_t pthread;
	struct index_state *index;
	const char **pathspec;
	int offset, nr;	/* slice of index->cache this thread handles */
};
|
|
|
|
static void *preload_thread(void *_data)
|
|
{
|
|
int nr;
|
|
struct thread_data *p = _data;
|
|
struct index_state *index = p->index;
|
|
struct cache_entry **cep = index->cache + p->offset;
|
|
struct cache_def cache;
|
|
|
|
memset(&cache, 0, sizeof(cache));
|
|
nr = p->nr;
|
|
if (nr + p->offset > index->cache_nr)
|
|
nr = index->cache_nr - p->offset;
|
|
|
|
do {
|
|
struct cache_entry *ce = *cep++;
|
|
struct stat st;
|
|
|
|
if (ce_stage(ce))
|
|
continue;
|
|
if (S_ISGITLINK(ce->ce_mode))
|
|
continue;
|
|
if (ce_uptodate(ce))
|
|
continue;
|
|
if (!ce_path_match(ce, p->pathspec))
|
|
continue;
|
|
if (threaded_has_symlink_leading_path(&cache, ce->name, ce_namelen(ce)))
|
|
continue;
|
|
if (lstat(ce->name, &st))
|
|
continue;
|
|
if (ie_match_stat(index, ce, &st, CE_MATCH_RACY_IS_DIRTY))
|
|
continue;
|
|
ce_mark_uptodate(ce);
|
|
} while (--nr > 0);
|
|
return NULL;
|
|
}
|
|
|
|
static void preload_index(struct index_state *index, const char **pathspec)
|
|
{
|
|
int threads, i, work, offset;
|
|
struct thread_data data[MAX_PARALLEL];
|
|
|
|
if (!core_preload_index)
|
|
return;
|
|
|
|
threads = index->cache_nr / THREAD_COST;
|
|
if (threads < 2)
|
|
return;
|
|
if (threads > MAX_PARALLEL)
|
|
threads = MAX_PARALLEL;
|
|
offset = 0;
|
|
work = DIV_ROUND_UP(index->cache_nr, threads);
|
|
for (i = 0; i < threads; i++) {
|
|
struct thread_data *p = data+i;
|
|
p->index = index;
|
|
p->pathspec = pathspec;
|
|
p->offset = offset;
|
|
p->nr = work;
|
|
offset += work;
|
|
if (pthread_create(&p->pthread, NULL, preload_thread, p))
|
|
die("unable to create threaded lstat");
|
|
}
|
|
for (i = 0; i < threads; i++) {
|
|
struct thread_data *p = data+i;
|
|
if (pthread_join(p->pthread, NULL))
|
|
die("unable to join threaded lstat");
|
|
}
|
|
}
|
|
#endif
|
|
|
|
/*
 * Read the index and then kick off the parallel lstat preload so
 * later refresh work finds most entries already marked up-to-date.
 * Returns whatever read_index() returned.
 */
int read_index_preload(struct index_state *index, const char **pathspec)
{
	int ret;

	ret = read_index(index);
	preload_index(index, pathspec);
	return ret;
}
|