global: introduce `USE_THE_REPOSITORY_VARIABLE` macro
Use of the `the_repository` variable is deprecated nowadays, and we
slowly but steadily convert the codebase to not use it anymore. Instead,
callers should be passing down the repository to work on via parameters.
It is hard though to prove that a given code unit does not use this
variable anymore. The most trivial case, merely demonstrating that there
is no direct use of `the_repository`, is already a bit of a pain during
code reviews as the reviewer needs to manually verify claims made by the
patch author. The bigger problem though is that we have many interfaces
that implicitly rely on `the_repository`.
Introduce a new `USE_THE_REPOSITORY_VARIABLE` macro that allows code
units to opt into usage of `the_repository`. The intent of this macro is
to demonstrate that a certain code unit does not use this variable
anymore, and to keep it from acquiring new dependencies on it in future
changes, be they explicit or implicit.
For now, the macro only guards `the_repository` itself as well as
`the_hash_algo`. There are many more known interfaces where we have an
implicit dependency on `the_repository`, but those are not guarded at
the current point in time. Over time though, we should start to add
guards as required (or even better, just remove them).
Define the macro as required in our code units. As expected, most of our
code still relies on the global variable. Nearly all of our builtins
rely on the variable as there is no way yet to pass `the_repository` to
their entry point. For now, declare the macro in "builtin.h" to keep the
required changes at least a little bit more contained.
Signed-off-by: Patrick Steinhardt <ps@pks.im>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2024-06-14 14:50:23 +08:00
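As a rough sketch of the opt-in mechanism described above (illustrative only; the real guards live in headers such as repository.h and hash.h and may differ in detail), the global can be hidden unless a code unit defines the macro before its includes:

#ifdef USE_THE_REPOSITORY_VARIABLE
/* only code units that explicitly opted in can see the global */
extern struct repository *the_repository;
# define the_hash_algo (the_repository->hash_algo)
#endif

A code unit that still relies on the global then opts in by defining USE_THE_REPOSITORY_VARIABLE before any #include, exactly as this file does below.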
|
|
|
#define USE_THE_REPOSITORY_VARIABLE
|
|
|
|
|
2023-04-23 04:17:23 +08:00
|
|
|
#include "git-compat-util.h"
|
2023-03-21 14:26:03 +08:00
|
|
|
#include "environment.h"
|
2023-03-21 14:25:54 +08:00
|
|
|
#include "gettext.h"
|
2023-02-24 08:09:27 +08:00
|
|
|
#include "hex.h"
|
2018-01-24 07:46:51 +08:00
|
|
|
#include "list.h"
|
2017-08-19 06:20:19 +08:00
|
|
|
#include "pack.h"
|
2018-03-24 01:20:57 +08:00
|
|
|
#include "repository.h"
|
2017-08-19 06:20:26 +08:00
|
|
|
#include "dir.h"
|
|
|
|
#include "mergesort.h"
|
|
|
|
#include "packfile.h"
|
2017-08-19 06:20:28 +08:00
|
|
|
#include "delta.h"
|
2020-12-31 19:56:23 +08:00
|
|
|
#include "hash-lookup.h"
|
2017-12-06 00:58:44 +08:00
|
|
|
#include "commit.h"
|
|
|
|
#include "object.h"
|
|
|
|
#include "tag.h"
|
2023-04-11 11:00:38 +08:00
|
|
|
#include "trace.h"
|
2017-12-06 00:58:44 +08:00
|
|
|
#include "tree-walk.h"
|
|
|
|
#include "tree.h"
|
2023-04-11 15:41:53 +08:00
|
|
|
#include "object-file.h"
|
2023-05-16 14:34:06 +08:00
|
|
|
#include "object-store-ll.h"
|
2018-07-13 03:39:33 +08:00
|
|
|
#include "midx.h"
|
2019-05-18 02:41:48 +08:00
|
|
|
#include "commit-graph.h"
|
2023-04-11 11:00:41 +08:00
|
|
|
#include "pack-revindex.h"
|
2019-06-25 21:40:31 +08:00
|
|
|
#include "promisor-remote.h"
|
2017-08-19 06:20:16 +08:00
|
|
|
|
|
|
|
char *odb_pack_name(struct strbuf *buf,
|
2019-08-19 04:04:22 +08:00
|
|
|
const unsigned char *hash,
|
2017-08-19 06:20:16 +08:00
|
|
|
const char *ext)
|
|
|
|
{
|
|
|
|
strbuf_reset(buf);
|
2024-09-12 19:29:30 +08:00
|
|
|
strbuf_addf(buf, "%s/pack/pack-%s.%s", repo_get_object_directory(the_repository),
|
2019-08-19 04:04:22 +08:00
|
|
|
hash_to_hex(hash), ext);
|
2017-08-19 06:20:16 +08:00
|
|
|
return buf->buf;
|
|
|
|
}
|
|
|
|
|
2017-08-19 06:20:22 +08:00
|
|
|
static unsigned int pack_used_ctr;
|
|
|
|
static unsigned int pack_mmap_calls;
|
|
|
|
static unsigned int peak_pack_open_windows;
|
|
|
|
static unsigned int pack_open_windows;
|
2017-08-19 06:20:25 +08:00
|
|
|
static unsigned int pack_open_fds;
|
2017-08-19 06:20:22 +08:00
|
|
|
static unsigned int pack_max_fds;
|
|
|
|
static size_t peak_pack_mapped;
|
|
|
|
static size_t pack_mapped;
|
2017-08-19 06:20:18 +08:00
|
|
|
|
|
|
|
#define SZ_FMT PRIuMAX
|
|
|
|
static inline uintmax_t sz_fmt(size_t s) { return s; }
|
|
|
|
|
|
|
|
void pack_report(void)
|
|
|
|
{
|
|
|
|
fprintf(stderr,
|
|
|
|
"pack_report: getpagesize() = %10" SZ_FMT "\n"
|
|
|
|
"pack_report: core.packedGitWindowSize = %10" SZ_FMT "\n"
|
|
|
|
"pack_report: core.packedGitLimit = %10" SZ_FMT "\n",
|
|
|
|
sz_fmt(getpagesize()),
|
|
|
|
sz_fmt(packed_git_window_size),
|
|
|
|
sz_fmt(packed_git_limit));
|
|
|
|
fprintf(stderr,
|
|
|
|
"pack_report: pack_used_ctr = %10u\n"
|
|
|
|
"pack_report: pack_mmap_calls = %10u\n"
|
|
|
|
"pack_report: pack_open_windows = %10u / %10u\n"
|
|
|
|
"pack_report: pack_mapped = "
|
|
|
|
"%10" SZ_FMT " / %10" SZ_FMT "\n",
|
|
|
|
pack_used_ctr,
|
|
|
|
pack_mmap_calls,
|
|
|
|
pack_open_windows, peak_pack_open_windows,
|
|
|
|
sz_fmt(pack_mapped), sz_fmt(peak_pack_mapped));
|
|
|
|
}
|
2017-08-19 06:20:19 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Open and mmap the index file at path, perform a couple of
|
|
|
|
* consistency checks, then record its information to p. Return 0 on
|
|
|
|
* success.
|
|
|
|
*/
|
|
|
|
static int check_packed_git_idx(const char *path, struct packed_git *p)
|
|
|
|
{
|
|
|
|
void *idx_map;
|
|
|
|
size_t idx_size;
|
2018-10-13 08:58:41 +08:00
|
|
|
int fd = git_open(path), ret;
|
2017-08-19 06:20:19 +08:00
|
|
|
struct stat st;
|
2018-05-02 08:25:36 +08:00
|
|
|
const unsigned int hashsz = the_hash_algo->rawsz;
|
2017-08-19 06:20:19 +08:00
|
|
|
|
|
|
|
if (fd < 0)
|
|
|
|
return -1;
|
|
|
|
if (fstat(fd, &st)) {
|
|
|
|
close(fd);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
idx_size = xsize_t(st.st_size);
|
2018-05-02 08:25:36 +08:00
|
|
|
if (idx_size < 4 * 256 + hashsz + hashsz) {
|
2017-08-19 06:20:19 +08:00
|
|
|
close(fd);
|
|
|
|
return error("index file %s is too small", path);
|
|
|
|
}
|
|
|
|
idx_map = xmmap(NULL, idx_size, PROT_READ, MAP_PRIVATE, fd, 0);
|
|
|
|
close(fd);
|
|
|
|
|
2018-10-13 08:58:41 +08:00
|
|
|
ret = load_idx(path, hashsz, idx_map, idx_size, p);
|
|
|
|
|
|
|
|
if (ret)
|
|
|
|
munmap(idx_map, idx_size);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
int load_idx(const char *path, const unsigned int hashsz, void *idx_map,
|
|
|
|
size_t idx_size, struct packed_git *p)
|
|
|
|
{
|
|
|
|
struct pack_idx_header *hdr = idx_map;
|
|
|
|
uint32_t version, nr, i, *index;
|
|
|
|
|
|
|
|
if (idx_size < 4 * 256 + hashsz + hashsz)
|
|
|
|
return error("index file %s is too small", path);
|
2022-05-03 00:50:37 +08:00
|
|
|
if (!idx_map)
|
2018-10-13 08:58:41 +08:00
|
|
|
return error("empty data");
|
|
|
|
|
2017-08-19 06:20:19 +08:00
|
|
|
if (hdr->idx_signature == htonl(PACK_IDX_SIGNATURE)) {
|
|
|
|
version = ntohl(hdr->idx_version);
|
2018-10-13 08:58:41 +08:00
|
|
|
if (version < 2 || version > 2)
|
2017-08-19 06:20:19 +08:00
|
|
|
return error("index file %s is version %"PRIu32
|
|
|
|
" and is not supported by this binary"
|
|
|
|
" (try upgrading GIT to a newer version)",
|
|
|
|
path, version);
|
|
|
|
} else
|
|
|
|
version = 1;
|
|
|
|
|
|
|
|
nr = 0;
|
|
|
|
index = idx_map;
|
|
|
|
if (version > 1)
|
|
|
|
index += 2; /* skip index header */
|
|
|
|
for (i = 0; i < 256; i++) {
|
|
|
|
uint32_t n = ntohl(index[i]);
|
2018-10-13 08:58:41 +08:00
|
|
|
if (n < nr)
|
2017-08-19 06:20:19 +08:00
|
|
|
return error("non-monotonic index %s", path);
|
|
|
|
nr = n;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (version == 1) {
|
|
|
|
/*
|
|
|
|
* Total size:
|
|
|
|
* - 256 index entries 4 bytes each
|
2018-05-02 08:25:36 +08:00
|
|
|
* - 24-byte entries * nr (object ID + 4-byte offset)
|
|
|
|
* - hash of the packfile
|
|
|
|
* - file checksum
|
2017-08-19 06:20:19 +08:00
|
|
|
*/
|
2020-11-13 13:07:19 +08:00
|
|
|
if (idx_size != st_add(4 * 256 + hashsz + hashsz, st_mult(nr, hashsz + 4)))
|
2017-08-19 06:20:19 +08:00
|
|
|
return error("wrong index v1 file size in %s", path);
|
|
|
|
} else if (version == 2) {
|
|
|
|
/*
|
|
|
|
* Minimum size:
|
|
|
|
* - 8 bytes of header
|
|
|
|
* - 256 index entries 4 bytes each
|
2018-05-02 08:25:36 +08:00
|
|
|
* - object ID entry * nr
|
2017-08-19 06:20:19 +08:00
|
|
|
* - 4-byte crc entry * nr
|
|
|
|
* - 4-byte offset entry * nr
|
2018-05-02 08:25:36 +08:00
|
|
|
* - hash of the packfile
|
|
|
|
* - file checksum
|
2017-08-19 06:20:19 +08:00
|
|
|
* And after the 4-byte offset table might be a
|
|
|
|
* variable sized table containing 8-byte entries
|
|
|
|
* for offsets larger than 2^31.
|
|
|
|
*/
|
2020-11-13 13:07:19 +08:00
|
|
|
size_t min_size = st_add(8 + 4*256 + hashsz + hashsz, st_mult(nr, hashsz + 4 + 4));
|
2020-11-13 13:07:01 +08:00
|
|
|
size_t max_size = min_size;
|
2017-08-19 06:20:19 +08:00
|
|
|
if (nr)
|
2020-11-13 13:07:19 +08:00
|
|
|
max_size = st_add(max_size, st_mult(nr - 1, 8));
|
2018-10-13 08:58:41 +08:00
|
|
|
if (idx_size < min_size || idx_size > max_size)
|
2017-08-19 06:20:19 +08:00
|
|
|
return error("wrong index v2 file size in %s", path);
|
|
|
|
if (idx_size != min_size &&
|
|
|
|
/*
|
|
|
|
* make sure we can deal with large pack offsets.
|
|
|
|
* 31-bit signed offset won't be enough, neither
|
|
|
|
* 32-bit unsigned one will be.
|
|
|
|
*/
|
2018-10-13 08:58:41 +08:00
|
|
|
(sizeof(off_t) <= 4))
|
2017-08-19 06:20:19 +08:00
|
|
|
return error("pack too large for current definition of off_t in %s", path);
|
2023-07-14 08:54:54 +08:00
|
|
|
p->crc_offset = st_add(8 + 4 * 256, st_mult(nr, hashsz));
|
2017-08-19 06:20:19 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
p->index_version = version;
|
|
|
|
p->index_data = idx_map;
|
|
|
|
p->index_size = idx_size;
|
|
|
|
p->num_objects = nr;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int open_pack_index(struct packed_git *p)
|
|
|
|
{
|
|
|
|
char *idx_name;
|
|
|
|
size_t len;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (p->index_data)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!strip_suffix(p->pack_name, ".pack", &len))
|
2018-05-02 17:38:39 +08:00
|
|
|
BUG("pack_name does not end in .pack");
|
2017-08-19 06:20:19 +08:00
|
|
|
idx_name = xstrfmt("%.*s.idx", (int)len, p->pack_name);
|
|
|
|
ret = check_packed_git_idx(idx_name, p);
|
|
|
|
free(idx_name);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-07-13 03:39:29 +08:00
|
|
|
uint32_t get_pack_fanout(struct packed_git *p, uint32_t value)
|
|
|
|
{
|
|
|
|
const uint32_t *level1_ofs = p->index_data;
|
|
|
|
|
|
|
|
if (!level1_ofs) {
|
|
|
|
if (open_pack_index(p))
|
|
|
|
return 0;
|
|
|
|
level1_ofs = p->index_data;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (p->index_version > 1) {
|
|
|
|
level1_ofs += 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ntohl(level1_ofs[value]);
|
|
|
|
}
|
|
|
|
|
2017-08-19 06:20:19 +08:00
|
|
|
static struct packed_git *alloc_packed_git(int extra)
|
|
|
|
{
|
|
|
|
struct packed_git *p = xmalloc(st_add(sizeof(*p), extra));
|
|
|
|
memset(p, 0, sizeof(*p));
|
|
|
|
p->pack_fd = -1;
|
|
|
|
return p;
|
|
|
|
}
|
|
|
|
|
dumb-http: store downloaded pack idx as tempfile
This patch fixes a regression in b1b8dfde69 (finalize_object_file():
implement collision check, 2024-09-26) where fetching a v1 pack idx file
over the dumb-http protocol would cause the fetch to fail.
The core of the issue is that dumb-http stores the idx we fetch from the
remote at the same path that will eventually hold the idx we generate
from "index-pack --stdin". The sequence is something like this:
0. We realize we need some object X, which we don't have locally, and
nor does the other side have it as a loose object.
1. We download the list of remote packs from objects/info/packs.
2. For each entry in that file, we download each pack index and store
it locally in .git/objects/pack/pack-$hash.idx (the $hash is not
something we can verify yet and is given to us by the remote).
3. We check each pack index we got to see if it has object X. When we
find a match, we download the matching .pack file from the remote
to a tempfile. We feed that to "index-pack --stdin", which
reindexes the pack, rather than trusting that it has what the other
side claims it does. In most cases, this will end up generating the
exact same (byte-for-byte) pack index which we'll store at the same
pack-$hash.idx path, because the index generation and $hash id are
computed based on what's in the packfile. But:
a. The other side might have used other options to generate the
index. For instance we use index v2 by default, but long ago
it was v1 (and you can still ask for v1 explicitly).
b. The other side might even use a different mechanism to
determine $hash. E.g., long ago it was based on the sorted
list of objects in the packfile, but we switched to using the
pack checksum in 1190a1acf8 (pack-objects: name pack files
after trailer hash, 2013-12-05).
The regression we saw in the real world was (3a). A recent client
fetching from a server with a v1 index downloaded that index, then
complained about trying to overwrite it with its own v2 index. This
collision is otherwise harmless; we know we want to replace the remote
version with our local one, but the collision check doesn't realize
that.
There are a few options to fix it:
- we could teach index-pack a command-line option to ignore only pack
idx collisions, and use it when the dumb-http code invokes
index-pack. This would be an awkward thing to expose users to and
would involve a lot of boilerplate to get the option down to the
collision code.
- we could delete the remote .idx file right before running
index-pack. It should be redundant at that point (since we've just
downloaded the matching pack). But it feels risky to delete
something from our own .git/objects based on what the other side has
said. I'm not entirely positive that a malicious server couldn't lie
about which pack-$hash.idx it has and get us to delete something
precious.
- we can stop co-mingling the downloaded idx files in our local
objects directory. This is a slightly bigger change but I think
fixes the root of the problem more directly.
This patch implements the third option. The big design questions are:
where do we store the downloaded files, and how do we manage their
lifetimes?
There are some additional quirks to the dumb-http system we should
consider. Remember that in step 2 we downloaded every pack index, but in
step 3 we may only download some of the matching packs. What happens to
those other idx files now? They sit in the .git/objects/pack directory,
possibly waiting to be used at a later date. That may save bandwidth for
a subsequent fetch, but it also creates a lot of weird corner cases:
- our local object directory now has semi-untrusted .idx files sitting
around, without their matching .pack
- in case 3b, we noted that we might not generate the same hash as the
other side. In that case even if we download the matching pack,
our index-pack invocation will store it in a different
pack-$hash.idx file. And the unmatched .idx will sit there forever.
- if the server repacks, it may delete the old packs. Now we have
these orphaned .idx files sitting around locally that will never be
used (nor deleted).
- if we repack locally we may delete our local version of the server's
pack index and not realize we have it. So we'll download it again,
even though we have all of the objects it mentions.
I think the right solution here is probably some more complex cache
management system: download the remote .idx files to their own storage
directory, mark them as "seen" when we get their matching pack (to avoid
re-downloading even if we repack), and then delete them when the
server's objects/info/refs no longer mentions them.
But since the dumb http protocol is so ancient and so inferior to the
smart http protocol, I don't think it's worth spending a lot of time
creating such a system. For this patch I'm just downloading the idx
files to .git/objects/tmp_pack_*, and marking them as tempfiles to be
deleted when we exit (and due to the name, any we miss due to a crash,
etc, should eventually be removed by "git gc" runs based on timestamps).
That is slightly worse for one case: if we download an idx but not the
matching pack, we won't retain that idx for subsequent runs. But the
flip side is that we're making other cases better (we never hold on to
useless idx files forever). I suspect that worse case does not even come
up often, since it implies that the packs are generated to match
distinct parts of history (i.e., in practice even in a repo with many
packs you're going to end up grabbing all of those packs to do a clone).
If somebody really cares about that, I think the right path forward is a
managed cache directory as above, and this patch is providing the first
step in that direction anyway (by moving things out of the objects/pack/
directory).
There are two test changes. One demonstrates the broken v1 index case
(it double-checks the resulting clone with fsck to be careful, but prior
to this patch it actually fails at the clone step). The other tweaks the
expectation for a test that covers the "slightly worse" case to
accommodate the extra index download.
The code changes are fairly simple. We stop using finalize_object_file()
to copy the remote's index file into place, and leave it as a tempfile.
We give the tempfile a real ".idx" name, since the packfile code expects
that, and thus we make sure it is out of the usual packs/ directory (so
we'd never mistake it for a real local .idx).
We also have to change parse_pack_index(), which creates a temporary
packed_git to access our index (we need this because all of the pack idx
code assumes we have that struct). It reads the index data from the
tempfile, but prior to this patch would speculatively write the
finalized name into the packed_git struct using the pack-$hash we expect
to use.
I was mildly surprised that this worked at all, since we call
verify_pack_index() on the packed_git which mentions the final name
before moving the file into place! But it works because
parse_pack_index() leaves the mmap-ed data in the struct, so the
lazy-open in verify_pack_index() never triggers, and we read from the
tempfile, ignoring the filename in the struct completely. Hacky, but it
works.
After this patch, parse_pack_index() now uses the index filename we pass
in to derive a matching .pack name. This is OK to change because there
are only two callers, both in the dumb http code (and the other passes
in an existing pack-$hash.idx name, so the derived name is going to be
pack-$hash.pack, which is what we were using anyway).
I'll follow up with some more cleanups in that area, but this patch is
sufficient to fix the regression.
Reported-by: fox <fox.gbr@townlong-yak.com>
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Taylor Blau <me@ttaylorr.com>
2024-10-25 14:58:06 +08:00
|
|
|
static char *pack_path_from_idx(const char *idx_path)
|
|
|
|
{
|
|
|
|
size_t len;
|
|
|
|
if (!strip_suffix(idx_path, ".idx", &len))
|
|
|
|
BUG("idx path does not end in .idx: %s", idx_path);
|
|
|
|
return xstrfmt("%.*s.pack", (int)len, idx_path);
|
|
|
|
}
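A minimal usage sketch of the helper above (hypothetical path, shown only to illustrate the derivation; the result is a freshly allocated string the caller must free):

char *pack = pack_path_from_idx(".git/objects/tmp_pack_abc123.idx");
/* pack now holds ".git/objects/tmp_pack_abc123.pack" */
free(pack);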
|
|
|
|
|
2017-08-19 06:20:19 +08:00
|
|
|
struct packed_git *parse_pack_index(unsigned char *sha1, const char *idx_path)
|
|
|
|
{
|
2024-10-25 14:58:06 +08:00
|
|
|
char *path = pack_path_from_idx(idx_path);
|
2017-08-19 06:20:19 +08:00
|
|
|
size_t alloc = st_add(strlen(path), 1);
|
|
|
|
struct packed_git *p = alloc_packed_git(alloc);
|
|
|
|
|
|
|
|
memcpy(p->pack_name, path, alloc); /* includes NUL */
|
2024-10-25 14:58:06 +08:00
|
|
|
free(path);
|
2024-06-14 14:49:50 +08:00
|
|
|
hashcpy(p->hash, sha1, the_repository->hash_algo);
|
2017-08-19 06:20:19 +08:00
|
|
|
if (check_packed_git_idx(idx_path, p)) {
|
|
|
|
free(p);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return p;
|
|
|
|
}
|
2017-08-19 06:20:20 +08:00
|
|
|
|
|
|
|
static void scan_windows(struct packed_git *p,
|
|
|
|
struct packed_git **lru_p,
|
|
|
|
struct pack_window **lru_w,
|
|
|
|
struct pack_window **lru_l)
|
|
|
|
{
|
|
|
|
struct pack_window *w, *w_l;
|
|
|
|
|
|
|
|
for (w_l = NULL, w = p->windows; w; w = w->next) {
|
|
|
|
if (!w->inuse_cnt) {
|
|
|
|
if (!*lru_w || w->last_used < (*lru_w)->last_used) {
|
|
|
|
*lru_p = p;
|
|
|
|
*lru_w = w;
|
|
|
|
*lru_l = w_l;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
w_l = w;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-08-19 06:20:22 +08:00
|
|
|
static int unuse_one_window(struct packed_git *current)
|
2017-08-19 06:20:20 +08:00
|
|
|
{
|
|
|
|
struct packed_git *p, *lru_p = NULL;
|
|
|
|
struct pack_window *lru_w = NULL, *lru_l = NULL;
|
|
|
|
|
|
|
|
if (current)
|
|
|
|
scan_windows(current, &lru_p, &lru_w, &lru_l);
|
2018-03-24 01:20:59 +08:00
|
|
|
for (p = the_repository->objects->packed_git; p; p = p->next)
|
2017-08-19 06:20:20 +08:00
|
|
|
scan_windows(p, &lru_p, &lru_w, &lru_l);
|
|
|
|
if (lru_p) {
|
|
|
|
munmap(lru_w->base, lru_w->len);
|
|
|
|
pack_mapped -= lru_w->len;
|
|
|
|
if (lru_l)
|
|
|
|
lru_l->next = lru_w->next;
|
|
|
|
else
|
|
|
|
lru_p->windows = lru_w->next;
|
|
|
|
free(lru_w);
|
|
|
|
pack_open_windows--;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-08-19 06:20:21 +08:00
|
|
|
void close_pack_windows(struct packed_git *p)
|
|
|
|
{
|
|
|
|
while (p->windows) {
|
|
|
|
struct pack_window *w = p->windows;
|
|
|
|
|
|
|
|
if (w->inuse_cnt)
|
|
|
|
die("pack '%s' still has open windows to it",
|
|
|
|
p->pack_name);
|
|
|
|
munmap(w->base, w->len);
|
|
|
|
pack_mapped -= w->len;
|
|
|
|
pack_open_windows--;
|
|
|
|
p->windows = w->next;
|
|
|
|
free(w);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-22 03:36:15 +08:00
|
|
|
int close_pack_fd(struct packed_git *p)
|
2017-08-19 06:20:21 +08:00
|
|
|
{
|
|
|
|
if (p->pack_fd < 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
close(p->pack_fd);
|
|
|
|
pack_open_fds--;
|
|
|
|
p->pack_fd = -1;
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
void close_pack_index(struct packed_git *p)
|
|
|
|
{
|
|
|
|
if (p->index_data) {
|
|
|
|
munmap((void *)p->index_data, p->index_size);
|
|
|
|
p->index_data = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-12-05 06:51:38 +08:00
|
|
|
static void close_pack_revindex(struct packed_git *p)
|
|
|
|
{
|
packfile: prepare for the existence of '*.rev' files
Specify the format of the on-disk reverse index 'pack-*.rev' file, as
well as prepare the code for the existence of such files.
The reverse index maps from pack relative positions (i.e., an index into
the array of objects, which is sorted by their offsets within the
packfile) to their position within the 'pack-*.idx' file. Today, this is
done by building up a list of (off_t, uint32_t) tuples for each object
(the off_t corresponding to that object's offset, and the uint32_t
corresponding to its position in the index). To convert between pack and
index position quickly, this array of tuples is radix sorted based on
its offset.
This has two major drawbacks:
First, the in-memory cost scales linearly with the number of objects in
a pack. Each 'struct revindex_entry' is sizeof(off_t) +
sizeof(uint32_t) + padding bytes for a total of 16.
To observe this, force Git to load the reverse index by, e.g.,
running 'git cat-file --batch-check="%(objectsize:disk)"'. When asking
for a single object in a fresh clone of the kernel, Git needs to
allocate 120+ MB of memory in order to hold the reverse index in memory.
Second, the cost to sort also scales with the size of the pack.
Luckily, this is a linear function since 'load_pack_revindex()' uses a
radix sort, but this cost still must be paid once per pack per process.
As an example, it takes ~60x longer to print the _size_ of an object
than it does to print that entire object's _contents_:
Benchmark #1: git.compile cat-file --batch <obj
Time (mean ± σ): 3.4 ms ± 0.1 ms [User: 3.3 ms, System: 2.1 ms]
Range (min … max): 3.2 ms … 3.7 ms 726 runs
Benchmark #2: git.compile cat-file --batch-check="%(objectsize:disk)" <obj
Time (mean ± σ): 210.3 ms ± 8.9 ms [User: 188.2 ms, System: 23.2 ms]
Range (min … max): 193.7 ms … 224.4 ms 13 runs
Instead, avoid computing and sorting the revindex once per process by
writing it to a file when the pack itself is generated.
The format is relatively straightforward. It contains an array of
uint32_t's, the length of which is equal to the number of objects in the
pack. The ith entry in this table contains the index position of the
ith object in the pack, where "ith object in the pack" is determined by
pack offset.
One thing that the on-disk format does _not_ contain is the full (up to)
eight-byte offset corresponding to each object. This is something that
the in-memory revindex contains (it stores an off_t in 'struct
revindex_entry' along with the same uint32_t that the on-disk format
has). Omit it in the on-disk format, since knowing the index position
for some object is sufficient to get a constant-time lookup in the
pack-*.idx file to ask for an object's offset within the pack.
This trades off between the on-disk size of the 'pack-*.rev' file for
runtime to chase down the offset for some object. Even though the lookup
is constant time, the constant is heavier, since it can potentially
involve two pointer walks in v2 indexes (one to access the 4-byte offset
table, and potentially a second to access the double wide offset table).
Consider trying to map an object's pack offset to a relative position
within that pack. In a cold-cache scenario, more page faults occur while
switching between binary searching through the reverse index and
searching through the *.idx file for an object's offset. Sure enough,
with a cold cache (writing '3' into '/proc/sys/vm/drop_caches' after
'sync'ing), printing out the entire object's contents is still
marginally faster than printing its size:
Benchmark #1: git.compile cat-file --batch-check="%(objectsize:disk)" <obj >/dev/null
Time (mean ± σ): 22.6 ms ± 0.5 ms [User: 2.4 ms, System: 7.9 ms]
Range (min … max): 21.4 ms … 23.5 ms 41 runs
Benchmark #2: git.compile cat-file --batch <obj >/dev/null
Time (mean ± σ): 17.2 ms ± 0.7 ms [User: 2.8 ms, System: 5.5 ms]
Range (min … max): 15.6 ms … 18.2 ms 45 runs
(Numbers taken in the kernel after cheating and using the next patch to
generate a reverse index). There are a couple of approaches to improve
cold cache performance not pursued here:
- We could include the object offsets in the reverse index format.
Predictably, this does result in fewer page faults, but it triples
the size of the file, while simultaneously duplicating a ton of data
already available in the .idx file. (This was the original way I
implemented the format, and it did show
`--batch-check='%(objectsize:disk)'` winning out against `--batch`.)
On the other hand, this increase in size also results in a large
block-cache footprint, which could potentially hurt other workloads.
- We could store the mapping from pack to index position in more
cache-friendly way, like constructing a binary search tree from the
table and writing the values in breadth-first order. This would
result in much better locality, but the price you pay is trading
O(1) lookup in 'pack_pos_to_index()' for an O(log n) one (since you
can no longer directly index the table).
So, neither of these approaches are taken here. (Thankfully, the format
is versioned, so we are free to pursue these in the future.) But, cold
cache performance likely isn't interesting outside of one-off cases like
asking for the size of an object directly. In real-world usage, Git is
often performing many operations in the revindex (i.e., asking about
many objects rather than a single one).
The trade-off is worth it, since we will avoid the vast majority of the
cost of generating the revindex, so the extra pointer chase will look
like noise in the following patch's benchmarks.
This patch describes the format and prepares callers (like in
pack-revindex.c) to be able to read *.rev files once they exist. An
implementation of the writer will appear in the next patch, and callers
will gradually begin to start using the writer in the patches that
follow after that.
Signed-off-by: Taylor Blau <me@ttaylorr.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-01-26 07:37:14 +08:00
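To make the table layout described above concrete, here is a tiny standalone illustration (toy data, not Git code): rev[i] holds the .idx position of the object that comes i-th when objects are sorted by pack offset.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical pack with four objects */
	uint32_t rev[] = { 2, 0, 3, 1 }; /* pack position -> index position */

	for (uint32_t pos = 0; pos < 4; pos++)
		printf("pack position %u -> index position %u\n", pos, rev[pos]);
	return 0;
}

A constant-time lookup of rev[pos] followed by a lookup in the .idx offset table is what replaces the per-process radix sort.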
|
|
|
if (!p->revindex_map)
|
|
|
|
return;
|
|
|
|
|
|
|
|
munmap((void *)p->revindex_map, p->revindex_size);
|
|
|
|
p->revindex_map = NULL;
|
|
|
|
p->revindex_data = NULL;
|
|
|
|
}
|
|
|
|
|
2022-05-21 07:17:35 +08:00
|
|
|
static void close_pack_mtimes(struct packed_git *p)
|
|
|
|
{
|
|
|
|
if (!p->mtimes_map)
|
|
|
|
return;
|
|
|
|
|
|
|
|
munmap((void *)p->mtimes_map, p->mtimes_size);
|
|
|
|
p->mtimes_map = NULL;
|
|
|
|
}
|
|
|
|
|
2018-04-10 20:56:06 +08:00
|
|
|
void close_pack(struct packed_git *p)
|
2017-08-19 06:20:21 +08:00
|
|
|
{
|
|
|
|
close_pack_windows(p);
|
|
|
|
close_pack_fd(p);
|
|
|
|
close_pack_index(p);
|
2021-01-26 07:37:14 +08:00
|
|
|
close_pack_revindex(p);
|
2022-05-21 07:17:35 +08:00
|
|
|
close_pack_mtimes(p);
|
2021-09-24 14:10:10 +08:00
|
|
|
oidset_clear(&p->bad_objects);
|
2017-08-19 06:20:21 +08:00
|
|
|
}
|
|
|
|
|
2019-05-18 02:41:49 +08:00
|
|
|
void close_object_store(struct raw_object_store *o)
|
2017-08-19 06:20:21 +08:00
|
|
|
{
|
|
|
|
struct packed_git *p;
|
|
|
|
|
2018-03-24 01:21:00 +08:00
|
|
|
for (p = o->packed_git; p; p = p->next)
|
2017-08-19 06:20:21 +08:00
|
|
|
if (p->do_not_close)
|
2018-05-02 17:38:39 +08:00
|
|
|
BUG("want to close pack marked 'do-not-close'");
|
2017-08-19 06:20:21 +08:00
|
|
|
else
|
|
|
|
close_pack(p);
|
2018-10-25 20:54:05 +08:00
|
|
|
|
|
|
|
if (o->multi_pack_index) {
|
|
|
|
close_midx(o->multi_pack_index);
|
|
|
|
o->multi_pack_index = NULL;
|
|
|
|
}
|
2019-05-18 02:41:48 +08:00
|
|
|
|
|
|
|
close_commit_graph(o);
|
2017-08-19 06:20:21 +08:00
|
|
|
}
|
2017-08-19 06:20:22 +08:00
|
|
|
|
2019-06-11 07:35:22 +08:00
|
|
|
void unlink_pack_path(const char *pack_name, int force_delete)
|
|
|
|
{
|
packfile: delete .idx files before .pack files
When installing a packfile, we place the .pack file before the .idx
file. The intention is that Git scans for .idx files in the pack
directory and then loads the .pack files from that list.
However, when we delete packfiles, we do not do this in the reverse
order as we should. The unlink_pack_path() method deletes the .pack
followed by the .idx.
This creates a window where the process could be interrupted between
the .pack deletion and the .idx deletion, leaving the repository in a
state that looks strange, but isn't actually too problematic if we
assume the pack was safe to delete. The .idx without a .pack will cause
some overhead, but will not interrupt other Git processes.
This ordering was introduced into the 'git repack' builtin by
a1bbc6c0176 (repack: rewrite the shell script in C, 2013-09-15), though
we must be careful to track history through the code move in 8434e85d5f9
(repack: refactor pack deletion for future use, 2019-06-10) to see that.
This became more important after 73320e49add (builtin/repack.c: only
collect fully-formed packs, 2023-06-07) changed how 'git repack' scanned
for packfiles for use in the cruft pack process. It previously looked
for .pack files, but that was problematic due to the order that packs
are installed: repacks between the creation of a .pack and the creation
of its .idx would result in hard failures.
There is an independent proposal about what to do in the case of a .idx
without a .pack during this 'git repack' scenario, but this change is
focused on deleting .pack files more safely.
Modify the order to delete the .idx before the .pack. The rest of the
modifiers on the .pack should still come after the .pack so we know all
of the presumed properties of the packfile as long as it exists in the
filesystem, in case we wish to reinstate it by re-indexing the .pack
file.
Signed-off-by: Derrick Stolee <derrickstolee@github.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2023-06-21 03:01:15 +08:00
|
|
|
static const char *exts[] = {".idx", ".pack", ".rev", ".keep", ".bitmap", ".promisor", ".mtimes"};
|
2019-06-11 07:35:22 +08:00
|
|
|
int i;
|
|
|
|
struct strbuf buf = STRBUF_INIT;
|
|
|
|
size_t plen;
|
|
|
|
|
|
|
|
strbuf_addstr(&buf, pack_name);
|
|
|
|
strip_suffix_mem(buf.buf, &buf.len, ".pack");
|
|
|
|
plen = buf.len;
|
|
|
|
|
|
|
|
if (!force_delete) {
|
|
|
|
strbuf_addstr(&buf, ".keep");
|
|
|
|
if (!access(buf.buf, F_OK)) {
|
|
|
|
strbuf_release(&buf);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(exts); i++) {
|
|
|
|
strbuf_setlen(&buf, plen);
|
|
|
|
strbuf_addstr(&buf, exts[i]);
|
|
|
|
unlink(buf.buf);
|
|
|
|
}
|
|
|
|
|
|
|
|
strbuf_release(&buf);
|
|
|
|
}
|
|
|
|
|
2017-08-19 06:20:22 +08:00
|
|
|
/*
|
|
|
|
* The LRU pack is the one with the oldest MRU window, preferring packs
|
|
|
|
* with no used windows, or the oldest mtime if it has no windows allocated.
|
|
|
|
*/
|
|
|
|
static void find_lru_pack(struct packed_git *p, struct packed_git **lru_p, struct pack_window **mru_w, int *accept_windows_inuse)
|
|
|
|
{
|
|
|
|
struct pack_window *w, *this_mru_w;
|
|
|
|
int has_windows_inuse = 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Reject this pack if it has windows and the previously selected
|
|
|
|
* one does not. If this pack does not have windows, reject
|
|
|
|
* it if the pack file is newer than the previously selected one.
|
|
|
|
*/
|
|
|
|
if (*lru_p && !*mru_w && (p->windows || p->mtime > (*lru_p)->mtime))
|
|
|
|
return;
|
|
|
|
|
|
|
|
for (w = this_mru_w = p->windows; w; w = w->next) {
|
|
|
|
/*
|
|
|
|
* Reject this pack if any of its windows are in use,
|
|
|
|
* but the previously selected pack did not have any
|
|
|
|
* inuse windows. Otherwise, record that this pack
|
|
|
|
* has windows in use.
|
|
|
|
*/
|
|
|
|
if (w->inuse_cnt) {
|
|
|
|
if (*accept_windows_inuse)
|
|
|
|
has_windows_inuse = 1;
|
|
|
|
else
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (w->last_used > this_mru_w->last_used)
|
|
|
|
this_mru_w = w;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Reject this pack if it has windows that have been
|
|
|
|
* used more recently than the previously selected pack.
|
|
|
|
* If the previously selected pack had windows inuse and
|
|
|
|
* we have not encountered a window in this pack that is
|
|
|
|
* inuse, skip this check since we prefer a pack with no
|
|
|
|
* inuse windows to one that has inuse windows.
|
|
|
|
*/
|
|
|
|
if (*mru_w && *accept_windows_inuse == has_windows_inuse &&
|
|
|
|
this_mru_w->last_used > (*mru_w)->last_used)
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Select this pack.
|
|
|
|
*/
|
|
|
|
*mru_w = this_mru_w;
|
|
|
|
*lru_p = p;
|
|
|
|
*accept_windows_inuse = has_windows_inuse;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int close_one_pack(void)
|
|
|
|
{
|
|
|
|
struct packed_git *p, *lru_p = NULL;
|
|
|
|
struct pack_window *mru_w = NULL;
|
|
|
|
int accept_windows_inuse = 1;
|
|
|
|
|
2018-03-24 01:20:59 +08:00
|
|
|
for (p = the_repository->objects->packed_git; p; p = p->next) {
|
2017-08-19 06:20:22 +08:00
|
|
|
if (p->pack_fd == -1)
|
|
|
|
continue;
|
|
|
|
find_lru_pack(p, &lru_p, &mru_w, &accept_windows_inuse);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (lru_p)
|
|
|
|
return close_pack_fd(lru_p);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static unsigned int get_max_fd_limit(void)
{
#ifdef RLIMIT_NOFILE
	{
		struct rlimit lim;

		if (!getrlimit(RLIMIT_NOFILE, &lim))
			return lim.rlim_cur;
	}
#endif

#ifdef _SC_OPEN_MAX
	{
		long open_max = sysconf(_SC_OPEN_MAX);
		if (0 < open_max)
			return open_max;
		/*
		 * Otherwise, we got -1 for one of the two
		 * reasons:
		 *
		 * (1) sysconf() did not understand _SC_OPEN_MAX
		 *     and signaled an error with -1; or
		 * (2) sysconf() said there is no limit.
		 *
		 * We _could_ clear errno before calling sysconf() to
		 * tell these two cases apart and return a huge number
		 * in the latter case to let the caller cap it to a
		 * value that is not so selfish, but letting the
		 * fallback OPEN_MAX codepath take care of these cases
		 * is a lot simpler.
		 */
	}
#endif

#ifdef OPEN_MAX
	return OPEN_MAX;
#else
	return 1; /* see the caller ;-) */
#endif
}
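/*
 * Return the filename component of a pack's path, e.g.
 * "pack-1234.pack" for ".git/objects/pack/pack-1234.pack".  If the
 * stored name has no directory component, the whole name is returned.
 */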
const char *pack_basename(struct packed_git *p)
{
	const char *ret = strrchr(p->pack_name, '/');
	if (ret)
		ret = ret + 1; /* skip past slash */
	else
		ret = p->pack_name; /* we only have a base */
	return ret;
}
/*
 * Do not call this directly as this leaks p->pack_fd on error return;
 * call open_packed_git() instead.
 */
static int open_packed_git_1(struct packed_git *p)
{
	struct stat st;
	struct pack_header hdr;
	unsigned char hash[GIT_MAX_RAWSZ];
	unsigned char *idx_hash;
	ssize_t read_result;
	const unsigned hashsz = the_hash_algo->rawsz;

	if (open_pack_index(p))
		return error("packfile %s index unavailable", p->pack_name);

	if (!pack_max_fds) {
		unsigned int max_fds = get_max_fd_limit();

		/* Save 3 for stdin/stdout/stderr, 22 for work */
		if (25 < max_fds)
			pack_max_fds = max_fds - 25;
		else
			pack_max_fds = 1;
	}

	while (pack_max_fds <= pack_open_fds && close_one_pack())
		; /* nothing */

	p->pack_fd = git_open(p->pack_name);
	if (p->pack_fd < 0 || fstat(p->pack_fd, &st))
		return -1;
	pack_open_fds++;

	/* If we created the struct before we had the pack we lack size. */
	if (!p->pack_size) {
		if (!S_ISREG(st.st_mode))
			return error("packfile %s not a regular file", p->pack_name);
		p->pack_size = st.st_size;
	} else if (p->pack_size != st.st_size)
		return error("packfile %s size changed", p->pack_name);

	/* Verify we recognize this pack file format. */
	read_result = read_in_full(p->pack_fd, &hdr, sizeof(hdr));
	if (read_result < 0)
		return error_errno("error reading from %s", p->pack_name);
	if (read_result != sizeof(hdr))
		return error("file %s is far too short to be a packfile", p->pack_name);
	if (hdr.hdr_signature != htonl(PACK_SIGNATURE))
		return error("file %s is not a GIT packfile", p->pack_name);
	if (!pack_version_ok(hdr.hdr_version))
		return error("packfile %s is version %"PRIu32" and not"
			" supported (try upgrading GIT to a newer version)",
			p->pack_name, ntohl(hdr.hdr_version));

	/* Verify the pack matches its index. */
	if (p->num_objects != ntohl(hdr.hdr_entries))
		return error("packfile %s claims to have %"PRIu32" objects"
			     " while index indicates %"PRIu32" objects",
			     p->pack_name, ntohl(hdr.hdr_entries),
			     p->num_objects);
	read_result = pread_in_full(p->pack_fd, hash, hashsz,
				    p->pack_size - hashsz);
	if (read_result < 0)
		return error_errno("error reading from %s", p->pack_name);
	if (read_result != hashsz)
		return error("packfile %s signature is unavailable", p->pack_name);
	idx_hash = ((unsigned char *)p->index_data) + p->index_size - hashsz * 2;
	if (!hasheq(hash, idx_hash, the_repository->hash_algo))
		return error("packfile %s does not match index", p->pack_name);
	return 0;
}
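/*
 * Wrapper around open_packed_git_1() that closes the partially opened
 * pack_fd again when validation fails, so an error return never leaks
 * a file descriptor.
 */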
static int open_packed_git(struct packed_git *p)
{
	if (!open_packed_git_1(p))
		return 0;
	close_pack_fd(p);
	return -1;
}
static int in_window(struct pack_window *win, off_t offset)
{
	/* We must promise at least one full hash after the
	 * offset is available from this window, otherwise the offset
	 * is not actually in this window and a different window (which
	 * has that one hash excess) must be used. This is to support
	 * the object header and delta base parsing routines below.
	 */
	off_t win_off = win->offset;
	return win_off <= offset
		&& (offset + the_hash_algo->rawsz) <= (win_off + win->len);
}
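/*
 * Map the requested offset of the packfile into memory (reusing or
 * creating a pack_window) and return a pointer to it; if `left` is
 * non-NULL it receives the number of mapped bytes available at the
 * returned pointer.  Callers typically pair this with unuse_pack()
 * once they are done with the window (illustrative sketch only):
 *
 *	struct pack_window *w_curs = NULL;
 *	unsigned long avail;
 *	unsigned char *data = use_pack(p, &w_curs, obj_offset, &avail);
 *	... read at most `avail` bytes starting at `data` ...
 *	unuse_pack(&w_curs);
 */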
unsigned char *use_pack(struct packed_git *p,
			struct pack_window **w_cursor,
			off_t offset,
			unsigned long *left)
{
	struct pack_window *win = *w_cursor;

	/* Since packfiles end in a hash of their content and it's
	 * pointless to ask for an offset into the middle of that
	 * hash, and the in_window function above wouldn't match,
	 * don't allow an offset too close to the end of the file.
	 */
	if (!p->pack_size && p->pack_fd == -1 && open_packed_git(p))
		die("packfile %s cannot be accessed", p->pack_name);
	if (offset > (p->pack_size - the_hash_algo->rawsz))
		die("offset beyond end of packfile (truncated pack?)");
	if (offset < 0)
		die(_("offset before end of packfile (broken .idx?)"));

	if (!win || !in_window(win, offset)) {
		if (win)
			win->inuse_cnt--;
		for (win = p->windows; win; win = win->next) {
			if (in_window(win, offset))
				break;
		}
		if (!win) {
			size_t window_align = packed_git_window_size / 2;
			off_t len;

			if (p->pack_fd == -1 && open_packed_git(p))
				die("packfile %s cannot be accessed", p->pack_name);

			CALLOC_ARRAY(win, 1);
			win->offset = (offset / window_align) * window_align;
			len = p->pack_size - win->offset;
			if (len > packed_git_window_size)
				len = packed_git_window_size;
			win->len = (size_t)len;
			pack_mapped += win->len;
			while (packed_git_limit < pack_mapped
				&& unuse_one_window(p))
				; /* nothing */
			win->base = xmmap_gently(NULL, win->len,
				PROT_READ, MAP_PRIVATE,
				p->pack_fd, win->offset);
			if (win->base == MAP_FAILED)
				die_errno(_("packfile %s cannot be mapped%s"),
					  p->pack_name, mmap_os_err());
			if (!win->offset && win->len == p->pack_size
				&& !p->do_not_close)
				close_pack_fd(p);
			pack_mmap_calls++;
			pack_open_windows++;
			if (pack_mapped > peak_pack_mapped)
				peak_pack_mapped = pack_mapped;
			if (pack_open_windows > peak_pack_open_windows)
				peak_pack_open_windows = pack_open_windows;
			win->next = p->windows;
			p->windows = win;
		}
	}
	if (win != *w_cursor) {
		win->last_used = pack_used_ctr++;
		win->inuse_cnt++;
		*w_cursor = win;
	}
	offset -= win->offset;
	if (left)
		*left = win->len - xsize_t(offset);
	return win->base + offset;
}
void unuse_pack(struct pack_window **w_cursor)
{
	struct pack_window *w = *w_cursor;
	if (w) {
		w->inuse_cnt--;
		*w_cursor = NULL;
	}
}
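/*
 * Create a packed_git struct for the pack whose ".idx" path is given.
 * The index itself is not opened here; we only record the pack's size
 * and mtime, probe for the optional ".keep", ".promisor" and ".mtimes"
 * companions, and extract the hash embedded in the filename (clearing
 * it when the name does not carry one).  Returns NULL if the ".pack"
 * file is missing or not a regular file.
 */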
struct packed_git *add_packed_git(const char *path, size_t path_len, int local)
{
	struct stat st;
	size_t alloc;
	struct packed_git *p;

	/*
	 * Make sure a corresponding .pack file exists and that
	 * the index looks sane.
	 */
	if (!strip_suffix_mem(path, &path_len, ".idx"))
		return NULL;

	/*
	 * ".promisor" is long enough to hold any suffix we're adding (and
	 * the use of xsnprintf double-checks that)
	 */
	alloc = st_add3(path_len, strlen(".promisor"), 1);
	p = alloc_packed_git(alloc);
	memcpy(p->pack_name, path, path_len);

	xsnprintf(p->pack_name + path_len, alloc - path_len, ".keep");
	if (!access(p->pack_name, F_OK))
		p->pack_keep = 1;

	xsnprintf(p->pack_name + path_len, alloc - path_len, ".promisor");
	if (!access(p->pack_name, F_OK))
		p->pack_promisor = 1;

	xsnprintf(p->pack_name + path_len, alloc - path_len, ".mtimes");
	if (!access(p->pack_name, F_OK))
		p->is_cruft = 1;

	xsnprintf(p->pack_name + path_len, alloc - path_len, ".pack");
	if (stat(p->pack_name, &st) || !S_ISREG(st.st_mode)) {
		free(p);
		return NULL;
	}

	/* ok, it looks sane as far as we can check without
	 * actually mapping the pack file.
	 */
	p->pack_size = st.st_size;
	p->pack_local = local;
	p->mtime = st.st_mtime;
	if (path_len < the_hash_algo->hexsz ||
	    get_hash_hex(path + path_len - the_hash_algo->hexsz, p->hash))
		hashclr(p->hash, the_repository->hash_algo);
	return p;
}
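/*
 * Add an already-created packed_git to the repository's pack list and
 * to the pack_map hashmap that is used to avoid reopening the same
 * pack twice (see prepare_pack() below).
 */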
void install_packed_git(struct repository *r, struct packed_git *pack)
{
	if (pack->pack_fd != -1)
		pack_open_fds++;

	pack->next = r->objects->packed_git;
	r->objects->packed_git = pack;

	hashmap_entry_init(&pack->packmap_ent, strhash(pack->pack_name));
	hashmap_add(&r->objects->pack_map, &pack->packmap_ent);
}
void (*report_garbage)(unsigned seen_bits, const char *path);

static void report_helper(const struct string_list *list,
			  int seen_bits, int first, int last)
{
	if (seen_bits == (PACKDIR_FILE_PACK|PACKDIR_FILE_IDX))
		return;

	for (; first < last; first++)
		report_garbage(seen_bits, list->items[first].string);
}
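/*
 * Walk the sorted list of leftover pack directory entries, group them
 * by basename, and report any group that does not form a complete
 * ".pack"/".idx" pair through the report_garbage callback.
 */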
static void report_pack_garbage(struct string_list *list)
{
	int i, baselen = -1, first = 0, seen_bits = 0;

	if (!report_garbage)
		return;

	string_list_sort(list);

	for (i = 0; i < list->nr; i++) {
		const char *path = list->items[i].string;
		if (baselen != -1 &&
		    strncmp(path, list->items[first].string, baselen)) {
			report_helper(list, seen_bits, first, i);
			baselen = -1;
			seen_bits = 0;
		}
		if (baselen == -1) {
			const char *dot = strrchr(path, '.');
			if (!dot) {
				report_garbage(PACKDIR_FILE_GARBAGE, path);
				continue;
			}
			baselen = dot - path + 1;
			first = i;
		}
		if (!strcmp(path + baselen, "pack"))
			seen_bits |= 1;
		else if (!strcmp(path + baselen, "idx"))
			seen_bits |= 2;
	}
	report_helper(list, seen_bits, first, list->nr);
}
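/*
 * Invoke `fn` for every entry in "<objdir>/pack" (or in the given
 * subdirectory of it when `subdir` is non-NULL).  A missing directory
 * is silently ignored; any other opendir() failure is reported.
 */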
void for_each_file_in_pack_subdir(const char *objdir,
				  const char *subdir,
				  each_file_in_pack_dir_fn fn,
				  void *data)
{
	struct strbuf path = STRBUF_INIT;
	size_t dirnamelen;
	DIR *dir;
	struct dirent *de;

	strbuf_addstr(&path, objdir);
	strbuf_addstr(&path, "/pack");
	if (subdir)
		strbuf_addf(&path, "/%s", subdir);
	dir = opendir(path.buf);
	if (!dir) {
		if (errno != ENOENT)
			error_errno("unable to open object pack directory: %s",
				    path.buf);
		strbuf_release(&path);
		return;
	}
	strbuf_addch(&path, '/');
	dirnamelen = path.len;
	while ((de = readdir_skip_dot_and_dotdot(dir)) != NULL) {
		strbuf_setlen(&path, dirnamelen);
		strbuf_addstr(&path, de->d_name);

		fn(path.buf, path.len, de->d_name, data);
	}

	closedir(dir);
	strbuf_release(&path);
}
void for_each_file_in_pack_dir(const char *objdir,
			       each_file_in_pack_dir_fn fn,
			       void *data)
{
	for_each_file_in_pack_subdir(objdir, NULL, fn, data);
}
struct prepare_pack_data {
	struct repository *r;
	struct string_list *garbage;
	int local;
	struct multi_pack_index *m;
};
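/*
 * Callback for for_each_file_in_pack_dir(): open any ".idx" that is
 * not already covered by the multi-pack-index or by an existing
 * packed_git; when garbage reporting is enabled, the remaining entries
 * are either queued as recognized pack companions for later pairing
 * checks or reported as garbage right away.
 */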
static void prepare_pack(const char *full_name, size_t full_name_len,
			 const char *file_name, void *_data)
{
	struct prepare_pack_data *data = (struct prepare_pack_data *)_data;
	struct packed_git *p;
	size_t base_len = full_name_len;

	if (strip_suffix_mem(full_name, &base_len, ".idx") &&
	    !(data->m && midx_contains_pack(data->m, file_name))) {
		struct hashmap_entry hent;
		char *pack_name = xstrfmt("%.*s.pack", (int)base_len, full_name);
		unsigned int hash = strhash(pack_name);
		hashmap_entry_init(&hent, hash);

		/* Don't reopen a pack we already have. */
		if (!hashmap_get(&data->r->objects->pack_map, &hent, pack_name)) {
			p = add_packed_git(full_name, full_name_len, data->local);
			if (p)
				install_packed_git(data->r, p);
		}
		free(pack_name);
	}

	if (!report_garbage)
		return;

	if (!strcmp(file_name, "multi-pack-index") ||
	    !strcmp(file_name, "multi-pack-index.d"))
		return;
	if (starts_with(file_name, "multi-pack-index") &&
	    (ends_with(file_name, ".bitmap") || ends_with(file_name, ".rev")))
		return;
	if (ends_with(file_name, ".idx") ||
	    ends_with(file_name, ".rev") ||
	    ends_with(file_name, ".pack") ||
	    ends_with(file_name, ".bitmap") ||
	    ends_with(file_name, ".keep") ||
	    ends_with(file_name, ".promisor") ||
	    ends_with(file_name, ".mtimes"))
		string_list_append(data->garbage, full_name);
	else
		report_garbage(PACKDIR_FILE_GARBAGE, full_name);
}
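/*
 * Scan one object directory: find its multi-pack-index (if any), add
 * every pack not covered by it, and report stray files left behind in
 * the pack directory.
 */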
static void prepare_packed_git_one(struct repository *r, char *objdir, int local)
{
	struct prepare_pack_data data;
	struct string_list garbage = STRING_LIST_INIT_DUP;

	data.m = r->objects->multi_pack_index;

	/* look for the multi-pack-index for this object directory */
	while (data.m && strcmp(data.m->object_dir, objdir))
		data.m = data.m->next;

	data.r = r;
	data.garbage = &garbage;
	data.local = local;

	for_each_file_in_pack_dir(objdir, prepare_pack, &data);

	report_pack_garbage(data.garbage);
	string_list_clear(data.garbage, 0);
}

static void prepare_packed_git(struct repository *r);
/*
 * Give a fast, rough count of the number of objects in the repository. This
 * ignores loose objects completely. If you have a lot of them, then either
 * you should repack because your performance will be awful, or they are
 * all unreachable objects about to be pruned, in which case they're not really
 * interesting as a measure of repo size in the first place.
 */
unsigned long repo_approximate_object_count(struct repository *r)
{
	if (!r->objects->approximate_object_count_valid) {
		unsigned long count;
		struct multi_pack_index *m;
		struct packed_git *p;

		prepare_packed_git(r);
		count = 0;
		for (m = get_multi_pack_index(r); m; m = m->next)
			count += m->num_objects;
		for (p = r->objects->packed_git; p; p = p->next) {
			if (open_pack_index(p))
				continue;
			count += p->num_objects;
		}
		r->objects->approximate_object_count = count;
		r->objects->approximate_object_count_valid = 1;
	}
	return r->objects->approximate_object_count;
}
DEFINE_LIST_SORT(static, sort_packs, struct packed_git, next);

static int sort_pack(const struct packed_git *a, const struct packed_git *b)
{
	int st;

	/*
	 * Local packs tend to contain objects more specific to our
	 * variant of the project than remote ones. In addition,
	 * remote ones could be on a network mounted filesystem.
	 * Favor local ones for these reasons.
	 */
	st = a->pack_local - b->pack_local;
	if (st)
		return -st;

	/*
	 * Younger packs tend to contain more recent objects,
	 * and more recent objects tend to get accessed more
	 * often.
	 */
	if (a->mtime < b->mtime)
		return 1;
	else if (a->mtime == b->mtime)
		return 0;
	return -1;
}
static void rearrange_packed_git(struct repository *r)
{
	sort_packs(&r->objects->packed_git, sort_pack);
}
static void prepare_packed_git_mru(struct repository *r)
{
	struct packed_git *p;

	INIT_LIST_HEAD(&r->objects->packed_git_mru);

	for (p = r->objects->packed_git; p; p = p->next)
		list_add_tail(&p->mru, &r->objects->packed_git_mru);
}
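/*
 * Populate the repository's pack list once per process (until
 * reprepare_packed_git() invalidates it): load the multi-pack-index
 * and packs of the local object directory and of every alternate,
 * then sort the list and seed the MRU list.
 */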
static void prepare_packed_git(struct repository *r)
{
	struct object_directory *odb;

	if (r->objects->packed_git_initialized)
		return;

	prepare_alt_odb(r);
	for (odb = r->objects->odb; odb; odb = odb->next) {
		int local = (odb == r->objects->odb);
		prepare_multi_pack_index_one(r, odb->path, local);
		prepare_packed_git_one(r, odb->path, local);
	}
	rearrange_packed_git(r);

	prepare_packed_git_mru(r);
	r->objects->packed_git_initialized = 1;
}
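/*
 * Re-scan the object directories under the object-read lock, picking
 * up packs and alternates that appeared after the initial
 * prepare_packed_git() call (e.g. written by another process).
 */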
void reprepare_packed_git(struct repository *r)
{
	struct object_directory *odb;

	obj_read_lock();

	/*
	 * Reprepare alt odbs, in case the alternates file was modified
	 * during the course of this process. This only _adds_ odbs to
	 * the linked list, so existing odbs will continue to exist for
	 * the lifetime of the process.
	 */
	r->objects->loaded_alternates = 0;
	prepare_alt_odb(r);

	for (odb = r->objects->odb; odb; odb = odb->next)
		odb_clear_loose_cache(odb);

	r->objects->approximate_object_count_valid = 0;
	r->objects->packed_git_initialized = 0;
	prepare_packed_git(r);
	obj_read_unlock();
}
struct packed_git *get_packed_git(struct repository *r)
{
	prepare_packed_git(r);
	return r->objects->packed_git;
}
struct multi_pack_index *get_multi_pack_index(struct repository *r)
{
	prepare_packed_git(r);
	return r->objects->multi_pack_index;
}
struct multi_pack_index *get_local_multi_pack_index(struct repository *r)
{
	struct multi_pack_index *m = get_multi_pack_index(r);

	/* no need to iterate; we always put the local one first (if any) */
	if (m && m->local)
		return m;

	return NULL;
}
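/*
 * Like get_packed_git(), but additionally instantiate a packed_git for
 * every pack referenced by each multi-pack-index, so the returned list
 * really covers all packs of the repository.
 */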
struct packed_git *get_all_packs(struct repository *r)
{
	struct multi_pack_index *m;

	prepare_packed_git(r);
	for (m = r->objects->multi_pack_index; m; m = m->next) {
		uint32_t i;
		for (i = 0; i < m->num_packs + m->num_packs_in_base; i++)
			prepare_midx_pack(r, m, i);
	}

	return r->objects->packed_git;
}
struct list_head *get_packed_git_mru(struct repository *r)
{
	prepare_packed_git(r);
	return &r->objects->packed_git_mru;
}
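/*
 * Decode a packed object header: the first byte carries the type in
 * bits 6..4 and the low four bits of the size; while the MSB is set,
 * each following byte contributes seven more size bits (least
 * significant group first).  For example, the two bytes 0x95 0x0a
 * decode to type 1 (OBJ_COMMIT) and size 5 + (0x0a << 4) = 165, with
 * two bytes consumed.  On a malformed header an error is reported and
 * 0 is returned.
 */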
unsigned long unpack_object_header_buffer(const unsigned char *buf,
		unsigned long len, enum object_type *type, unsigned long *sizep)
{
	unsigned shift;
	size_t size, c;
	unsigned long used = 0;

	c = buf[used++];
	*type = (c >> 4) & 7;
	size = c & 15;
	shift = 4;
	while (c & 0x80) {
		if (len <= used || (bitsizeof(long) - 7) < shift) {
			error("bad object header");
			size = used = 0;
			break;
		}
		c = buf[used++];
		size = st_add(size, st_left_shift(c & 0x7f, shift));
		shift += 7;
	}
	*sizep = cast_size_t_to_ulong(size);
	return used;
}
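/*
 * Inflate just enough of a deltified object's data at `curpos` to see
 * its delta header; the header encodes the base size followed by the
 * result size, and (as the name suggests) the result size is what this
 * helper is after.
 */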
unsigned long get_size_from_delta(struct packed_git *p,
|
|
|
|
struct pack_window **w_curs,
|
|
|
|
off_t curpos)
|
|
|
|
{
|
|
|
|
const unsigned char *data;
|
|
|
|
unsigned char delta_head[20], *in;
|
|
|
|
git_zstream stream;
|
|
|
|
int st;
|
|
|
|
|
|
|
|
memset(&stream, 0, sizeof(stream));
|
|
|
|
stream.next_out = delta_head;
|
|
|
|
stream.avail_out = sizeof(delta_head);
|
|
|
|
|
|
|
|
git_inflate_init(&stream);
|
|
|
|
do {
|
|
|
|
in = use_pack(p, w_curs, curpos, &stream.avail_in);
|
|
|
|
stream.next_in = in;
|
object-store: allow threaded access to object reading
Allow object reading to be performed by multiple threads protecting it
with an internal lock, the obj_read_mutex. The lock usage can be toggled
with enable_obj_read_lock() and disable_obj_read_lock(). Currently, the
functions which can be safely called in parallel are:
read_object_file_extended(), repo_read_object_file(),
read_object_file(), read_object_with_reference(), read_object(),
oid_object_info() and oid_object_info_extended(). It's also possible
to use obj_read_lock() and obj_read_unlock() to protect other sections
that cannot execute in parallel with object reading.
Probably there are many spots in the functions listed above that could
be executed unlocked (and thus, in parallel). But, for now, we are most
interested in allowing parallel access to zlib inflation. This is one of
the sections where object reading spends most of its time (e.g. up to

one-third of git-grep's execution time in the chromium repo corresponds
to inflation) and it's already thread-safe. So, to take advantage of
that, the obj_read_mutex is released when calling git_inflate() and
re-acquired right after, for every calling spot in
oid_object_info_extended()'s call chain. We may refine this lock to also
exploit other possible parallel spots in the future, but for now,
threaded zlib inflation should already give great speedups for threaded
object reading callers.
Note that add_delta_base_cache() was also modified to skip adding
already present entries to the cache. This wasn't possible before, but
it would be now, with the parallel inflation. Take for example the
following situation, where two threads - A and B - are executing the
code at unpack_entry():
1. Thread A is performing the decompression of a base O (which is not
yet in the cache) at PHASE II. Thread B is simultaneously trying to
unpack O, but just starting at PHASE I.
2. Since O is not yet in the cache, B will go to PHASE II to also
perform the decompression.
3. When they finish decompressing, one of them will get the object
reading mutex and go to PHASE III while the other waits for the
mutex. Let’s say A got the mutex first.
4. Thread A will add O to the cache, go through the rest of PHASE III
and return.
5. Thread B gets the mutex, also adds O to the cache (if the check wasn't
there) and returns.
Finally, it is also important to highlight that the object reading lock
can only ensure thread-safety in the mentioned functions thanks to two
complementary mechanisms: the use of 'struct raw_object_store's
replace_mutex, which guards sections in the object reading machinery
that would otherwise be thread-unsafe; and the 'struct pack_window's
inuse_cnt, which protects window reading operations (such as the one
performed during the inflation of a packed object), allowing them to
execute without the acquisition of the obj_read_mutex.
Signed-off-by: Matheus Tavares <matheus.bernardino@usp.br>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-01-16 10:39:53 +08:00
|
|
|
/*
|
|
|
|
* Note: the window section returned by use_pack() must be
|
|
|
|
* available throughout git_inflate()'s unlocked execution. To
|
|
|
|
* ensure no other thread will modify the window in the
|
|
|
|
* meantime, we rely on the packed_window.inuse_cnt. This
|
|
|
|
* counter is incremented before window reading and checked
|
|
|
|
* before window disposal.
|
|
|
|
*
|
|
|
|
* Other worrying sections could be the call to close_pack_fd(),
|
|
|
|
* which can close packs even with in-use windows, and to
|
|
|
|
* reprepare_packed_git(). Regarding the former, mmap doc says:
|
|
|
|
* "closing the file descriptor does not unmap the region". And
|
|
|
|
* for the latter, it won't re-open already available packs.
|
|
|
|
*/
|
|
|
|
obj_read_unlock();
|
2017-08-19 06:20:28 +08:00
|
|
|
st = git_inflate(&stream, Z_FINISH);
|
2020-01-16 10:39:53 +08:00
|
|
|
obj_read_lock();
|
2017-08-19 06:20:28 +08:00
|
|
|
curpos += stream.next_in - in;
|
|
|
|
} while ((st == Z_OK || st == Z_BUF_ERROR) &&
|
|
|
|
stream.total_out < sizeof(delta_head));
|
|
|
|
git_inflate_end(&stream);
|
|
|
|
if ((st != Z_STREAM_END) && stream.total_out != sizeof(delta_head)) {
|
|
|
|
error("delta data unpack-initial failed");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Examine the initial part of the delta to figure out
|
|
|
|
* the result size.
|
|
|
|
*/
|
|
|
|
data = delta_head;
|
|
|
|
|
|
|
|
/* ignore base size */
|
|
|
|
get_delta_hdr_size(&data, delta_head+sizeof(delta_head));
|
|
|
|
|
|
|
|
/* Read the result size */
|
|
|
|
return get_delta_hdr_size(&data, delta_head+sizeof(delta_head));
|
|
|
|
}
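The two headers handled above (base size, then result size) are plain little-endian base-128 varints decoded by get_delta_hdr_size(); a worked example with made-up bytes, illustrative only.

/*
 * Delta header varint, worked example (illustrative):
 *
 *   bytes 0xe5 0x8e 0x26
 *     0xe5: low 7 bits = 0x65 (101),          MSB set   -> continue
 *     0x8e: low 7 bits = 0x0e (14)  << 7  = 1792,  MSB set   -> continue
 *     0x26: low 7 bits = 0x26 (38)  << 14 = 622592, MSB clear -> stop
 *   value = 101 + 1792 + 622592 = 624485
 *
 * get_size_from_delta() skips one such varint (the base size) and
 * returns the second (the inflated result size).
 */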
|
2017-08-19 06:20:29 +08:00
|
|
|
|
|
|
|
int unpack_object_header(struct packed_git *p,
|
|
|
|
struct pack_window **w_curs,
|
|
|
|
off_t *curpos,
|
|
|
|
unsigned long *sizep)
|
|
|
|
{
|
|
|
|
unsigned char *base;
|
|
|
|
unsigned long left;
|
|
|
|
unsigned long used;
|
|
|
|
enum object_type type;
|
|
|
|
|
|
|
|
/* use_pack() assures us we have [base, base + 20) available
|
|
|
|
* as a range that we can look at. (It's actually the hash
|
|
|
|
* size that is assured.) With our object header encoding
|
|
|
|
* the maximum deflated object size is 2^137, which is just
|
|
|
|
* insane, so we know it won't exceed what we have been given.
|
|
|
|
*/
|
|
|
|
base = use_pack(p, w_curs, *curpos, &left);
|
|
|
|
used = unpack_object_header_buffer(base, left, &type, sizep);
|
|
|
|
if (!used) {
|
|
|
|
type = OBJ_BAD;
|
|
|
|
} else
|
|
|
|
*curpos += used;
|
|
|
|
|
|
|
|
return type;
|
|
|
|
}
|
2017-08-19 06:20:30 +08:00
|
|
|
|
2021-09-12 04:40:33 +08:00
|
|
|
void mark_bad_packed_object(struct packed_git *p, const struct object_id *oid)
|
2017-08-19 06:20:30 +08:00
|
|
|
{
|
2021-09-12 04:43:26 +08:00
|
|
|
oidset_insert(&p->bad_objects, oid);
|
2017-08-19 06:20:30 +08:00
|
|
|
}
|
|
|
|
|
2018-10-17 07:35:33 +08:00
|
|
|
const struct packed_git *has_packed_and_bad(struct repository *r,
|
2021-09-12 04:42:20 +08:00
|
|
|
const struct object_id *oid)
|
2017-08-19 06:20:30 +08:00
|
|
|
{
|
|
|
|
struct packed_git *p;
|
|
|
|
|
2018-10-17 07:35:33 +08:00
|
|
|
for (p = r->objects->packed_git; p; p = p->next)
|
2021-09-12 04:43:26 +08:00
|
|
|
if (oidset_contains(&p->bad_objects, oid))
|
|
|
|
return p;
|
2017-08-19 06:20:30 +08:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2019-09-13 21:02:18 +08:00
|
|
|
off_t get_delta_base(struct packed_git *p,
|
|
|
|
struct pack_window **w_curs,
|
|
|
|
off_t *curpos,
|
|
|
|
enum object_type type,
|
|
|
|
off_t delta_obj_offset)
|
2017-08-19 06:20:30 +08:00
|
|
|
{
|
|
|
|
unsigned char *base_info = use_pack(p, w_curs, *curpos, NULL);
|
|
|
|
off_t base_offset;
|
|
|
|
|
|
|
|
/* use_pack() assured us we have [base_info, base_info + 20)
|
|
|
|
* as a range that we can look at without walking off the
|
|
|
|
* end of the mapped window. It's actually the hash size
|
|
|
|
* that is assured. An OFS_DELTA longer than the hash size
|
|
|
|
* is stupid, as then a REF_DELTA would be smaller to store.
|
|
|
|
*/
|
|
|
|
if (type == OBJ_OFS_DELTA) {
|
|
|
|
unsigned used = 0;
|
|
|
|
unsigned char c = base_info[used++];
|
|
|
|
base_offset = c & 127;
|
|
|
|
while (c & 128) {
|
|
|
|
base_offset += 1;
|
|
|
|
if (!base_offset || MSB(base_offset, 7))
|
|
|
|
return 0; /* overflow */
|
|
|
|
c = base_info[used++];
|
|
|
|
base_offset = (base_offset << 7) + (c & 127);
|
|
|
|
}
|
|
|
|
base_offset = delta_obj_offset - base_offset;
|
|
|
|
if (base_offset <= 0 || base_offset >= delta_obj_offset)
|
|
|
|
return 0; /* out of bound */
|
|
|
|
*curpos += used;
|
|
|
|
} else if (type == OBJ_REF_DELTA) {
|
|
|
|
/* The base entry _must_ be in the same pack */
|
2024-10-25 15:06:06 +08:00
|
|
|
struct object_id oid;
|
packfile: use oidread() instead of hashcpy() to fill object_id
When chasing a REF_DELTA, we need to pull the raw hash bytes out of the
mmap'd packfile into an object_id struct. We do that with a raw
hashcpy() of the appropriate length (that happens directly now, though
before the previous commit it happened inside find_pack_entry_one(),
also using a hashcpy).
But I think this creates a potentially dangerous situation due to
d4d364b2c7 (hash: convert `oidcmp()` and `oideq()` to compare whole
hash, 2024-06-14). When using sha1, we'll have uninitialized bytes in
the latter part of the object_id.hash buffer, which could fool oideq(),
etc.
We should use oidread() instead, which correctly zero-pads the extra
bytes, as of c98d762ed9 (global: ensure that object IDs are always
padded, 2024-06-14).
As far as I can see, this has not been a problem in practice because the
object_id we feed to find_pack_entry_one() is never used with oideq(),
etc. It is being compared to the bytes mmap'd from a pack idx file,
which of course do not have the extra padding bytes themselves. So
there's no bug here, but this just puzzled me while looking at the code.
We should do the more obviously safe thing, both for future-proofing and
to avoid confusing readers.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Taylor Blau <me@ttaylorr.com>
2024-10-25 15:08:10 +08:00
|
|
|
oidread(&oid, base_info, the_repository->hash_algo);
|
2024-10-25 15:06:06 +08:00
|
|
|
base_offset = find_pack_entry_one(&oid, p);
|
2018-05-02 08:25:36 +08:00
|
|
|
*curpos += the_hash_algo->rawsz;
|
2017-08-19 06:20:30 +08:00
|
|
|
} else
|
|
|
|
die("I am totally screwed");
|
|
|
|
return base_offset;
|
|
|
|
}
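The OFS_DELTA encoding decoded above stores the base offset big-endian, 7 bits per byte, with an extra +1 folded in for every continuation byte so that multi-byte encodings never overlap shorter ones. A worked example with made-up bytes, illustrative only.

/*
 * OFS_DELTA base offset, worked example (illustrative):
 *
 *   bytes 0x91 0x2e, delta_obj_offset = 10000
 *     c = 0x91: base_offset = 0x11 = 17, MSB set -> continue
 *               base_offset = (17 + 1) << 7 = 2304, then + 0x2e = 2350
 *   so the base object lives at 10000 - 2350 = 7650 in the same pack.
 */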
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Like get_delta_base above, but we return the sha1 instead of the pack
|
|
|
|
* offset. This means it is cheaper for REF deltas (we do not have to do
|
|
|
|
* the final object lookup), but more expensive for OFS deltas (we
|
|
|
|
* have to load the revidx to convert the offset back into a sha1).
|
|
|
|
*/
|
2020-02-24 12:37:31 +08:00
|
|
|
static int get_delta_base_oid(struct packed_git *p,
|
|
|
|
struct pack_window **w_curs,
|
|
|
|
off_t curpos,
|
|
|
|
struct object_id *oid,
|
|
|
|
enum object_type type,
|
|
|
|
off_t delta_obj_offset)
|
2017-08-19 06:20:30 +08:00
|
|
|
{
|
|
|
|
if (type == OBJ_REF_DELTA) {
|
|
|
|
unsigned char *base = use_pack(p, w_curs, curpos, NULL);
|
2024-06-14 14:49:54 +08:00
|
|
|
oidread(oid, base, the_repository->hash_algo);
|
2020-02-24 12:37:31 +08:00
|
|
|
return 0;
|
2017-08-19 06:20:30 +08:00
|
|
|
} else if (type == OBJ_OFS_DELTA) {
|
2021-01-14 06:24:32 +08:00
|
|
|
uint32_t base_pos;
|
2017-08-19 06:20:30 +08:00
|
|
|
off_t base_offset = get_delta_base(p, w_curs, &curpos,
|
|
|
|
type, delta_obj_offset);
|
|
|
|
|
|
|
|
if (!base_offset)
|
2020-02-24 12:37:31 +08:00
|
|
|
return -1;
|
2017-08-19 06:20:30 +08:00
|
|
|
|
2021-01-14 06:24:32 +08:00
|
|
|
if (offset_to_pack_pos(p, base_offset, &base_pos) < 0)
|
2020-02-24 12:37:31 +08:00
|
|
|
return -1;
|
2017-08-19 06:20:30 +08:00
|
|
|
|
2021-01-14 06:24:32 +08:00
|
|
|
return nth_packed_object_id(oid, p,
|
|
|
|
pack_pos_to_index(p, base_pos));
|
2017-08-19 06:20:30 +08:00
|
|
|
} else
|
2020-02-24 12:37:31 +08:00
|
|
|
return -1;
|
2017-08-19 06:20:30 +08:00
|
|
|
}
|
|
|
|
|
2018-04-26 02:21:06 +08:00
|
|
|
static int retry_bad_packed_offset(struct repository *r,
|
|
|
|
struct packed_git *p,
|
|
|
|
off_t obj_offset)
|
2017-08-19 06:20:30 +08:00
|
|
|
{
|
|
|
|
int type;
|
2021-01-14 06:24:36 +08:00
|
|
|
uint32_t pos;
|
2018-03-12 10:27:43 +08:00
|
|
|
struct object_id oid;
|
2021-01-14 06:24:36 +08:00
|
|
|
if (offset_to_pack_pos(p, obj_offset, &pos) < 0)
|
2017-08-19 06:20:30 +08:00
|
|
|
return OBJ_BAD;
|
2021-01-14 06:24:36 +08:00
|
|
|
nth_packed_object_id(&oid, p, pack_pos_to_index(p, pos));
|
2021-09-12 04:40:33 +08:00
|
|
|
mark_bad_packed_object(p, &oid);
|
2018-04-26 02:21:06 +08:00
|
|
|
type = oid_object_info(r, &oid, NULL);
|
2017-08-19 06:20:30 +08:00
|
|
|
if (type <= OBJ_NONE)
|
|
|
|
return OBJ_BAD;
|
|
|
|
return type;
|
|
|
|
}
|
|
|
|
|
|
|
|
#define POI_STACK_PREALLOC 64
|
|
|
|
|
2018-04-26 02:21:06 +08:00
|
|
|
static enum object_type packed_to_object_type(struct repository *r,
|
|
|
|
struct packed_git *p,
|
2017-08-19 06:20:30 +08:00
|
|
|
off_t obj_offset,
|
|
|
|
enum object_type type,
|
|
|
|
struct pack_window **w_curs,
|
|
|
|
off_t curpos)
|
|
|
|
{
|
|
|
|
off_t small_poi_stack[POI_STACK_PREALLOC];
|
|
|
|
off_t *poi_stack = small_poi_stack;
|
|
|
|
int poi_stack_nr = 0, poi_stack_alloc = POI_STACK_PREALLOC;
|
|
|
|
|
|
|
|
while (type == OBJ_OFS_DELTA || type == OBJ_REF_DELTA) {
|
|
|
|
off_t base_offset;
|
|
|
|
unsigned long size;
|
|
|
|
/* Push the object we're going to leave behind */
|
|
|
|
if (poi_stack_nr >= poi_stack_alloc && poi_stack == small_poi_stack) {
|
|
|
|
poi_stack_alloc = alloc_nr(poi_stack_nr);
|
|
|
|
ALLOC_ARRAY(poi_stack, poi_stack_alloc);
|
2019-06-16 02:36:35 +08:00
|
|
|
COPY_ARRAY(poi_stack, small_poi_stack, poi_stack_nr);
|
2017-08-19 06:20:30 +08:00
|
|
|
} else {
|
|
|
|
ALLOC_GROW(poi_stack, poi_stack_nr+1, poi_stack_alloc);
|
|
|
|
}
|
|
|
|
poi_stack[poi_stack_nr++] = obj_offset;
|
|
|
|
/* If parsing the base offset fails, just unwind */
|
|
|
|
base_offset = get_delta_base(p, w_curs, &curpos, type, obj_offset);
|
|
|
|
if (!base_offset)
|
|
|
|
goto unwind;
|
|
|
|
curpos = obj_offset = base_offset;
|
|
|
|
type = unpack_object_header(p, w_curs, &curpos, &size);
|
|
|
|
if (type <= OBJ_NONE) {
|
|
|
|
/* If getting the base itself fails, we first
|
|
|
|
* retry the base, otherwise unwind */
|
2018-04-26 02:21:06 +08:00
|
|
|
type = retry_bad_packed_offset(r, p, base_offset);
|
2017-08-19 06:20:30 +08:00
|
|
|
if (type > OBJ_NONE)
|
|
|
|
goto out;
|
|
|
|
goto unwind;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (type) {
|
|
|
|
case OBJ_BAD:
|
|
|
|
case OBJ_COMMIT:
|
|
|
|
case OBJ_TREE:
|
|
|
|
case OBJ_BLOB:
|
|
|
|
case OBJ_TAG:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
error("unknown object type %i at offset %"PRIuMAX" in %s",
|
|
|
|
type, (uintmax_t)obj_offset, p->pack_name);
|
|
|
|
type = OBJ_BAD;
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
if (poi_stack != small_poi_stack)
|
|
|
|
free(poi_stack);
|
|
|
|
return type;
|
|
|
|
|
|
|
|
unwind:
|
|
|
|
while (poi_stack_nr) {
|
|
|
|
obj_offset = poi_stack[--poi_stack_nr];
|
2018-04-26 02:21:06 +08:00
|
|
|
type = retry_bad_packed_offset(r, p, obj_offset);
|
2017-08-19 06:20:30 +08:00
|
|
|
if (type > OBJ_NONE)
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
type = OBJ_BAD;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct hashmap delta_base_cache;
|
|
|
|
static size_t delta_base_cached;
|
|
|
|
|
|
|
|
static LIST_HEAD(delta_base_cache_lru);
|
|
|
|
|
|
|
|
struct delta_base_cache_key {
|
|
|
|
struct packed_git *p;
|
|
|
|
off_t base_offset;
|
|
|
|
};
|
|
|
|
|
|
|
|
struct delta_base_cache_entry {
|
2019-10-07 07:30:26 +08:00
|
|
|
struct hashmap_entry ent;
|
2017-08-19 06:20:30 +08:00
|
|
|
struct delta_base_cache_key key;
|
|
|
|
struct list_head lru;
|
|
|
|
void *data;
|
|
|
|
unsigned long size;
|
|
|
|
enum object_type type;
|
|
|
|
};
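A brief editorial note on the structures above: each cache entry is reachable two ways, and both links must stay in sync.

/*
 * Editorial note: each entry is indexed by the hashmap (keyed by
 * pack + offset, for O(1) lookup in get_delta_base_cache_entry()) and
 * also sits on the global LRU list, which add_delta_base_cache() walks
 * to evict old entries once delta_base_cached exceeds the configured
 * delta_base_cache_limit.
 */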
|
|
|
|
|
|
|
|
static unsigned int pack_entry_hash(struct packed_git *p, off_t base_offset)
|
|
|
|
{
|
|
|
|
unsigned int hash;
|
|
|
|
|
|
|
|
hash = (unsigned int)(intptr_t)p + (unsigned int)base_offset;
|
|
|
|
hash += (hash >> 8) + (hash >> 16);
|
|
|
|
return hash;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct delta_base_cache_entry *
|
|
|
|
get_delta_base_cache_entry(struct packed_git *p, off_t base_offset)
|
|
|
|
{
|
2019-10-07 07:30:36 +08:00
|
|
|
struct hashmap_entry entry, *e;
|
2017-08-19 06:20:30 +08:00
|
|
|
struct delta_base_cache_key key;
|
|
|
|
|
|
|
|
if (!delta_base_cache.cmpfn)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
hashmap_entry_init(&entry, pack_entry_hash(p, base_offset));
|
|
|
|
key.p = p;
|
|
|
|
key.base_offset = base_offset;
|
2019-10-07 07:30:36 +08:00
|
|
|
e = hashmap_get(&delta_base_cache, &entry, &key);
|
|
|
|
return e ? container_of(e, struct delta_base_cache_entry, ent) : NULL;
|
2017-08-19 06:20:30 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int delta_base_cache_key_eq(const struct delta_base_cache_key *a,
|
|
|
|
const struct delta_base_cache_key *b)
|
|
|
|
{
|
|
|
|
return a->p == b->p && a->base_offset == b->base_offset;
|
|
|
|
}
|
|
|
|
|
2022-08-26 01:09:48 +08:00
|
|
|
static int delta_base_cache_hash_cmp(const void *cmp_data UNUSED,
|
2019-10-07 07:30:37 +08:00
|
|
|
const struct hashmap_entry *va,
|
|
|
|
const struct hashmap_entry *vb,
|
2017-08-19 06:20:30 +08:00
|
|
|
const void *vkey)
|
|
|
|
{
|
2019-10-07 07:30:37 +08:00
|
|
|
const struct delta_base_cache_entry *a, *b;
|
2017-08-19 06:20:30 +08:00
|
|
|
const struct delta_base_cache_key *key = vkey;
|
2019-10-07 07:30:37 +08:00
|
|
|
|
|
|
|
a = container_of(va, const struct delta_base_cache_entry, ent);
|
|
|
|
b = container_of(vb, const struct delta_base_cache_entry, ent);
|
|
|
|
|
2017-08-19 06:20:30 +08:00
|
|
|
if (key)
|
|
|
|
return !delta_base_cache_key_eq(&a->key, key);
|
|
|
|
else
|
|
|
|
return !delta_base_cache_key_eq(&a->key, &b->key);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int in_delta_base_cache(struct packed_git *p, off_t base_offset)
|
|
|
|
{
|
|
|
|
return !!get_delta_base_cache_entry(p, base_offset);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Remove the entry from the cache, but do _not_ free the associated
|
|
|
|
* entry data. The caller takes ownership of the "data" buffer, and
|
|
|
|
* should copy out any fields it wants before detaching.
|
|
|
|
*/
|
|
|
|
static void detach_delta_base_cache_entry(struct delta_base_cache_entry *ent)
|
|
|
|
{
|
2019-10-07 07:30:31 +08:00
|
|
|
hashmap_remove(&delta_base_cache, &ent->ent, &ent->key);
|
2017-08-19 06:20:30 +08:00
|
|
|
list_del(&ent->lru);
|
|
|
|
delta_base_cached -= ent->size;
|
|
|
|
free(ent);
|
|
|
|
}
|
|
|
|
|
2018-04-26 02:21:06 +08:00
|
|
|
static void *cache_or_unpack_entry(struct repository *r, struct packed_git *p,
|
|
|
|
off_t base_offset, unsigned long *base_size,
|
|
|
|
enum object_type *type)
|
2017-08-19 06:20:30 +08:00
|
|
|
{
|
|
|
|
struct delta_base_cache_entry *ent;
|
|
|
|
|
|
|
|
ent = get_delta_base_cache_entry(p, base_offset);
|
|
|
|
if (!ent)
|
2018-04-26 02:21:06 +08:00
|
|
|
return unpack_entry(r, p, base_offset, type, base_size);
|
2017-08-19 06:20:30 +08:00
|
|
|
|
|
|
|
if (type)
|
|
|
|
*type = ent->type;
|
|
|
|
if (base_size)
|
|
|
|
*base_size = ent->size;
|
|
|
|
return xmemdupz(ent->data, ent->size);
|
|
|
|
}
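A hedged usage sketch for the function above: on both paths the returned buffer is owned by the caller (the cache-hit path hands back a copy via xmemdupz()), so it must be freed. The helper below is hypothetical.

/* Hypothetical caller, for illustration only. */
static void show_packed_size(struct repository *r, struct packed_git *p,
			     off_t offset)
{
	unsigned long size;
	enum object_type type;
	void *buf = cache_or_unpack_entry(r, p, offset, &size, &type);

	if (buf) {
		printf("%s, %lu bytes\n", type_name(type), size);
		free(buf); /* caller owns the buffer on both paths */
	}
}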
|
|
|
|
|
|
|
|
static inline void release_delta_base_cache(struct delta_base_cache_entry *ent)
|
|
|
|
{
|
|
|
|
free(ent->data);
|
|
|
|
detach_delta_base_cache_entry(ent);
|
|
|
|
}
|
|
|
|
|
|
|
|
void clear_delta_base_cache(void)
|
|
|
|
{
|
|
|
|
struct list_head *lru, *tmp;
|
|
|
|
list_for_each_safe(lru, tmp, &delta_base_cache_lru) {
|
|
|
|
struct delta_base_cache_entry *entry =
|
|
|
|
list_entry(lru, struct delta_base_cache_entry, lru);
|
|
|
|
release_delta_base_cache(entry);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void add_delta_base_cache(struct packed_git *p, off_t base_offset,
|
|
|
|
void *base, unsigned long base_size, enum object_type type)
|
|
|
|
{
|
2020-09-29 08:01:53 +08:00
|
|
|
struct delta_base_cache_entry *ent;
|
2017-08-19 06:20:30 +08:00
|
|
|
struct list_head *lru, *tmp;
|
|
|
|
|
2020-01-16 10:39:53 +08:00
|
|
|
/*
|
|
|
|
* Check required to avoid redundant entries when more than one thread
|
|
|
|
* is unpacking the same object, in unpack_entry() (since its phases I
|
|
|
|
* and III might run concurrently across multiple threads).
|
|
|
|
*/
|
2020-09-29 08:01:53 +08:00
|
|
|
if (in_delta_base_cache(p, base_offset)) {
|
|
|
|
free(base);
|
2020-01-16 10:39:53 +08:00
|
|
|
return;
|
2020-09-29 08:01:53 +08:00
|
|
|
}
|
2020-01-16 10:39:53 +08:00
|
|
|
|
2017-08-19 06:20:30 +08:00
|
|
|
delta_base_cached += base_size;
|
|
|
|
|
|
|
|
list_for_each_safe(lru, tmp, &delta_base_cache_lru) {
|
|
|
|
struct delta_base_cache_entry *f =
|
|
|
|
list_entry(lru, struct delta_base_cache_entry, lru);
|
|
|
|
if (delta_base_cached <= delta_base_cache_limit)
|
|
|
|
break;
|
|
|
|
release_delta_base_cache(f);
|
|
|
|
}
|
|
|
|
|
2020-09-29 08:01:53 +08:00
|
|
|
ent = xmalloc(sizeof(*ent));
|
2017-08-19 06:20:30 +08:00
|
|
|
ent->key.p = p;
|
|
|
|
ent->key.base_offset = base_offset;
|
|
|
|
ent->type = type;
|
|
|
|
ent->data = base;
|
|
|
|
ent->size = base_size;
|
|
|
|
list_add_tail(&ent->lru, &delta_base_cache_lru);
|
|
|
|
|
|
|
|
if (!delta_base_cache.cmpfn)
|
|
|
|
hashmap_init(&delta_base_cache, delta_base_cache_hash_cmp, NULL, 0);
|
2019-10-07 07:30:27 +08:00
|
|
|
hashmap_entry_init(&ent->ent, pack_entry_hash(p, base_offset));
|
2019-10-07 07:30:29 +08:00
|
|
|
hashmap_add(&delta_base_cache, &ent->ent);
|
2017-08-19 06:20:30 +08:00
|
|
|
}
|
|
|
|
|
2018-04-26 02:21:06 +08:00
|
|
|
int packed_object_info(struct repository *r, struct packed_git *p,
|
|
|
|
off_t obj_offset, struct object_info *oi)
|
2017-08-19 06:20:30 +08:00
|
|
|
{
|
|
|
|
struct pack_window *w_curs = NULL;
|
|
|
|
unsigned long size;
|
|
|
|
off_t curpos = obj_offset;
|
|
|
|
enum object_type type;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We always get the representation type, but only convert it to
|
|
|
|
* a "real" type later if the caller is interested.
|
|
|
|
*/
|
|
|
|
if (oi->contentp) {
|
2018-04-26 02:21:06 +08:00
|
|
|
*oi->contentp = cache_or_unpack_entry(r, p, obj_offset, oi->sizep,
|
2017-08-19 06:20:30 +08:00
|
|
|
&type);
|
|
|
|
if (!*oi->contentp)
|
|
|
|
type = OBJ_BAD;
|
|
|
|
} else {
|
|
|
|
type = unpack_object_header(p, &w_curs, &curpos, &size);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!oi->contentp && oi->sizep) {
|
|
|
|
if (type == OBJ_OFS_DELTA || type == OBJ_REF_DELTA) {
|
|
|
|
off_t tmp_pos = curpos;
|
|
|
|
off_t base_offset = get_delta_base(p, &w_curs, &tmp_pos,
|
|
|
|
type, obj_offset);
|
|
|
|
if (!base_offset) {
|
|
|
|
type = OBJ_BAD;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
*oi->sizep = get_size_from_delta(p, &w_curs, tmp_pos);
|
|
|
|
if (*oi->sizep == 0) {
|
|
|
|
type = OBJ_BAD;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
*oi->sizep = size;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (oi->disk_sizep) {
|
2021-01-14 06:24:41 +08:00
|
|
|
uint32_t pos;
|
|
|
|
if (offset_to_pack_pos(p, obj_offset, &pos) < 0) {
|
|
|
|
error("could not find object at offset %"PRIuMAX" "
|
|
|
|
"in pack %s", (uintmax_t)obj_offset, p->pack_name);
|
|
|
|
type = OBJ_BAD;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
*oi->disk_sizep = pack_pos_to_offset(p, pos + 1) - obj_offset;
|
2017-08-19 06:20:30 +08:00
|
|
|
}
|
|
|
|
|
2018-02-15 02:59:23 +08:00
|
|
|
if (oi->typep || oi->type_name) {
|
2017-08-19 06:20:30 +08:00
|
|
|
enum object_type ptot;
|
2018-04-26 02:21:06 +08:00
|
|
|
ptot = packed_to_object_type(r, p, obj_offset,
|
2018-04-26 02:21:01 +08:00
|
|
|
type, &w_curs, curpos);
|
2017-08-19 06:20:30 +08:00
|
|
|
if (oi->typep)
|
|
|
|
*oi->typep = ptot;
|
2018-02-15 02:59:23 +08:00
|
|
|
if (oi->type_name) {
|
2018-02-15 02:59:24 +08:00
|
|
|
const char *tn = type_name(ptot);
|
2017-08-19 06:20:30 +08:00
|
|
|
if (tn)
|
2018-02-15 02:59:23 +08:00
|
|
|
strbuf_addstr(oi->type_name, tn);
|
2017-08-19 06:20:30 +08:00
|
|
|
}
|
|
|
|
if (ptot < 0) {
|
|
|
|
type = OBJ_BAD;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-02-24 12:36:56 +08:00
|
|
|
if (oi->delta_base_oid) {
|
2017-08-19 06:20:30 +08:00
|
|
|
if (type == OBJ_OFS_DELTA || type == OBJ_REF_DELTA) {
|
2020-02-24 12:37:31 +08:00
|
|
|
if (get_delta_base_oid(p, &w_curs, curpos,
|
|
|
|
oi->delta_base_oid,
|
|
|
|
type, obj_offset) < 0) {
|
2017-08-19 06:20:30 +08:00
|
|
|
type = OBJ_BAD;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
} else
|
2024-06-14 14:49:54 +08:00
|
|
|
oidclr(oi->delta_base_oid, the_repository->hash_algo);
|
2017-08-19 06:20:30 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
oi->whence = in_delta_base_cache(p, obj_offset) ? OI_DBCACHED :
|
|
|
|
OI_PACKED;
|
|
|
|
|
|
|
|
out:
|
|
|
|
unuse_pack(&w_curs);
|
|
|
|
return type;
|
|
|
|
}
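A hedged usage sketch for packed_object_info() above: a caller requests only the pieces it needs by pointing the relevant object_info fields at its own variables, which is also how oid_object_info_extended() drives this function. The wrapper below is hypothetical.

/* Hypothetical helper, for illustration only. */
static int peek_packed_type_and_size(struct repository *r,
				     struct packed_git *p, off_t offset,
				     enum object_type *type,
				     unsigned long *size)
{
	struct object_info oi = OBJECT_INFO_INIT;

	oi.typep = type;	/* resolve deltas to the real object type */
	oi.sizep = size;	/* inflated size, following delta chains */
	return packed_object_info(r, p, offset, &oi) < 0 ? -1 : 0;
}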
|
|
|
|
|
|
|
|
static void *unpack_compressed_entry(struct packed_git *p,
|
|
|
|
struct pack_window **w_curs,
|
|
|
|
off_t curpos,
|
|
|
|
unsigned long size)
|
|
|
|
{
|
|
|
|
int st;
|
|
|
|
git_zstream stream;
|
|
|
|
unsigned char *buffer, *in;
|
|
|
|
|
|
|
|
buffer = xmallocz_gently(size);
|
|
|
|
if (!buffer)
|
|
|
|
return NULL;
|
|
|
|
memset(&stream, 0, sizeof(stream));
|
|
|
|
stream.next_out = buffer;
|
|
|
|
stream.avail_out = size + 1;
|
|
|
|
|
|
|
|
git_inflate_init(&stream);
|
|
|
|
do {
|
|
|
|
in = use_pack(p, w_curs, curpos, &stream.avail_in);
|
|
|
|
stream.next_in = in;
|
2020-01-16 10:39:53 +08:00
|
|
|
/*
|
|
|
|
* Note: we must ensure the window section returned by
|
|
|
|
* use_pack() will be available throughout git_inflate()'s
|
|
|
|
* unlocked execution. Please refer to the comment at
|
|
|
|
* get_size_from_delta() to see how this is done.
|
|
|
|
*/
|
|
|
|
obj_read_unlock();
|
2017-08-19 06:20:30 +08:00
|
|
|
st = git_inflate(&stream, Z_FINISH);
|
2020-01-16 10:39:53 +08:00
|
|
|
obj_read_lock();
|
2017-08-19 06:20:30 +08:00
|
|
|
if (!stream.avail_out)
|
|
|
|
break; /* the payload is larger than it should be */
|
|
|
|
curpos += stream.next_in - in;
|
|
|
|
} while (st == Z_OK || st == Z_BUF_ERROR);
|
|
|
|
git_inflate_end(&stream);
|
|
|
|
if ((st != Z_STREAM_END) || stream.total_out != size) {
|
|
|
|
free(buffer);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2018-06-13 22:22:07 +08:00
|
|
|
/* some versions of zlib can clobber the unconsumed portion of outbuf */
|
|
|
|
buffer[size] = '\0';
|
|
|
|
|
2017-08-19 06:20:30 +08:00
|
|
|
return buffer;
|
|
|
|
}
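One detail of the function above worth spelling out (editorial note, not from the original source):

/*
 * Editorial note: avail_out is set to size + 1 even though only `size`
 * usable bytes were requested (xmallocz_gently() allocates one extra
 * byte for the trailing NUL). If the compressed stream tries to produce
 * more than `size` bytes, avail_out drops to zero and the loop breaks
 * early, so a corrupt or lying size header is reported as failure
 * instead of overflowing the buffer.
 */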
|
|
|
|
|
|
|
|
static void write_pack_access_log(struct packed_git *p, off_t obj_offset)
|
|
|
|
{
|
|
|
|
static struct trace_key pack_access = TRACE_KEY_INIT(PACK_ACCESS);
|
|
|
|
trace_printf_key(&pack_access, "%s %"PRIuMAX"\n",
|
|
|
|
p->pack_name, (uintmax_t)obj_offset);
|
|
|
|
}
|
|
|
|
|
|
|
|
int do_check_packed_object_crc;
|
|
|
|
|
|
|
|
#define UNPACK_ENTRY_STACK_PREALLOC 64
|
|
|
|
struct unpack_entry_stack_ent {
|
|
|
|
off_t obj_offset;
|
|
|
|
off_t curpos;
|
|
|
|
unsigned long size;
|
|
|
|
};
|
|
|
|
|
2018-04-26 02:21:06 +08:00
|
|
|
void *unpack_entry(struct repository *r, struct packed_git *p, off_t obj_offset,
|
2017-08-19 06:20:30 +08:00
|
|
|
enum object_type *final_type, unsigned long *final_size)
|
|
|
|
{
|
|
|
|
struct pack_window *w_curs = NULL;
|
|
|
|
off_t curpos = obj_offset;
|
|
|
|
void *data = NULL;
|
|
|
|
unsigned long size;
|
|
|
|
enum object_type type;
|
|
|
|
struct unpack_entry_stack_ent small_delta_stack[UNPACK_ENTRY_STACK_PREALLOC];
|
|
|
|
struct unpack_entry_stack_ent *delta_stack = small_delta_stack;
|
|
|
|
int delta_stack_nr = 0, delta_stack_alloc = UNPACK_ENTRY_STACK_PREALLOC;
|
|
|
|
int base_from_cache = 0;
|
|
|
|
|
|
|
|
write_pack_access_log(p, obj_offset);
|
|
|
|
|
|
|
|
/* PHASE 1: drill down to the innermost base object */
|
|
|
|
for (;;) {
|
|
|
|
off_t base_offset;
|
|
|
|
int i;
|
|
|
|
struct delta_base_cache_entry *ent;
|
|
|
|
|
|
|
|
ent = get_delta_base_cache_entry(p, curpos);
|
|
|
|
if (ent) {
|
|
|
|
type = ent->type;
|
|
|
|
data = ent->data;
|
|
|
|
size = ent->size;
|
|
|
|
detach_delta_base_cache_entry(ent);
|
|
|
|
base_from_cache = 1;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (do_check_packed_object_crc && p->index_version > 1) {
|
2021-01-14 06:24:45 +08:00
|
|
|
uint32_t pack_pos, index_pos;
|
|
|
|
off_t len;
|
|
|
|
|
|
|
|
if (offset_to_pack_pos(p, obj_offset, &pack_pos) < 0) {
|
|
|
|
error("could not find object at offset %"PRIuMAX" in pack %s",
|
|
|
|
(uintmax_t)obj_offset, p->pack_name);
|
|
|
|
data = NULL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
len = pack_pos_to_offset(p, pack_pos + 1) - obj_offset;
|
|
|
|
index_pos = pack_pos_to_index(p, pack_pos);
|
|
|
|
if (check_pack_crc(p, &w_curs, obj_offset, len, index_pos)) {
|
2018-03-12 10:27:44 +08:00
|
|
|
struct object_id oid;
|
2021-01-14 06:24:45 +08:00
|
|
|
nth_packed_object_id(&oid, p, index_pos);
|
2017-08-19 06:20:30 +08:00
|
|
|
error("bad packed object CRC for %s",
|
2018-03-12 10:27:44 +08:00
|
|
|
oid_to_hex(&oid));
|
2021-09-12 04:40:33 +08:00
|
|
|
mark_bad_packed_object(p, &oid);
|
2017-08-19 06:20:30 +08:00
|
|
|
data = NULL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
type = unpack_object_header(p, &w_curs, &curpos, &size);
|
|
|
|
if (type != OBJ_OFS_DELTA && type != OBJ_REF_DELTA)
|
|
|
|
break;
|
|
|
|
|
|
|
|
base_offset = get_delta_base(p, &w_curs, &curpos, type, obj_offset);
|
|
|
|
if (!base_offset) {
|
|
|
|
error("failed to validate delta base reference "
|
|
|
|
"at offset %"PRIuMAX" from %s",
|
|
|
|
(uintmax_t)curpos, p->pack_name);
|
|
|
|
/* bail to phase 2, in hopes of recovery */
|
|
|
|
data = NULL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* push object, proceed to base */
|
|
|
|
if (delta_stack_nr >= delta_stack_alloc
|
|
|
|
&& delta_stack == small_delta_stack) {
|
|
|
|
delta_stack_alloc = alloc_nr(delta_stack_nr);
|
|
|
|
ALLOC_ARRAY(delta_stack, delta_stack_alloc);
|
2019-06-16 02:36:35 +08:00
|
|
|
COPY_ARRAY(delta_stack, small_delta_stack,
|
|
|
|
delta_stack_nr);
|
2017-08-19 06:20:30 +08:00
|
|
|
} else {
|
|
|
|
ALLOC_GROW(delta_stack, delta_stack_nr+1, delta_stack_alloc);
|
|
|
|
}
|
|
|
|
i = delta_stack_nr++;
|
|
|
|
delta_stack[i].obj_offset = obj_offset;
|
|
|
|
delta_stack[i].curpos = curpos;
|
|
|
|
delta_stack[i].size = size;
|
|
|
|
|
|
|
|
curpos = obj_offset = base_offset;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* PHASE 2: handle the base */
|
|
|
|
switch (type) {
|
|
|
|
case OBJ_OFS_DELTA:
|
|
|
|
case OBJ_REF_DELTA:
|
|
|
|
if (data)
|
2018-05-02 17:38:39 +08:00
|
|
|
BUG("unpack_entry: left loop at a valid delta");
|
2017-08-19 06:20:30 +08:00
|
|
|
break;
|
|
|
|
case OBJ_COMMIT:
|
|
|
|
case OBJ_TREE:
|
|
|
|
case OBJ_BLOB:
|
|
|
|
case OBJ_TAG:
|
|
|
|
if (!base_from_cache)
|
|
|
|
data = unpack_compressed_entry(p, &w_curs, curpos, size);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
data = NULL;
|
|
|
|
error("unknown object type %i at offset %"PRIuMAX" in %s",
|
|
|
|
type, (uintmax_t)obj_offset, p->pack_name);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* PHASE 3: apply deltas in order */
|
|
|
|
|
|
|
|
/* invariants:
|
|
|
|
* 'data' holds the base data, or NULL if there was corruption
|
|
|
|
*/
|
|
|
|
while (delta_stack_nr) {
|
|
|
|
void *delta_data;
|
|
|
|
void *base = data;
|
|
|
|
void *external_base = NULL;
|
|
|
|
unsigned long delta_size, base_size = size;
|
|
|
|
int i;
|
2020-09-29 08:01:52 +08:00
|
|
|
off_t base_obj_offset = obj_offset;
|
2017-08-19 06:20:30 +08:00
|
|
|
|
|
|
|
data = NULL;
|
|
|
|
|
|
|
|
if (!base) {
|
|
|
|
/*
|
|
|
|
* We're probably in deep shit, but let's try to fetch
|
|
|
|
* the required base anyway from another pack or loose.
|
|
|
|
* This is costly but should happen only in the presence
|
|
|
|
* of a corrupted pack, and is better than failing outright.
|
|
|
|
*/
|
2021-01-14 06:24:45 +08:00
|
|
|
uint32_t pos;
|
2018-03-12 10:27:44 +08:00
|
|
|
struct object_id base_oid;
|
2021-01-14 06:24:45 +08:00
|
|
|
if (!(offset_to_pack_pos(p, obj_offset, &pos))) {
|
2023-01-07 21:50:53 +08:00
|
|
|
struct object_info oi = OBJECT_INFO_INIT;
|
|
|
|
|
2021-01-14 06:24:45 +08:00
|
|
|
nth_packed_object_id(&base_oid, p,
|
|
|
|
pack_pos_to_index(p, pos));
|
2017-08-19 06:20:30 +08:00
|
|
|
error("failed to read delta base object %s"
|
|
|
|
" at offset %"PRIuMAX" from %s",
|
2018-03-12 10:27:44 +08:00
|
|
|
oid_to_hex(&base_oid), (uintmax_t)obj_offset,
|
2017-08-19 06:20:30 +08:00
|
|
|
p->pack_name);
|
2021-09-12 04:40:33 +08:00
|
|
|
mark_bad_packed_object(p, &base_oid);
|
2023-01-07 21:50:53 +08:00
|
|
|
|
|
|
|
oi.typep = &type;
|
|
|
|
oi.sizep = &base_size;
|
|
|
|
oi.contentp = &base;
|
|
|
|
if (oid_object_info_extended(r, &base_oid, &oi, 0) < 0)
|
|
|
|
base = NULL;
|
|
|
|
|
2017-08-19 06:20:30 +08:00
|
|
|
external_base = base;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
i = --delta_stack_nr;
|
|
|
|
obj_offset = delta_stack[i].obj_offset;
|
|
|
|
curpos = delta_stack[i].curpos;
|
|
|
|
delta_size = delta_stack[i].size;
|
|
|
|
|
|
|
|
if (!base)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
delta_data = unpack_compressed_entry(p, &w_curs, curpos, delta_size);
|
|
|
|
|
|
|
|
if (!delta_data) {
|
|
|
|
error("failed to unpack compressed delta "
|
|
|
|
"at offset %"PRIuMAX" from %s",
|
|
|
|
(uintmax_t)curpos, p->pack_name);
|
|
|
|
data = NULL;
|
2020-09-29 08:01:52 +08:00
|
|
|
} else {
|
|
|
|
data = patch_delta(base, base_size, delta_data,
|
|
|
|
delta_size, &size);
|
2017-08-19 06:20:30 +08:00
|
|
|
|
2020-09-29 08:01:52 +08:00
|
|
|
/*
|
|
|
|
* We could not apply the delta; warn the user, but
|
|
|
|
* keep going. Our failure will be noticed either in
|
|
|
|
* the next iteration of the loop, or if this is the
|
|
|
|
* final delta, in the caller when we return NULL.
|
|
|
|
* Those code paths will take care of making a more
|
|
|
|
* explicit warning and retrying with another copy of
|
|
|
|
* the object.
|
|
|
|
*/
|
|
|
|
if (!data)
|
|
|
|
error("failed to apply delta");
|
|
|
|
}
|
2017-08-19 06:20:30 +08:00
|
|
|
|
|
|
|
/*
|
2020-09-29 08:01:52 +08:00
|
|
|
* We delay adding `base` to the cache until the end of the loop
|
|
|
|
* because unpack_compressed_entry() momentarily releases the
|
|
|
|
* obj_read_mutex, giving another thread the chance to access
|
|
|
|
* the cache. Therefore, if `base` was already there, this other
|
|
|
|
* thread could free() it (e.g. to make space for another entry)
|
|
|
|
* before we are done using it.
|
2017-08-19 06:20:30 +08:00
|
|
|
*/
|
2020-09-29 08:01:52 +08:00
|
|
|
if (!external_base)
|
|
|
|
add_delta_base_cache(p, base_obj_offset, base, base_size, type);
|
2017-08-19 06:20:30 +08:00
|
|
|
|
|
|
|
free(delta_data);
|
|
|
|
free(external_base);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (final_type)
|
|
|
|
*final_type = type;
|
|
|
|
if (final_size)
|
|
|
|
*final_size = size;
|
|
|
|
|
|
|
|
out:
|
|
|
|
unuse_pack(&w_curs);
|
|
|
|
|
|
|
|
if (delta_stack != small_delta_stack)
|
|
|
|
free(delta_stack);
|
|
|
|
|
|
|
|
return data;
|
|
|
|
}
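A compressed restatement of the control flow of unpack_entry() above (editorial summary, not code from the file):

/*
 * unpack_entry() in outline (editorial summary):
 *
 *   PHASE 1: follow the delta chain downwards, pushing each delta's
 *            (obj_offset, curpos, size) onto delta_stack, until we reach
 *            a non-delta base or find one in the delta base cache.
 *   PHASE 2: inflate that base object (unless it came from the cache).
 *   PHASE 3: pop the stack, inflating each delta and applying it with
 *            patch_delta(); each intermediate base is put back into the
 *            delta base cache so sibling deltas can reuse it.
 */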
|
2017-08-19 06:20:31 +08:00
|
|
|
|
2018-03-23 01:40:09 +08:00
|
|
|
int bsearch_pack(const struct object_id *oid, const struct packed_git *p, uint32_t *result)
|
|
|
|
{
|
|
|
|
const unsigned char *index_fanout = p->index_data;
|
|
|
|
const unsigned char *index_lookup;
|
2018-05-02 08:25:36 +08:00
|
|
|
const unsigned int hashsz = the_hash_algo->rawsz;
|
2018-03-23 01:40:09 +08:00
|
|
|
int index_lookup_width;
|
|
|
|
|
|
|
|
if (!index_fanout)
|
|
|
|
BUG("bsearch_pack called without a valid pack-index");
|
|
|
|
|
|
|
|
index_lookup = index_fanout + 4 * 256;
|
|
|
|
if (p->index_version == 1) {
|
2018-05-02 08:25:36 +08:00
|
|
|
index_lookup_width = hashsz + 4;
|
2018-03-23 01:40:09 +08:00
|
|
|
index_lookup += 4;
|
|
|
|
} else {
|
2018-05-02 08:25:36 +08:00
|
|
|
index_lookup_width = hashsz;
|
2018-03-23 01:40:09 +08:00
|
|
|
index_fanout += 8;
|
|
|
|
index_lookup += 8;
|
|
|
|
}
|
|
|
|
|
|
|
|
return bsearch_hash(oid->hash, (const uint32_t*)index_fanout,
|
|
|
|
index_lookup, index_lookup_width, result);
|
|
|
|
}
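For orientation, a summary of the index layout as consumed by bsearch_pack() above; this is an editorial summary derived from the code and the pack-format documentation.

/*
 * Pack index layout as seen by bsearch_pack() (editorial summary):
 *
 *   v1: 256 * 4-byte fanout table, then N entries of
 *       (4-byte offset, object name), hence hashsz + 4 bytes per entry
 *       with the name starting 4 bytes in.
 *   v2: 8-byte header (magic + version), 256 * 4-byte fanout, then N
 *       object names of hashsz bytes each; offsets live in separate
 *       tables, see nth_packed_object_offset().
 *
 * fanout[b] is the number of objects whose first name byte is <= b,
 * which is what bsearch_hash() uses to narrow the binary search range.
 */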
|
|
|
|
|
2020-02-24 12:37:54 +08:00
|
|
|
int nth_packed_object_id(struct object_id *oid,
|
|
|
|
struct packed_git *p,
|
|
|
|
uint32_t n)
|
2017-08-19 06:20:31 +08:00
|
|
|
{
|
|
|
|
const unsigned char *index = p->index_data;
|
2018-05-02 08:25:36 +08:00
|
|
|
const unsigned int hashsz = the_hash_algo->rawsz;
|
2017-08-19 06:20:31 +08:00
|
|
|
if (!index) {
|
|
|
|
if (open_pack_index(p))
|
2020-02-24 12:37:54 +08:00
|
|
|
return -1;
|
2017-08-19 06:20:31 +08:00
|
|
|
index = p->index_data;
|
|
|
|
}
|
|
|
|
if (n >= p->num_objects)
|
2020-02-24 12:37:54 +08:00
|
|
|
return -1;
|
2017-08-19 06:20:31 +08:00
|
|
|
index += 4 * 256;
|
|
|
|
if (p->index_version == 1) {
|
2024-06-14 14:49:54 +08:00
|
|
|
oidread(oid, index + st_add(st_mult(hashsz + 4, n), 4),
|
|
|
|
the_repository->hash_algo);
|
2017-08-19 06:20:31 +08:00
|
|
|
} else {
|
|
|
|
index += 8;
|
2024-06-14 14:49:54 +08:00
|
|
|
oidread(oid, index + st_mult(hashsz, n),
|
|
|
|
the_repository->hash_algo);
|
2017-08-19 06:20:31 +08:00
|
|
|
}
|
2020-02-24 12:27:36 +08:00
|
|
|
return 0;
|
2017-08-19 06:20:31 +08:00
|
|
|
}
|
2017-08-19 06:20:32 +08:00
|
|
|
|
|
|
|
void check_pack_index_ptr(const struct packed_git *p, const void *vptr)
|
|
|
|
{
|
|
|
|
const unsigned char *ptr = vptr;
|
|
|
|
const unsigned char *start = p->index_data;
|
|
|
|
const unsigned char *end = start + p->index_size;
|
|
|
|
if (ptr < start)
|
|
|
|
die(_("offset before start of pack index for %s (corrupt index?)"),
|
|
|
|
p->pack_name);
|
|
|
|
/* No need to check for underflow; .idx files must be at least 8 bytes */
|
|
|
|
if (ptr >= end - 8)
|
|
|
|
die(_("offset beyond end of pack index for %s (truncated index?)"),
|
|
|
|
p->pack_name);
|
|
|
|
}
|
|
|
|
|
|
|
|
off_t nth_packed_object_offset(const struct packed_git *p, uint32_t n)
|
|
|
|
{
|
|
|
|
const unsigned char *index = p->index_data;
|
2018-05-02 08:25:36 +08:00
|
|
|
const unsigned int hashsz = the_hash_algo->rawsz;
|
2017-08-19 06:20:32 +08:00
|
|
|
index += 4 * 256;
|
|
|
|
if (p->index_version == 1) {
|
2023-07-13 07:37:32 +08:00
|
|
|
return ntohl(*((uint32_t *)(index + st_mult(hashsz + 4, n))));
|
2017-08-19 06:20:32 +08:00
|
|
|
} else {
|
|
|
|
uint32_t off;
|
2023-07-13 07:37:32 +08:00
|
|
|
index += st_add(8, st_mult(p->num_objects, hashsz + 4));
|
|
|
|
off = ntohl(*((uint32_t *)(index + st_mult(4, n))));
|
2017-08-19 06:20:32 +08:00
|
|
|
if (!(off & 0x80000000))
|
|
|
|
return off;
|
2023-07-13 07:37:32 +08:00
|
|
|
index += st_add(st_mult(p->num_objects, 4),
|
|
|
|
st_mult(off & 0x7fffffff, 8));
|
2017-08-19 06:20:32 +08:00
|
|
|
check_pack_index_ptr(p, index);
|
2018-01-18 03:08:23 +08:00
|
|
|
return get_be64(index);
|
2017-08-19 06:20:32 +08:00
|
|
|
}
|
|
|
|
}
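A worked example for the version-2 offset lookup above, with made-up values, illustrative only:

/*
 * Worked example (illustrative):
 *
 *   off = 0x0000beef -> MSB clear: the object starts at offset 0xbeef.
 *   off = 0x80000003 -> MSB set: the low 31 bits (3) index the 64-bit
 *                       offset table, and the real offset is read from
 *                       there with get_be64().
 */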
|
2017-08-19 06:20:33 +08:00
|
|
|
|
2024-10-25 15:06:06 +08:00
|
|
|
off_t find_pack_entry_one(const struct object_id *oid,
|
|
|
|
struct packed_git *p)
|
2017-08-19 06:20:33 +08:00
|
|
|
{
|
|
|
|
const unsigned char *index = p->index_data;
|
2018-02-14 02:39:39 +08:00
|
|
|
uint32_t result;
|
2017-08-19 06:20:33 +08:00
|
|
|
|
|
|
|
if (!index) {
|
|
|
|
if (open_pack_index(p))
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2024-10-25 15:06:06 +08:00
|
|
|
if (bsearch_pack(oid, p, &result))
|
2018-02-14 02:39:39 +08:00
|
|
|
return nth_packed_object_offset(p, result);
|
2017-08-19 06:20:33 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int is_pack_valid(struct packed_git *p)
|
|
|
|
{
|
|
|
|
/* An already open pack is known to be valid. */
|
|
|
|
if (p->pack_fd != -1)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
/* If the pack has one window completely covering the
|
|
|
|
* file size, the pack is known to be valid even if
|
|
|
|
* the descriptor is not currently open.
|
|
|
|
*/
|
|
|
|
if (p->windows) {
|
|
|
|
struct pack_window *w = p->windows;
|
|
|
|
|
|
|
|
if (!w->offset && w->len == p->pack_size)
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Force the pack to open to prove it's valid. */
|
|
|
|
return !open_packed_git(p);
|
|
|
|
}
|
2017-08-19 06:20:34 +08:00
|
|
|
|
packfile: convert find_sha1_pack() to use object_id
The find_sha1_pack() function has a few problems:
- it's badly named, since it works with any object hash
- it takes the hash as a bare pointer rather than an object_id struct
We can fix both of these easily, as all callers actually have a real
object_id anyway.
I also found the existence of this function somewhat confusing, as it is
about looking in an arbitrary set of linked packed_git structs. It's
good for things like dumb-http which are looking in downloaded remote
packs, and not our local packs. But despite the name, it is not a good
way to find the pack which contains a local object (it skips the use of
the midx, the pack mru list, and so on).
So let's also add an explanatory comment above the declaration that may
point people in the right direction.
I suspect the calls in fast-import.c, which use the packed_git list from
the repository struct, could actually just be using find_pack_entry().
But since we'd need to keep it anyway for dumb-http, I didn't dig
further there. If we eventually drop dumb-http support, then it might be
worth examining them to see if we can get rid of the function entirely.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Taylor Blau <me@ttaylorr.com>
2024-10-25 15:05:03 +08:00
struct packed_git *find_oid_pack(const struct object_id *oid,
				 struct packed_git *packs)
{
	struct packed_git *p;

	for (p = packs; p; p = p->next) {
		if (find_pack_entry_one(oid, p))
			return p;
	}
	return NULL;
}

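For illustration, here is a minimal sketch of how a caller such as the dumb-http code might use find_oid_pack() to check a hand-maintained list of downloaded packs. The "downloaded_packs" list and the report_pack_for() helper are hypothetical, not part of this file:

/* Hypothetical example: "downloaded_packs" stands in for a caller-owned list. */
static void report_pack_for(const struct object_id *oid,
			    struct packed_git *downloaded_packs)
{
	struct packed_git *p = find_oid_pack(oid, downloaded_packs);

	if (p)
		fprintf(stderr, "%s found in %s\n",
			oid_to_hex(oid), p->pack_name);
	else
		fprintf(stderr, "%s not found in any downloaded pack\n",
			oid_to_hex(oid));
}
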
static int fill_pack_entry(const struct object_id *oid,
			   struct pack_entry *e,
			   struct packed_git *p)
{
	off_t offset;

	if (oidset_size(&p->bad_objects) &&
	    oidset_contains(&p->bad_objects, oid))
		return 0;

	offset = find_pack_entry_one(oid, p);
	if (!offset)
		return 0;

	/*
	 * We are about to tell the caller where they can locate the
	 * requested object. We better make sure the packfile is
	 * still here and can be accessed before supplying that
	 * answer, as it may have been deleted since the index was
	 * loaded!
	 */
	if (!is_pack_valid(p))
		return 0;
	e->offset = offset;
	e->p = p;
	return 1;
}

midx: add packs to packed_git linked list
The multi-pack-index allows searching for objects across multiple
packs using one object list. The original design gains many of
these performance benefits by keeping the packs in the
multi-pack-index out of the packed_git list.
Unfortunately, this has one major drawback. If the multi-pack-index
covers thousands of packs, and a command loads many of those packs,
then we can hit the limit for open file descriptors. The
close_one_pack() method is used to limit this resource, but it
only looks at the packed_git list, and uses an LRU cache to prevent
thrashing.
Instead of complicating this close_one_pack() logic to include
direct references to the multi-pack-index, simply add the packs
opened by the multi-pack-index to the packed_git list. This
immediately solves the file-descriptor limit problem, but requires
some extra steps to avoid performance issues or other problems:
1. Create a multi_pack_index bit in the packed_git struct that is
one if and only if the pack was loaded from a multi-pack-index.
2. Skip packs with the multi_pack_index bit when doing object
lookups and abbreviations. These algorithms already check the
multi-pack-index before the packed_git struct. This has a very
small performance hit, as we need to walk more packed_git
structs. This is acceptable, since these operations run binary
search on the other packs, so this walk-and-ignore logic is
very fast by comparison.
3. When closing a multi-pack-index file, do not close its packs,
as those packs will be closed using close_all_packs(). In some
cases, such as 'git repack', we run 'close_midx()' without also
closing the packs, so we need to un-set the multi_pack_index bit
in those packs. This is necessary, and caught by running
t6501-freshen-objects.sh with GIT_TEST_MULTI_PACK_INDEX=1.
To manually test this change, I inserted trace2 logging into
close_pack_fd() and set pack_max_fds to 10, then ran 'git rev-list
--all --objects' on a copy of the Git repo with 300+ pack-files and
a multi-pack-index. The logs verified the packs are closed as
we read them beyond the file descriptor limit.
Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2019-04-30 00:18:56 +08:00

int find_pack_entry(struct repository *r, const struct object_id *oid, struct pack_entry *e)
{
	struct list_head *pos;
	struct multi_pack_index *m;

	prepare_packed_git(r);
	if (!r->objects->packed_git && !r->objects->multi_pack_index)
		return 0;

	for (m = r->objects->multi_pack_index; m; m = m->next) {
		if (fill_midx_entry(r, oid, e, m))
			return 1;
	}

	list_for_each(pos, &r->objects->packed_git_mru) {
		struct packed_git *p = list_entry(pos, struct packed_git, mru);
		if (!p->multi_pack_index && fill_pack_entry(oid, e, p)) {
			list_move(&p->mru, &r->objects->packed_git_mru);
			return 1;
		}
	}
	return 0;
}

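As a usage illustration (not part of this file), a caller that only needs to know where an object lives might do something like the following; the locate_object() helper is hypothetical and error handling is elided:

/* Hypothetical example of looking up an object's pack location. */
static void locate_object(struct repository *r, const struct object_id *oid)
{
	struct pack_entry e;

	if (find_pack_entry(r, oid, &e))
		fprintf(stderr, "%s is at offset %"PRIuMAX" in %s\n",
			oid_to_hex(oid), (uintmax_t)e.offset, e.p->pack_name);
	else
		fprintf(stderr, "%s is not packed\n", oid_to_hex(oid));
}
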
packfile: add kept-pack cache for find_kept_pack_entry()
In a recent patch we added a function 'find_kept_pack_entry()' to look
for an object only among kept packs.
While this function avoids doing any lookup work in non-kept packs, it
is still linear in the number of packs, since we have to traverse the
linked list of packs once per object. Let's cache a reduced version of
that list to save us time.
Note that this cache will last the lifetime of the program. We could
invalidate it on reprepare_packed_git(), but there's not much point in
being rigorous here:
- we might already fail to notice new .keep packs showing up after the
program starts. We only reprepare_packed_git() when we fail to find
an object. But adding a new pack won't cause that to happen.
Somebody repacking could add a new pack and delete an old one, but
most of the time we'd have a descriptor or mmap open to the old
pack anyway, so we might not even notice.
- in pack-objects we already cache the .keep state at startup, since
56dfeb6263 (pack-objects: compute local/ignore_pack_keep early,
2016-07-29). So this is just extending that concept further.
- we don't have to worry about any packed_git being removed; we always
keep the old structs around, even after reprepare_packed_git()
We do defensively invalidate the cache in case the set of kept packs
being asked for changes (e.g., only in-core kept packs were cached, but
suddenly the caller also wants on-disk kept packs, too). In theory we
could build all three caches and switch between them, but it's not
necessary, since this patch (and series) never changes the set of kept
packs that it wants to inspect from the cache.
So that "optimization" is more about being defensive in the face of
future changes than it is about asking for multiple kinds of kept packs
in this patch.
Here are p5303 results (as always, measured against the kernel):
Test HEAD^ HEAD
-----------------------------------------------------------------------------------------------
5303.5: repack (1) 57.34(54.66+10.88) 56.98(54.36+10.98) -0.6%
5303.6: repack with kept (1) 57.38(54.83+10.49) 57.17(54.97+10.26) -0.4%
5303.11: repack (50) 71.70(88.99+4.74) 71.62(88.48+5.08) -0.1%
5303.12: repack with kept (50) 72.58(89.61+4.78) 71.56(88.80+4.59) -1.4%
5303.17: repack (1000) 217.19(491.72+14.25) 217.31(490.82+14.53) +0.1%
5303.18: repack with kept (1000) 246.12(520.07+14.93) 217.08(490.37+15.10) -11.8%
and the --stdin-packs case, which scales a little bit better (although
not by that much even at 1,000 packs):
5303.7: repack with --stdin-packs (1) 0.00(0.00+0.00) 0.00(0.00+0.00) =
5303.13: repack with --stdin-packs (50) 3.43(11.75+0.24) 3.43(11.69+0.30) +0.0%
5303.19: repack with --stdin-packs (1000) 130.50(307.15+7.66) 125.13(301.36+8.04) -4.1%
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Taylor Blau <me@ttaylorr.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-02-23 10:25:23 +08:00
static void maybe_invalidate_kept_pack_cache(struct repository *r,
					     unsigned flags)
{
	if (!r->objects->kept_pack_cache.packs)
		return;
	if (r->objects->kept_pack_cache.flags == flags)
		return;
	FREE_AND_NULL(r->objects->kept_pack_cache.packs);
	r->objects->kept_pack_cache.flags = 0;
}

packfile: introduce 'find_kept_pack_entry()'
Future callers will want a function to fill a 'struct pack_entry' for a
given object id but _only_ from its position in any kept pack(s).
In particular, a new 'git repack' mode which ensures the resulting
packs form a geometric progression by object count will mark packs that it
does not want to repack as "kept in-core", and it will want to halt a
reachability traversal as soon as it visits an object in any of the kept
packs. But, it does not want to halt the traversal at non-kept, or
.keep packs.
The obvious alternative is 'find_pack_entry()', but this doesn't quite
suffice since it only returns the first pack it finds, which may or may
not be kept (and the mru cache makes it unpredictable which one you'll
get if there are options).
Short of that, you could walk over all packs looking for the object in
each one, but it scales with the number of packs, which may be
prohibitive.
Introduce 'find_kept_pack_entry()', a function which is like
'find_pack_entry()', but only fills in objects in the kept packs.
Handle packs which have .keep files, as well as in-core kept packs
separately, since certain callers will want to distinguish one from the
other. (Though on-disk and in-core kept packs share the adjective
"kept", it is best to think of the two sets as independent.)
There is a gotcha when looking up objects that are duplicated in kept
and non-kept packs, particularly when the MIDX stores the non-kept
version and the caller asked for kept objects only. This could be
resolved by teaching the MIDX to resolve duplicates by always favoring
the kept pack (if one exists), but this breaks an assumption in existing
MIDXs, and so it would require a format change.
The benefit to changing the MIDX in this way is marginal, so we instead
have a more thorough check here which is explained with a comment.
Callers will be added in subsequent patches.
Co-authored-by: Jeff King <peff@peff.net>
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Taylor Blau <me@ttaylorr.com>
Reviewed-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-02-23 10:25:03 +08:00

static struct packed_git **kept_pack_cache(struct repository *r, unsigned flags)
{
	maybe_invalidate_kept_pack_cache(r, flags);

	if (!r->objects->kept_pack_cache.packs) {
		struct packed_git **packs = NULL;
		size_t nr = 0, alloc = 0;
		struct packed_git *p;

		/*
		 * We want "all" packs here, because we need to cover ones that
		 * are used by a midx, as well. We need to look in every one of
		 * them (instead of the midx itself) to cover duplicates. It's
		 * possible that an object is found in two packs that the midx
		 * covers, one kept and one not kept, but the midx returns only
		 * the non-kept version.
		 */
		for (p = get_all_packs(r); p; p = p->next) {
			if ((p->pack_keep && (flags & ON_DISK_KEEP_PACKS)) ||
			    (p->pack_keep_in_core && (flags & IN_CORE_KEEP_PACKS))) {
				ALLOC_GROW(packs, nr + 1, alloc);
				packs[nr++] = p;
			}
		}
		ALLOC_GROW(packs, nr + 1, alloc);
		packs[nr] = NULL;

		r->objects->kept_pack_cache.packs = packs;
		r->objects->kept_pack_cache.flags = flags;
	}

	return r->objects->kept_pack_cache.packs;
}

int find_kept_pack_entry(struct repository *r,
			 const struct object_id *oid,
			 unsigned flags,
			 struct pack_entry *e)
{
	struct packed_git **cache;

	for (cache = kept_pack_cache(r, flags); *cache; cache++) {
		struct packed_git *p = *cache;
		if (fill_pack_entry(oid, e, p))
			return 1;
	}

	return 0;
}

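A minimal sketch of how a caller might restrict a lookup to kept packs only; the object_in_kept_pack() wrapper below is hypothetical, and the flag combination simply mirrors the ON_DISK_KEEP_PACKS / IN_CORE_KEEP_PACKS bits tested in kept_pack_cache() above:

/* Hypothetical example: is the object in any kept pack (on disk or in core)? */
static int object_in_kept_pack(struct repository *r, const struct object_id *oid)
{
	struct pack_entry e;
	unsigned flags = ON_DISK_KEEP_PACKS | IN_CORE_KEEP_PACKS;

	return find_kept_pack_entry(r, oid, flags, &e);
}
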
int has_object_pack(const struct object_id *oid)
{
	struct pack_entry e;
	return find_pack_entry(the_repository, oid, &e);
}

int has_object_kept_pack(const struct object_id *oid, unsigned flags)
{
	struct pack_entry e;
	return find_kept_pack_entry(the_repository, oid, flags, &e);
}

for_each_packed_object: support iterating in pack-order
We currently iterate over objects within a pack in .idx
order, which uses the object hashes. That means that it
is effectively random with respect to the location of the
object within the pack. If you're going to access the actual
object data, there are two reasons to move linearly through
the pack itself:
1. It improves the locality of access in the packfile. In
the cold-cache case, this may mean fewer disk seeks, or
better usage of disk cache.
2. We store related deltas together in the packfile. Which
means that the delta base cache can operate much more
efficiently if we visit all of those related deltas in
sequence, as the earlier items are likely to still be
in the cache. Whereas if we visit the objects in
random order, our cache entries are much more likely to
have been evicted by unrelated deltas in the meantime.
So in general, if you're going to access the object contents
pack order is generally going to end up more efficient.
But if you're simply generating a list of object names, or
if you're going to end up sorting the result anyway, you're
better off just using the .idx order, as finding the pack
order means generating the in-memory pack-revindex.
According to the numbers in 8b8dfd5132 (pack-revindex:
radix-sort the revindex, 2013-07-11), that takes about 200ms
for linux.git, and 20ms for git.git (those numbers are a few
years old but are still a good ballpark).
That makes it a good optimization for some cases (we can
save tens of seconds in git.git by having good locality of
delta access, for a 20ms cost), but a bad one for others
(e.g., right now "cat-file --batch-all-objects
--batch-check="%(objectname)" is 170ms in git.git, so adding
20ms to that is noticeable).
Hence this patch makes it an optional flag. You can't
actually do any interesting timings yet, as it's not plumbed
through to any user-facing tools like cat-file. That will
come in a later patch.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-08-11 07:15:49 +08:00

int for_each_object_in_pack(struct packed_git *p,
			    each_packed_object_fn cb, void *data,
			    enum for_each_object_flags flags)
{
	uint32_t i;
	int r = 0;

	if (flags & FOR_EACH_OBJECT_PACK_ORDER) {
		if (load_pack_revindex(the_repository, p))
			return -1;
	}

	for (i = 0; i < p->num_objects; i++) {
		uint32_t index_pos;
		struct object_id oid;

		/*
		 * We are iterating "i" from 0 up to num_objects, but its
		 * meaning may be different, depending on the requested output
		 * order:
		 *
		 *   - in object-name order, it is the same as the index order
		 *     used by nth_packed_object_id(), so we can pass it
		 *     directly
		 *
		 *   - in pack-order, it is pack position, which we must
		 *     convert to an index position in order to get the oid.
		 */
		if (flags & FOR_EACH_OBJECT_PACK_ORDER)
			index_pos = pack_pos_to_index(p, i);
		else
			index_pos = i;

		if (nth_packed_object_id(&oid, p, index_pos) < 0)
			return error("unable to get sha1 of object %u in %s",
				     index_pos, p->pack_name);

		r = cb(&oid, p, index_pos, data);
		if (r)
			break;
	}
	return r;
}

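To illustrate the pack-order flag discussed in the commit message above, here is a minimal, hypothetical callback that just counts objects; the count_cb() and count_pack_objects() names are illustrative only, and errors from the iteration are ignored for brevity:

/* Hypothetical example: count the objects of one pack in pack order. */
static int count_cb(const struct object_id *oid UNUSED,
		    struct packed_git *pack UNUSED,
		    uint32_t pos UNUSED,
		    void *data)
{
	unsigned long *count = data;
	(*count)++;
	return 0;	/* a non-zero return would stop the iteration */
}

static unsigned long count_pack_objects(struct packed_git *p)
{
	unsigned long count = 0;
	for_each_object_in_pack(p, count_cb, &count, FOR_EACH_OBJECT_PACK_ORDER);
	return count;
}
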
int for_each_packed_object(each_packed_object_fn cb, void *data,
			   enum for_each_object_flags flags)
{
	struct packed_git *p;
	int r = 0;
	int pack_errors = 0;

	prepare_packed_git(the_repository);
	for (p = get_all_packs(the_repository); p; p = p->next) {
		if ((flags & FOR_EACH_OBJECT_LOCAL_ONLY) && !p->pack_local)
			continue;
		if ((flags & FOR_EACH_OBJECT_PROMISOR_ONLY) &&
		    !p->pack_promisor)
			continue;
		if ((flags & FOR_EACH_OBJECT_SKIP_IN_CORE_KEPT_PACKS) &&
		    p->pack_keep_in_core)
			continue;
		if ((flags & FOR_EACH_OBJECT_SKIP_ON_DISK_KEPT_PACKS) &&
		    p->pack_keep)
			continue;
		if (open_pack_index(p)) {
			pack_errors = 1;
			continue;
		}
		r = for_each_object_in_pack(p, cb, data, flags);
		if (r)
			break;
	}
	return r ? r : pack_errors;
}

is_promisor_object(): fix use-after-free of tree buffer
Since commit fcc07e980b (is_promisor_object(): free tree buffer after
parsing, 2021-04-13), we'll always free the buffers attached to a
"struct tree" after searching them for promisor links. But there's an
important case where we don't want to do so: if somebody else is already
using the tree!
This can happen during a "rev-list --missing=allow-promisor" traversal
in a partial clone that is missing one or more trees or blobs. The
backtrace for the free looks like this:
#1 free_tree_buffer tree.c:147
#2 add_promisor_object packfile.c:2250
#3 for_each_object_in_pack packfile.c:2190
#4 for_each_packed_object packfile.c:2215
#5 is_promisor_object packfile.c:2272
#6 finish_object__ma builtin/rev-list.c:245
#7 finish_object builtin/rev-list.c:261
#8 show_object builtin/rev-list.c:274
#9 process_blob list-objects.c:63
#10 process_tree_contents list-objects.c:145
#11 process_tree list-objects.c:201
#12 traverse_trees_and_blobs list-objects.c:344
[...]
We're in the middle of walking through the entries of a tree object via
process_tree_contents(). We see a blob (or it could even be another tree
entry) that we don't have, so we call is_promisor_object() to check it.
That function loops over all of the objects in the promisor packfile,
including the tree we're currently walking. When we're done with it
there, we free the tree buffer. But as we return to the walk in
process_tree_contents(), it's still holding on to a pointer to that
buffer, via its tree_desc iterator, and it accesses the freed memory.
Even a trivial use of "--missing=allow-promisor" triggers this problem,
as the included test demonstrates (it's just a vanilla --blob:none
clone).
We can detect this case by only freeing the tree buffer if it was
allocated on our behalf. This is a little tricky since that happens
inside parse_object(), and it doesn't tell us whether the object was
already parsed, or whether it allocated the buffer itself. But by
checking for an already-parsed tree beforehand, we can distinguish the
two cases.
That feels a little hacky, and does incur an extra lookup in the
object-hash table. But that cost is fairly minimal compared to actually
loading objects (and since we're iterating the whole pack here, we're
likely to be loading most objects, rather than reusing cached results).
It may also be a good direction for this function in general, as there
are other possible optimizations that rely on doing some analysis before
parsing:
- we could detect blobs and avoid reading their contents; they can't
link to other objects, but parse_object() doesn't know that we don't
care about checking their hashes.
- we could avoid allocating object structs entirely for most objects
(since we really only need them in the oidset), which would save
some memory.
- promisor commits could use the commit-graph rather than loading the
object from disk
This commit doesn't do any of those optimizations, but I think it argues
that this direction is reasonable, rather than relying on parse_object()
and trying to teach it to give us more information about whether it
parsed.
The included test fails reliably under SANITIZE=address just when
running "rev-list --missing=allow-promisor". Checking the output isn't
strictly necessary to detect the bug, but it seems like a reasonable
addition given the general lack of coverage for "allow-promisor" in the
test suite.
Reported-by: Andrew Olsen <andrew.olsen@koordinates.com>
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2022-08-14 14:29:15 +08:00

static int add_promisor_object(const struct object_id *oid,
			       struct packed_git *pack UNUSED,
			       uint32_t pos UNUSED,
			       void *set_)
{
	struct oidset *set = set_;
	struct object *obj;
	int we_parsed_object;

	obj = lookup_object(the_repository, oid);
	if (obj && obj->parsed) {
		we_parsed_object = 0;
	} else {
		we_parsed_object = 1;
		obj = parse_object(the_repository, oid);
	}

	if (!obj)
		return 1;

	oidset_insert(set, oid);

	/*
	 * If this is a tree, commit, or tag, the objects it refers
	 * to are also promisor objects. (Blobs refer to no objects.)
	 */
	if (obj->type == OBJ_TREE) {
		struct tree *tree = (struct tree *)obj;
		struct tree_desc desc;
		struct name_entry entry;
		if (init_tree_desc_gently(&desc, &tree->object.oid,
					  tree->buffer, tree->size, 0))
			/*
			 * Error messages are given when packs are
			 * verified, so do not print any here.
			 */
			return 0;
		while (tree_entry_gently(&desc, &entry))
			oidset_insert(set, &entry.oid);
		if (we_parsed_object)
			free_tree_buffer(tree);
	} else if (obj->type == OBJ_COMMIT) {
		struct commit *commit = (struct commit *) obj;
		struct commit_list *parents = commit->parents;

		oidset_insert(set, get_commit_tree_oid(commit));
		for (; parents; parents = parents->next)
			oidset_insert(set, &parents->item->object.oid);
	} else if (obj->type == OBJ_TAG) {
		struct tag *tag = (struct tag *) obj;
		oidset_insert(set, get_tagged_oid(tag));
	}
	return 0;
}

is_promisor_object(): walk promisor packs in pack-order
When we generate the list of promisor objects, we walk every pack with a
.promisor file and examine its objects for any links to other objects.
By default, for_each_packed_object() will go in pack .idx order.
This is the worst case with respect to our delta base cache. If we have
a delta chain of A->B->C->D, then visiting A may require reconstructing
both B and C, unless we also visited B recently, in which case we may
have cached its value. Because .idx order is based on sha1, it's random
with respect to the actual object contents and deltas, and thus we're
unlikely to get many cache hits.
If we instead traverse in pack order, then we get the optimal case:
packs are written to keep delta families together, and to place bases
before their children.
Even on a modest repository like git.git, this has a noticeable speedup
on p5600.4, which runs "fsck" on a partial clone with blob:none (so lots
of trees which need to be walked, and which delta well):
Test HEAD^ HEAD
-------------------------------------------------------
5600.4: 17.87(17.83+0.04) 15.42(15.35+0.06) -13.7%
On a larger repository like linux.git, the speedup is even more
pronounced:
Test HEAD^ HEAD
-----------------------------------------------------------
5600.4: 322.47(322.01+0.42) 186.41(185.76+0.63) -42.2%
Any other operations that call is_promisor_object(), like "rev-list
--exclude-promisor-objects", would similarly benefit, but the
invocations in p5600 don't actually trigger any such cases.
Note that we may pay a small price to build a rev-index in-memory to do
the pack-order traversal. But it's still a big net win, and even that
small cost goes away if you are using pack.writeReverseIndex.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2022-06-16 14:54:41 +08:00

int is_promisor_object(const struct object_id *oid)
{
	static struct oidset promisor_objects;
	static int promisor_objects_prepared;

	if (!promisor_objects_prepared) {
		if (repo_has_promisor_remote(the_repository)) {
			for_each_packed_object(add_promisor_object,
					       &promisor_objects,
					       FOR_EACH_OBJECT_PROMISOR_ONLY |
					       FOR_EACH_OBJECT_PACK_ORDER);
		}
		promisor_objects_prepared = 1;
	}
	return oidset_contains(&promisor_objects, oid);
}
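
As a closing illustration (not part of this file), code that encounters a missing object in a partial clone typically consults is_promisor_object() before deciding whether the absence is expected; the report_missing() helper below is hypothetical:

/* Hypothetical example: decide how to report a missing object. */
static void report_missing(const struct object_id *oid)
{
	if (is_promisor_object(oid))
		fprintf(stderr, "%s is missing but promised by a promisor remote\n",
			oid_to_hex(oid));
	else
		fprintf(stderr, "%s is missing and not a promisor object\n",
			oid_to_hex(oid));
}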