global: introduce `USE_THE_REPOSITORY_VARIABLE` macro
Use of the `the_repository` variable is deprecated nowadays, and we
slowly but steadily convert the codebase to not use it anymore. Instead,
callers should be passing down the repository to work on via parameters.
It is hard though to prove that a given code unit does not use this
variable anymore. The most trivial case, merely demonstrating that there
is no direct use of `the_repository`, is already a bit of a pain during
code reviews as the reviewer needs to manually verify claims made by the
patch author. The bigger problem though is that we have many interfaces
that implicitly rely on `the_repository`.
Introduce a new `USE_THE_REPOSITORY_VARIABLE` macro that allows code
units to opt into usage of `the_repository`. The intent of this macro is
to demonstrate that a certain code unit does not use this variable
anymore, and to keep it from new dependencies on it in future changes,
be it explicit or implicit.
For now, the macro only guards `the_repository` itself as well as
`the_hash_algo`. There are many more known interfaces where we have an
implicit dependency on `the_repository`, but those are not guarded at
the current point in time. Over time though, we should start to add
guards as required (or even better, just remove them).
Define the macro as required in our code units. As expected, most of our
code still relies on the global variable. Nearly all of our builtins
rely on the variable as there is no way yet to pass `the_repository` to
their entry point. For now, declare the macro in "builtin.h" to keep the
required changes at least a little bit more contained.
Signed-off-by: Patrick Steinhardt <ps@pks.im>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2024-06-14 14:50:23 +08:00
|
|
|
#define USE_THE_REPOSITORY_VARIABLE
|
|
|
|
|
2023-04-11 15:41:48 +08:00
|
|
|
#include "git-compat-util.h"
|
2023-03-21 14:26:03 +08:00
|
|
|
#include "environment.h"
|
2023-03-21 14:25:54 +08:00
|
|
|
#include "gettext.h"
|
2023-02-24 08:09:27 +08:00
|
|
|
#include "hex.h"
|
2007-05-03 00:13:14 +08:00
|
|
|
#include "pack.h"
|
2007-06-02 03:18:05 +08:00
|
|
|
#include "csum-file.h"
|
2021-01-12 16:21:59 +08:00
|
|
|
#include "remote.h"
|
2022-05-21 07:17:41 +08:00
|
|
|
#include "chunk-format.h"
|
pack-objects: use finalize_object_file() to rename pack/idx/etc
In most places that write files to the object database (even packfiles
via index-pack or fast-import), we use finalize_object_file(). This
prefers link()/unlink() over rename(), because it means we will prefer
data that is already in the repository to data that we are newly
writing.
We should do the same thing in pack-objects. Even though we don't think
of it as accepting outside data (and thus not being susceptible to
collision attacks), in theory a determined attacker could present just
the right set of objects to cause an incremental repack to generate
a pack with their desired hash.
This has some test and real-world fallout, as seen in the adjustment to
t5303 below. That test script assumes that we can "fix" corruption by
repacking into a good state, including when the pack generated by that
repack operation collides with a (corrupted) pack with the same hash.
This violates our assumption from the previous adjustments to
finalize_object_file() that if we're moving a new file over an existing
one, that since their checksums match, so too must their contents.
This makes "fixing" corruption like this a more explicit operation,
since the test (and users, who may fix real-life corruption using a
similar technique) must first move the broken contents out of the way.
Note also that we now call adjust_shared_perm() twice. We already call
adjust_shared_perm() in stage_tmp_packfiles(), and now call it again in
finalize_object_file(). This is somewhat wasteful, but cleaning up the
existing calls to adjust_shared_perm() is tricky (because sometimes
we're writing to a tmpfile, and sometimes we're writing directly into
the final destination), so let's tolerate some minor waste until we can
more carefully clean up the now-redundant calls.
Co-authored-by: Jeff King <peff@peff.net>
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Taylor Blau <me@ttaylorr.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2024-09-26 23:22:41 +08:00
|
|
|
#include "object-file.h"
|
2022-05-21 07:17:43 +08:00
|
|
|
#include "pack-mtimes.h"
|
|
|
|
#include "pack-objects.h"
|
2023-04-11 11:00:41 +08:00
|
|
|
#include "pack-revindex.h"
|
2023-05-16 14:33:59 +08:00
|
|
|
#include "path.h"
|
2024-09-12 19:29:30 +08:00
|
|
|
#include "repository.h"
|
2023-05-16 14:34:06 +08:00
|
|
|
#include "strbuf.h"
|
2007-06-02 03:18:05 +08:00
|
|
|
|
2011-02-26 07:43:25 +08:00
|
|
|
void reset_pack_idx_option(struct pack_idx_option *opts)
|
|
|
|
{
|
|
|
|
memset(opts, 0, sizeof(*opts));
|
|
|
|
opts->version = 2;
|
|
|
|
opts->off32_limit = 0x7fffffff;
|
|
|
|
}
|
2007-06-02 03:18:05 +08:00
|
|
|
|
|
|
|
static int sha1_compare(const void *_a, const void *_b)
|
|
|
|
{
|
|
|
|
struct pack_idx_entry *a = *(struct pack_idx_entry **)_a;
|
|
|
|
struct pack_idx_entry *b = *(struct pack_idx_entry **)_b;
|
2017-05-07 06:10:11 +08:00
|
|
|
return oidcmp(&a->oid, &b->oid);
|
2007-06-02 03:18:05 +08:00
|
|
|
}
|
|
|
|
|
2011-02-26 08:55:26 +08:00
|
|
|
/*
 * qsort()/bsearch() callback for plain uint32_t values.  Returns -1, 0,
 * or 1 for less-than, equal, and greater-than respectively.
 */
static int cmp_uint32(const void *va, const void *vb)
{
	uint32_t lhs = *(const uint32_t *)va;
	uint32_t rhs = *(const uint32_t *)vb;

	if (lhs < rhs)
		return -1;
	if (lhs > rhs)
		return 1;
	return 0;
}
|
|
|
|
|
2011-02-26 08:54:00 +08:00
|
|
|
/*
 * Decide whether "offset" must go into the 64-bit ("large") offset table
 * of a v2 index: either it does not fit into 31 bits (or exceeds the
 * configured off32_limit), or it is listed in the sorted table of known
 * offset anomalies.
 */
static int need_large_offset(off_t offset, const struct pack_idx_option *opts)
{
	uint32_t candidate;

	if ((offset >> 31) || (opts->off32_limit < offset))
		return 1;
	if (!opts->anomaly_nr)
		return 0;

	/* the anomaly table is kept sorted, so a binary search suffices */
	candidate = offset;
	return bsearch(&candidate, opts->anomaly, opts->anomaly_nr,
		       sizeof(candidate), cmp_uint32) != NULL;
}
|
|
|
|
|
2007-06-02 03:18:05 +08:00
|
|
|
/*
|
2020-07-23 05:40:31 +08:00
|
|
|
* The *sha1 contains the pack content SHA1 hash.
|
|
|
|
* The objects array passed in will be sorted by SHA1 on exit.
|
2007-06-02 03:18:05 +08:00
|
|
|
*/
|
2010-01-22 23:55:19 +08:00
|
|
|
/*
 * Write a pack index (.idx) for the given objects and return the path it
 * was written to (the caller's index_name, or a freshly allocated
 * temporary path when index_name is NULL — NOTE(review): ownership of
 * the returned string depends on which branch was taken; confirm with
 * callers).
 */
const char *write_idx_file(const char *index_name, struct pack_idx_entry **objects,
			   int nr_objects, const struct pack_idx_option *opts,
			   const unsigned char *sha1)
{
	struct hashfile *f;
	struct pack_idx_entry **sorted_by_sha, **list, **last;
	off_t last_obj_offset = 0;
	int i, fd;
	uint32_t index_version;

	if (nr_objects) {
		/* sorts the caller's array in place, as documented above */
		sorted_by_sha = objects;
		list = sorted_by_sha;
		last = sorted_by_sha + nr_objects;
		/* track the largest offset to decide on index version below */
		for (i = 0; i < nr_objects; ++i) {
			if (objects[i]->offset > last_obj_offset)
				last_obj_offset = objects[i]->offset;
		}
		QSORT(sorted_by_sha, nr_objects, sha1_compare);
	}
	else
		sorted_by_sha = list = last = NULL;

	if (opts->flags & WRITE_IDX_VERIFY) {
		/* verify mode: checksum an existing file instead of writing */
		assert(index_name);
		f = hashfd_check(index_name);
	} else {
		if (!index_name) {
			struct strbuf tmp_file = STRBUF_INIT;
			fd = odb_mkstemp(&tmp_file, "pack/tmp_idx_XXXXXX");
			index_name = strbuf_detach(&tmp_file, NULL);
		} else {
			/* remove any stale file so O_EXCL creation succeeds */
			unlink(index_name);
			fd = xopen(index_name, O_CREAT|O_EXCL|O_WRONLY, 0600);
		}
		f = hashfd(fd, index_name);
	}

	/* if last object's offset is >= 2^31 we should use index V2 */
	index_version = need_large_offset(last_obj_offset, opts) ? 2 : opts->version;

	/* index versions 2 and above need a header */
	if (index_version >= 2) {
		struct pack_idx_header hdr;
		hdr.idx_signature = htonl(PACK_IDX_SIGNATURE);
		hdr.idx_version = htonl(index_version);
		hashwrite(f, &hdr, sizeof(hdr));
	}

	/*
	 * Write the first-level table (the list is sorted,
	 * but we use a 256-entry lookup to be able to avoid
	 * having to do eight extra binary search iterations).
	 */
	for (i = 0; i < 256; i++) {
		struct pack_idx_entry **next = list;
		/* advance past every entry whose first OID byte is i */
		while (next < last) {
			struct pack_idx_entry *obj = *next;
			if (obj->oid.hash[0] != i)
				break;
			next++;
		}
		/* fan-out entries are cumulative counts, not per-bucket */
		hashwrite_be32(f, next - sorted_by_sha);
		list = next;
	}

	/*
	 * Write the actual SHA1 entries..
	 */
	list = sorted_by_sha;
	for (i = 0; i < nr_objects; i++) {
		struct pack_idx_entry *obj = *list++;
		/* v1 indexes interleave a 32-bit offset before each OID */
		if (index_version < 2)
			hashwrite_be32(f, obj->offset);
		hashwrite(f, obj->oid.hash, the_hash_algo->rawsz);
		/* the list is sorted, so duplicates must be adjacent */
		if ((opts->flags & WRITE_IDX_STRICT) &&
		    (i && oideq(&list[-2]->oid, &obj->oid)))
			die("The same object %s appears twice in the pack",
			    oid_to_hex(&obj->oid));
	}

	if (index_version >= 2) {
		unsigned int nr_large_offset = 0;

		/* write the crc32 table */
		list = sorted_by_sha;
		for (i = 0; i < nr_objects; i++) {
			struct pack_idx_entry *obj = *list++;
			hashwrite_be32(f, obj->crc32);
		}

		/* write the 32-bit offset table */
		list = sorted_by_sha;
		for (i = 0; i < nr_objects; i++) {
			struct pack_idx_entry *obj = *list++;
			uint32_t offset;

			/*
			 * Large offsets get the MSB set plus an index into
			 * the 64-bit table written next.
			 */
			offset = (need_large_offset(obj->offset, opts)
				  ? (0x80000000 | nr_large_offset++)
				  : obj->offset);
			hashwrite_be32(f, offset);
		}

		/* write the large offset table */
		list = sorted_by_sha;
		while (nr_large_offset) {
			struct pack_idx_entry *obj = *list++;
			uint64_t offset = obj->offset;

			if (!need_large_offset(offset, opts))
				continue;
			hashwrite_be64(f, offset);
			nr_large_offset--;
		}
	}

	/* trailer: the pack's own checksum, then the index's checksum */
	hashwrite(f, sha1, the_hash_algo->rawsz);
	finalize_hashfile(f, NULL, FSYNC_COMPONENT_PACK_METADATA,
			  CSUM_HASH_IN_STREAM | CSUM_CLOSE |
			  ((opts->flags & WRITE_IDX_VERIFY) ? 0 : CSUM_FSYNC));
	return index_name;
}
|
2007-05-03 00:13:14 +08:00
|
|
|
|
2021-01-26 07:37:18 +08:00
|
|
|
static int pack_order_cmp(const void *va, const void *vb, void *ctx)
|
|
|
|
{
|
|
|
|
struct pack_idx_entry **objects = ctx;
|
|
|
|
|
|
|
|
off_t oa = objects[*(uint32_t*)va]->offset;
|
|
|
|
off_t ob = objects[*(uint32_t*)vb]->offset;
|
|
|
|
|
|
|
|
if (oa < ob)
|
|
|
|
return -1;
|
|
|
|
if (oa > ob)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Emit the fixed 12-byte header of a .rev file: signature, format
 * version, and the numeric identifier of the hash algorithm in use.
 */
static void write_rev_header(struct hashfile *f)
{
	hashwrite_be32(f, RIDX_SIGNATURE);
	hashwrite_be32(f, RIDX_VERSION);
	hashwrite_be32(f, oid_version(the_hash_algo));
}
|
|
|
|
|
|
|
|
/*
 * Emit the body of a .rev file: nr_objects big-endian 32-bit index
 * positions, in the order given by pack_order.
 */
static void write_rev_index_positions(struct hashfile *f,
				      uint32_t *pack_order,
				      uint32_t nr_objects)
{
	const uint32_t *pos = pack_order;
	const uint32_t *end = pack_order + nr_objects;

	while (pos < end)
		hashwrite_be32(f, *pos++);
}
|
|
|
|
|
|
|
|
/*
 * Emit the .rev trailer: the checksum of the pack this reverse index
 * describes (the file's own checksum is appended by finalize_hashfile()).
 */
static void write_rev_trailer(struct hashfile *f, const unsigned char *hash)
{
	hashwrite(f, hash, the_hash_algo->rawsz);
}
|
|
|
|
|
2024-09-30 17:14:08 +08:00
|
|
|
char *write_rev_file(const char *rev_name,
|
|
|
|
struct pack_idx_entry **objects,
|
|
|
|
uint32_t nr_objects,
|
|
|
|
const unsigned char *hash,
|
|
|
|
unsigned flags)
|
2021-03-30 23:04:29 +08:00
|
|
|
{
|
|
|
|
uint32_t *pack_order;
|
|
|
|
uint32_t i;
|
2024-09-30 17:14:08 +08:00
|
|
|
char *ret;
|
2021-03-30 23:04:29 +08:00
|
|
|
|
2021-09-08 09:08:03 +08:00
|
|
|
if (!(flags & WRITE_REV) && !(flags & WRITE_REV_VERIFY))
|
|
|
|
return NULL;
|
|
|
|
|
2021-03-30 23:04:29 +08:00
|
|
|
ALLOC_ARRAY(pack_order, nr_objects);
|
|
|
|
for (i = 0; i < nr_objects; i++)
|
|
|
|
pack_order[i] = i;
|
|
|
|
QSORT_S(pack_order, nr_objects, pack_order_cmp, objects);
|
|
|
|
|
|
|
|
ret = write_rev_file_order(rev_name, pack_order, nr_objects, hash,
|
|
|
|
flags);
|
|
|
|
|
|
|
|
free(pack_order);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2024-09-30 17:14:08 +08:00
|
|
|
/*
 * Write (or verify) a .rev file from an already-computed pack_order.
 * Returns a freshly allocated path to the file, or NULL when nothing was
 * requested or the file to verify does not exist.
 */
char *write_rev_file_order(const char *rev_name,
			   uint32_t *pack_order,
			   uint32_t nr_objects,
			   const unsigned char *hash,
			   unsigned flags)
{
	struct hashfile *f;
	char *path;
	int fd;

	if ((flags & WRITE_REV) && (flags & WRITE_REV_VERIFY))
		die(_("cannot both write and verify reverse index"));

	if (flags & WRITE_REV) {
		if (!rev_name) {
			struct strbuf tmp_file = STRBUF_INIT;
			fd = odb_mkstemp(&tmp_file, "pack/tmp_rev_XXXXXX");
			path = strbuf_detach(&tmp_file, NULL);
		} else {
			/* clear any stale file so O_EXCL creation succeeds */
			unlink(rev_name);
			fd = xopen(rev_name, O_CREAT|O_EXCL|O_WRONLY, 0600);
			path = xstrdup(rev_name);
		}
		f = hashfd(fd, path);
	} else if (flags & WRITE_REV_VERIFY) {
		struct stat statbuf;
		if (stat(rev_name, &statbuf)) {
			if (errno == ENOENT) {
				/* .rev files are optional */
				return NULL;
			} else
				die_errno(_("could not stat: %s"), rev_name);
		}
		/* verify mode: checksum the existing file, don't write */
		f = hashfd_check(rev_name);
		path = xstrdup(rev_name);
	} else {
		return NULL;
	}

	write_rev_header(f);

	write_rev_index_positions(f, pack_order, nr_objects);
	write_rev_trailer(f, hash);

	if (adjust_shared_perm(path) < 0)
		die(_("failed to make %s readable"), path);

	/*
	 * NOTE(review): the fsync condition tests WRITE_IDX_VERIFY rather
	 * than WRITE_REV_VERIFY — confirm whether that is intentional.
	 */
	finalize_hashfile(f, NULL, FSYNC_COMPONENT_PACK_METADATA,
			  CSUM_HASH_IN_STREAM | CSUM_CLOSE |
			  ((flags & WRITE_IDX_VERIFY) ? 0 : CSUM_FSYNC));

	return path;
}
|
|
|
|
|
2022-05-21 07:17:43 +08:00
|
|
|
/*
 * Emit the fixed 12-byte header of a .mtimes file: signature, format
 * version, and the numeric identifier of the hash algorithm in use.
 */
static void write_mtimes_header(struct hashfile *f)
{
	hashwrite_be32(f, MTIMES_SIGNATURE);
	hashwrite_be32(f, MTIMES_VERSION);
	hashwrite_be32(f, oid_version(the_hash_algo));
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Writes the object mtimes of "objects" for use in a .mtimes file.
|
|
|
|
* Note that objects must be in lexicographic (index) order, which is
|
|
|
|
* the expected ordering of these values in the .mtimes file.
|
|
|
|
*/
|
|
|
|
/*
 * Writes the object mtimes of "objects" for use in a .mtimes file.
 * Note that objects must be in lexicographic (index) order, which is
 * the expected ordering of these values in the .mtimes file.
 */
static void write_mtimes_objects(struct hashfile *f,
				 struct packing_data *to_pack,
				 struct pack_idx_entry **objects,
				 uint32_t nr_objects)
{
	uint32_t idx;

	for (idx = 0; idx < nr_objects; idx++) {
		struct object_entry *entry = (struct object_entry *)objects[idx];

		hashwrite_be32(f, oe_cruft_mtime(to_pack, entry));
	}
}
|
|
|
|
|
|
|
|
/*
 * Emit the .mtimes trailer: the checksum of the pack this file belongs
 * to (the file's own checksum is appended by finalize_hashfile()).
 */
static void write_mtimes_trailer(struct hashfile *f, const unsigned char *hash)
{
	hashwrite(f, hash, the_hash_algo->rawsz);
}
|
|
|
|
|
2023-04-19 04:40:32 +08:00
|
|
|
/*
 * Write a .mtimes file for the given objects into a temporary file in
 * the object database and return its (freshly allocated) path.  The
 * caller is expected to move it into place and free the path.
 */
static char *write_mtimes_file(struct packing_data *to_pack,
			       struct pack_idx_entry **objects,
			       uint32_t nr_objects,
			       const unsigned char *hash)
{
	struct strbuf tmp_file = STRBUF_INIT;
	char *mtimes_name;
	struct hashfile *f;
	int fd;

	if (!to_pack)
		BUG("cannot call write_mtimes_file with NULL packing_data");

	fd = odb_mkstemp(&tmp_file, "pack/tmp_mtimes_XXXXXX");
	mtimes_name = strbuf_detach(&tmp_file, NULL);
	f = hashfd(fd, mtimes_name);

	write_mtimes_header(f);
	write_mtimes_objects(f, to_pack, objects, nr_objects);
	write_mtimes_trailer(f, hash);

	if (adjust_shared_perm(mtimes_name) < 0)
		die(_("failed to make %s readable"), mtimes_name);

	finalize_hashfile(f, NULL, FSYNC_COMPONENT_PACK_METADATA,
			  CSUM_HASH_IN_STREAM | CSUM_CLOSE | CSUM_FSYNC);

	return mtimes_name;
}
|
|
|
|
|
2018-02-01 10:18:46 +08:00
|
|
|
off_t write_pack_header(struct hashfile *f, uint32_t nr_entries)
|
2011-10-29 02:40:48 +08:00
|
|
|
{
|
|
|
|
struct pack_header hdr;
|
|
|
|
|
|
|
|
hdr.hdr_signature = htonl(PACK_SIGNATURE);
|
|
|
|
hdr.hdr_version = htonl(PACK_VERSION);
|
|
|
|
hdr.hdr_entries = htonl(nr_entries);
|
2018-02-01 10:18:46 +08:00
|
|
|
hashwrite(f, &hdr, sizeof(hdr));
|
2011-10-29 02:40:48 +08:00
|
|
|
return sizeof(hdr);
|
|
|
|
}
|
|
|
|
|
2008-08-30 04:07:59 +08:00
|
|
|
/*
|
|
|
|
* Update pack header with object_count and compute new SHA1 for pack data
|
|
|
|
* associated to pack_fd, and write that SHA1 at the end. That new SHA1
|
|
|
|
* is also returned in new_pack_sha1.
|
|
|
|
*
|
|
|
|
* If partial_pack_sha1 is non null, then the SHA1 of the existing pack
|
|
|
|
* (without the header update) is computed and validated against the
|
|
|
|
* one provided in partial_pack_sha1. The validation is performed at
|
|
|
|
* partial_pack_offset bytes in the pack file. The SHA1 of the remaining
|
|
|
|
* data (i.e. from partial_pack_offset to the end) is then computed and
|
|
|
|
* returned in partial_pack_sha1.
|
|
|
|
*
|
|
|
|
* Note that new_pack_sha1 is updated last, so both new_pack_sha1 and
|
|
|
|
* partial_pack_sha1 can refer to the same buffer if the caller is not
|
|
|
|
* interested in the resulting SHA1 of pack data above partial_pack_offset.
|
|
|
|
*/
|
2007-05-03 00:13:14 +08:00
|
|
|
void fixup_pack_header_footer(int pack_fd,
			      unsigned char *new_pack_hash,
			      const char *pack_name,
			      uint32_t object_count,
			      unsigned char *partial_pack_hash,
			      off_t partial_pack_offset)
{
	int aligned_sz, buf_sz = 8 * 1024;
	/* old = checksum of the pack as it is on disk, new = after fixup */
	git_hash_ctx old_hash_ctx, new_hash_ctx;
	struct pack_header hdr;
	char *buf;
	ssize_t read_result;

	the_hash_algo->init_fn(&old_hash_ctx);
	the_hash_algo->init_fn(&new_hash_ctx);

	if (lseek(pack_fd, 0, SEEK_SET) != 0)
		die_errno("Failed seeking to start of '%s'", pack_name);
	read_result = read_in_full(pack_fd, &hdr, sizeof(hdr));
	if (read_result < 0)
		die_errno("Unable to reread header of '%s'", pack_name);
	else if (read_result != sizeof(hdr))
		die_errno("Unexpected short read for header of '%s'",
			  pack_name);
	/* rewind so the updated header overwrites the old one in place */
	if (lseek(pack_fd, 0, SEEK_SET) != 0)
		die_errno("Failed seeking to start of '%s'", pack_name);
	/* old checksum covers the original header, new one the updated header */
	the_hash_algo->update_fn(&old_hash_ctx, &hdr, sizeof(hdr));
	hdr.hdr_entries = htonl(object_count);
	the_hash_algo->update_fn(&new_hash_ctx, &hdr, sizeof(hdr));
	write_or_die(pack_fd, &hdr, sizeof(hdr));
	partial_pack_offset -= sizeof(hdr);

	buf = xmalloc(buf_sz);
	aligned_sz = buf_sz - sizeof(hdr);
	for (;;) {
		ssize_t m, n;
		/*
		 * Read at most up to partial_pack_offset so the chunk
		 * boundary lands exactly where the old checksum ends.
		 */
		m = (partial_pack_hash && partial_pack_offset < aligned_sz) ?
			partial_pack_offset : aligned_sz;
		n = xread(pack_fd, buf, m);
		if (!n)
			break;
		if (n < 0)
			die_errno("Failed to checksum '%s'", pack_name);
		the_hash_algo->update_fn(&new_hash_ctx, buf, n);

		aligned_sz -= n;
		if (!aligned_sz)
			aligned_sz = buf_sz;

		if (!partial_pack_hash)
			continue;

		the_hash_algo->update_fn(&old_hash_ctx, buf, n);
		partial_pack_offset -= n;
		if (partial_pack_offset == 0) {
			/* validate the existing portion against the caller's hash */
			unsigned char hash[GIT_MAX_RAWSZ];
			the_hash_algo->final_fn(hash, &old_hash_ctx);
			if (!hasheq(hash, partial_pack_hash,
				    the_repository->hash_algo))
				die("Unexpected checksum for %s "
				    "(disk corruption?)", pack_name);
			/*
			 * Now let's compute the SHA1 of the remainder of the
			 * pack, which also means making partial_pack_offset
			 * big enough not to matter anymore.
			 */
			the_hash_algo->init_fn(&old_hash_ctx);
			partial_pack_offset = ~partial_pack_offset;
			partial_pack_offset -= MSB(partial_pack_offset, 1);
		}
	}
	free(buf);

	if (partial_pack_hash)
		the_hash_algo->final_fn(partial_pack_hash, &old_hash_ctx);
	the_hash_algo->final_fn(new_pack_hash, &new_hash_ctx);
	/* append the recomputed trailer checksum to the pack */
	write_or_die(pack_fd, new_pack_hash, the_hash_algo->rawsz);
	fsync_component_or_die(FSYNC_COMPONENT_PACK, pack_fd, pack_name);
}
|
2007-09-14 15:31:16 +08:00
|
|
|
|
2021-02-23 03:20:09 +08:00
|
|
|
/*
 * Parse the first line of index-pack's output (read from fd ip_out) and,
 * for a "keep" response, return a freshly allocated path to the
 * corresponding .keep file; otherwise return NULL.  *is_well_formed, if
 * non-NULL, reports whether the line matched the expected format.
 */
char *index_pack_lockfile(int ip_out, int *is_well_formed)
{
	char packname[GIT_MAX_HEXSZ + 6];
	/* "pack\t" or "keep\t" plus the hex hash and trailing newline */
	const int len = the_hash_algo->hexsz + 6;

	/*
	 * The first thing we expect from index-pack's output
	 * is "pack\t%40s\n" or "keep\t%40s\n" (46 bytes) where
	 * %40s is the newly created pack SHA1 name. In the "keep"
	 * case, we need it to remove the corresponding .keep file
	 * later on. If we don't get that then tough luck with it.
	 */
	if (read_in_full(ip_out, packname, len) == len && packname[len-1] == '\n') {
		const char *name;

		if (is_well_formed)
			*is_well_formed = 1;
		/* replace the newline with a NUL to terminate the string */
		packname[len-1] = 0;
		if (skip_prefix(packname, "keep\t", &name))
			return xstrfmt("%s/pack/pack-%s.keep",
				       repo_get_object_directory(the_repository), name);
		return NULL;
	}
	if (is_well_formed)
		*is_well_formed = 0;
	return NULL;
}
|
2010-02-24 04:02:37 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The per-object header is a pretty dense thing, which is
|
|
|
|
* - first byte: low four bits are "size", then three bits of "type",
|
|
|
|
* and the high bit is "size continues".
|
|
|
|
* - each byte afterwards: low seven bits are size continuation,
|
|
|
|
* with the high bit being "size continues"
|
|
|
|
*/
|
encode_in_pack_object_header: respect output buffer length
The encode_in_pack_object_header() writes a variable-length
header to an output buffer, but it doesn't actually know
long the buffer is. At first glance, this looks like it
might be possible to overflow.
In practice, this is probably impossible. The smallest
buffer we use is 10 bytes, which would hold the header for
an object up to 2^67 bytes. Obviously we're not likely to
see such an object, but we might worry that an object could
lie about its size (causing us to overflow before we realize
it does not actually have that many bytes). But the argument
is passed as a uintmax_t. Even on systems that have __int128
available, uintmax_t is typically restricted to 64-bit by
the ABI.
So it's unlikely that a system exists where this could be
exploited. Still, it's easy enough to use a normal out/len
pair and make sure we don't write too far. That protects the
hypothetical 128-bit system, makes it harder for callers to
accidentally specify a too-small buffer, and makes the
resulting code easier to audit.
Note that the one caller in fast-import tried to catch such
a case, but did so _after_ the call (at which point we'd
have already overflowed!). This check can now go away.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-03-25 01:26:40 +08:00
|
|
|
int encode_in_pack_object_header(unsigned char *hdr, int hdr_len,
|
|
|
|
enum object_type type, uintmax_t size)
|
2010-02-24 04:02:37 +08:00
|
|
|
{
|
|
|
|
int n = 1;
|
|
|
|
unsigned char c;
|
|
|
|
|
|
|
|
if (type < OBJ_COMMIT || type > OBJ_REF_DELTA)
|
|
|
|
die("bad type %d", type);
|
|
|
|
|
|
|
|
c = (type << 4) | (size & 15);
|
|
|
|
size >>= 4;
|
|
|
|
while (size) {
|
encode_in_pack_object_header: respect output buffer length
The encode_in_pack_object_header() writes a variable-length
header to an output buffer, but it doesn't actually know
long the buffer is. At first glance, this looks like it
might be possible to overflow.
In practice, this is probably impossible. The smallest
buffer we use is 10 bytes, which would hold the header for
an object up to 2^67 bytes. Obviously we're not likely to
see such an object, but we might worry that an object could
lie about its size (causing us to overflow before we realize
it does not actually have that many bytes). But the argument
is passed as a uintmax_t. Even on systems that have __int128
available, uintmax_t is typically restricted to 64-bit by
the ABI.
So it's unlikely that a system exists where this could be
exploited. Still, it's easy enough to use a normal out/len
pair and make sure we don't write too far. That protects the
hypothetical 128-bit system, makes it harder for callers to
accidentally specify a too-small buffer, and makes the
resulting code easier to audit.
Note that the one caller in fast-import tried to catch such
a case, but did so _after_ the call (at which point we'd
have already overflowed!). This check can now go away.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-03-25 01:26:40 +08:00
|
|
|
if (n == hdr_len)
|
|
|
|
die("object size is too enormous to format");
|
2010-02-24 04:02:37 +08:00
|
|
|
*hdr++ = c | 0x80;
|
|
|
|
c = size & 0x7f;
|
|
|
|
size >>= 7;
|
|
|
|
n++;
|
|
|
|
}
|
|
|
|
*hdr = c;
|
|
|
|
return n;
|
|
|
|
}
|
2011-10-29 02:52:14 +08:00
|
|
|
|
2018-02-01 10:18:46 +08:00
|
|
|
struct hashfile *create_tmp_packfile(char **pack_tmp_name)
|
2011-10-29 02:52:14 +08:00
|
|
|
{
|
2017-03-29 03:45:43 +08:00
|
|
|
struct strbuf tmpname = STRBUF_INIT;
|
2011-10-29 02:52:14 +08:00
|
|
|
int fd;
|
|
|
|
|
2017-03-29 03:45:43 +08:00
|
|
|
fd = odb_mkstemp(&tmpname, "pack/tmp_pack_XXXXXX");
|
|
|
|
*pack_tmp_name = strbuf_detach(&tmpname, NULL);
|
2018-02-01 10:18:46 +08:00
|
|
|
return hashfd(fd, *pack_tmp_name);
|
2011-10-29 02:52:14 +08:00
|
|
|
}
|
2011-10-29 03:34:09 +08:00
|
|
|
|
2021-09-10 07:24:37 +08:00
|
|
|
static void rename_tmp_packfile(struct strbuf *name_prefix, const char *source,
|
|
|
|
const char *ext)
|
|
|
|
{
|
|
|
|
size_t name_prefix_len = name_prefix->len;
|
|
|
|
|
|
|
|
strbuf_addstr(name_prefix, ext);
|
pack-objects: use finalize_object_file() to rename pack/idx/etc
In most places that write files to the object database (even packfiles
via index-pack or fast-import), we use finalize_object_file(). This
prefers link()/unlink() over rename(), because it means we will prefer
data that is already in the repository to data that we are newly
writing.
We should do the same thing in pack-objects. Even though we don't think
of it as accepting outside data (and thus not being susceptible to
collision attacks), in theory a determined attacker could present just
the right set of objects to cause an incremental repack to generate
a pack with their desired hash.
This has some test and real-world fallout, as seen in the adjustment to
t5303 below. That test script assumes that we can "fix" corruption by
repacking into a good state, including when the pack generated by that
repack operation collides with a (corrupted) pack with the same hash.
This violates our assumption from the previous adjustments to
finalize_object_file() that if we're moving a new file over an existing
one, that since their checksums match, so too must their contents.
This makes "fixing" corruption like this a more explicit operation,
since the test (and users, who may fix real-life corruption using a
similar technique) must first move the broken contents out of the way.
Note also that we now call adjust_shared_perm() twice. We already call
adjust_shared_perm() in stage_tmp_packfiles(), and now call it again in
finalize_object_file(). This is somewhat wasteful, but cleaning up the
existing calls to adjust_shared_perm() is tricky (because sometimes
we're writing to a tmpfile, and sometimes we're writing directly into
the final destination), so let's tolerate some minor waste until we can
more carefully clean up the now-redundant calls.
Co-authored-by: Jeff King <peff@peff.net>
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Taylor Blau <me@ttaylorr.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2024-09-26 23:22:41 +08:00
|
|
|
if (finalize_object_file(source, name_prefix->buf))
|
|
|
|
die("unable to rename temporary file to '%s'",
|
|
|
|
name_prefix->buf);
|
2021-09-10 07:24:37 +08:00
|
|
|
strbuf_setlen(name_prefix, name_prefix_len);
|
|
|
|
}
|
|
|
|
|
2021-09-10 07:24:56 +08:00
|
|
|
/*
 * Move the temporary pack index at *idx_tmp_name into its final
 * location.  name_buffer holds the destination path prefix (presumably
 * ending just before the extension -- confirm against callers); the
 * "idx" extension is appended and the buffer restored by
 * rename_tmp_packfile().  *idx_tmp_name remains owned by the caller.
 */
void rename_tmp_packfile_idx(struct strbuf *name_buffer,
			     char **idx_tmp_name)
{
	rename_tmp_packfile(name_buffer, *idx_tmp_name, "idx");
}
|
|
|
|
|
|
|
|
/*
 * Finalize the auxiliary files for a freshly written pack: write the
 * .idx (path returned via *idx_tmp_name; the caller renames it into
 * place later via rename_tmp_packfile_idx()), optionally the .rev and
 * .mtimes files, then move the .pack/.rev/.mtimes files to their final
 * names.  Dies on any failure.
 */
void stage_tmp_packfiles(struct strbuf *name_buffer,
			 const char *pack_tmp_name,
			 struct pack_idx_entry **written_list,
			 uint32_t nr_written,
			 struct packing_data *to_pack,
			 struct pack_idx_option *pack_idx_opts,
			 unsigned char hash[],
			 char **idx_tmp_name)
{
	char *rev_tmp_name = NULL;
	char *mtimes_tmp_name = NULL;

	/* Make the temp files world/group readable per repo config. */
	if (adjust_shared_perm(pack_tmp_name))
		die_errno("unable to make temporary pack file readable");

	/* Ownership of the returned idx path passes to the caller. */
	*idx_tmp_name = (char *)write_idx_file(NULL, written_list, nr_written,
					       pack_idx_opts, hash);
	if (adjust_shared_perm(*idx_tmp_name))
		die_errno("unable to make temporary index file readable");

	/* May return NULL when no .rev file is requested by flags. */
	rev_tmp_name = write_rev_file(NULL, written_list, nr_written, hash,
				      pack_idx_opts->flags);

	/* The .mtimes file is only produced for cruft packs. */
	if (pack_idx_opts->flags & WRITE_MTIMES) {
		mtimes_tmp_name = write_mtimes_file(to_pack, written_list,
						    nr_written,
						    hash);
	}

	/*
	 * Rename the pack first so the auxiliary files never refer to a
	 * not-yet-present packfile; the .idx rename is deliberately left
	 * to the caller as the very last step.
	 */
	rename_tmp_packfile(name_buffer, pack_tmp_name, "pack");
	if (rev_tmp_name)
		rename_tmp_packfile(name_buffer, rev_tmp_name, "rev");
	if (mtimes_tmp_name)
		rename_tmp_packfile(name_buffer, mtimes_tmp_name, "mtimes");

	free(rev_tmp_name);
	free(mtimes_tmp_name);
}
|
2021-01-12 16:21:59 +08:00
|
|
|
|
|
|
|
void write_promisor_file(const char *promisor_name, struct ref **sought, int nr_sought)
|
|
|
|
{
|
2021-01-14 23:50:16 +08:00
|
|
|
int i, err;
|
2021-01-12 16:21:59 +08:00
|
|
|
FILE *output = xfopen(promisor_name, "w");
|
|
|
|
|
|
|
|
for (i = 0; i < nr_sought; i++)
|
|
|
|
fprintf(output, "%s %s\n", oid_to_hex(&sought[i]->old_oid),
|
|
|
|
sought[i]->name);
|
2021-01-14 23:50:16 +08:00
|
|
|
|
|
|
|
err = ferror(output);
|
|
|
|
err |= fclose(output);
|
|
|
|
if (err)
|
|
|
|
die(_("could not write '%s' promisor file"), promisor_name);
|
2021-01-12 16:21:59 +08:00
|
|
|
}
|