mirror of
https://github.com/git/git.git
synced 2024-11-23 18:05:29 +08:00
Merge branch 'jk/loose-object-cache-oid'
Code clean-up.

* jk/loose-object-cache-oid:
  prefer "hash mismatch" to "sha1 mismatch"
  sha1-file: avoid "sha1 file" for generic use in messages
  sha1-file: prefer "loose object file" to "sha1 file" in messages
  sha1-file: drop has_sha1_file()
  convert has_sha1_file() callers to has_object_file()
  sha1-file: convert pass-through functions to object_id
  sha1-file: modernize loose header/stream functions
  sha1-file: modernize loose object file functions
  http: use struct object_id instead of bare sha1
  update comment references to sha1_object_info()
  sha1-file: fix outdated sha1 comment references
This commit is contained in:
commit
cba595ab1a
@ -140,9 +140,9 @@ dangling <type> <object>::
|
||||
The <type> object <object>, is present in the database but never
|
||||
'directly' used. A dangling commit could be a root node.
|
||||
|
||||
sha1 mismatch <object>::
|
||||
The database has an object who's sha1 doesn't match the
|
||||
database value.
|
||||
hash mismatch <object>::
|
||||
The database has an object whose hash doesn't match the
|
||||
object database value.
|
||||
This indicates a serious data integrity problem.
|
||||
|
||||
Environment Variables
|
||||
|
2
apply.c
2
apply.c
@ -3182,7 +3182,7 @@ static int apply_binary(struct apply_state *state,
|
||||
return 0; /* deletion patch */
|
||||
}
|
||||
|
||||
if (has_sha1_file(oid.hash)) {
|
||||
if (has_object_file(&oid)) {
|
||||
/* We already have the postimage */
|
||||
enum object_type type;
|
||||
unsigned long size;
|
||||
|
@ -211,14 +211,14 @@ struct expand_data {
|
||||
|
||||
/*
|
||||
* After a mark_query run, this object_info is set up to be
|
||||
* passed to sha1_object_info_extended. It will point to the data
|
||||
* passed to oid_object_info_extended. It will point to the data
|
||||
* elements above, so you can retrieve the response from there.
|
||||
*/
|
||||
struct object_info info;
|
||||
|
||||
/*
|
||||
* This flag will be true if the requested batch format and options
|
||||
* don't require us to call sha1_object_info, which can then be
|
||||
* don't require us to call oid_object_info, which can then be
|
||||
* optimized out.
|
||||
*/
|
||||
unsigned skip_object_info : 1;
|
||||
@ -496,7 +496,7 @@ static int batch_objects(struct batch_options *opt)
|
||||
|
||||
/*
|
||||
* Expand once with our special mark_query flag, which will prime the
|
||||
* object_info to be handed to sha1_object_info_extended for each
|
||||
* object_info to be handed to oid_object_info_extended for each
|
||||
* object.
|
||||
*/
|
||||
memset(&data, 0, sizeof(data));
|
||||
|
@ -317,8 +317,7 @@ static void find_non_local_tags(const struct ref *refs,
|
||||
!has_object_file_with_flags(&ref->old_oid,
|
||||
OBJECT_INFO_QUICK) &&
|
||||
!will_fetch(head, ref->old_oid.hash) &&
|
||||
!has_sha1_file_with_flags(item->oid.hash,
|
||||
OBJECT_INFO_QUICK) &&
|
||||
!has_object_file_with_flags(&item->oid, OBJECT_INFO_QUICK) &&
|
||||
!will_fetch(head, item->oid.hash))
|
||||
oidclr(&item->oid);
|
||||
item = NULL;
|
||||
@ -332,7 +331,7 @@ static void find_non_local_tags(const struct ref *refs,
|
||||
* fetch.
|
||||
*/
|
||||
if (item &&
|
||||
!has_sha1_file_with_flags(item->oid.hash, OBJECT_INFO_QUICK) &&
|
||||
!has_object_file_with_flags(&item->oid, OBJECT_INFO_QUICK) &&
|
||||
!will_fetch(head, item->oid.hash))
|
||||
oidclr(&item->oid);
|
||||
|
||||
@ -353,7 +352,7 @@ static void find_non_local_tags(const struct ref *refs,
|
||||
* checked to see if it needs fetching.
|
||||
*/
|
||||
if (item &&
|
||||
!has_sha1_file_with_flags(item->oid.hash, OBJECT_INFO_QUICK) &&
|
||||
!has_object_file_with_flags(&item->oid, OBJECT_INFO_QUICK) &&
|
||||
!will_fetch(head, item->oid.hash))
|
||||
oidclr(&item->oid);
|
||||
|
||||
|
@ -772,7 +772,7 @@ static void sha1_object(const void *data, struct object_entry *obj_entry,
|
||||
if (startup_info->have_repository) {
|
||||
read_lock();
|
||||
collision_test_needed =
|
||||
has_sha1_file_with_flags(oid->hash, OBJECT_INFO_QUICK);
|
||||
has_object_file_with_flags(oid, OBJECT_INFO_QUICK);
|
||||
read_unlock();
|
||||
}
|
||||
|
||||
|
@ -1643,7 +1643,7 @@ static void check_object(struct object_entry *entry)
|
||||
|
||||
/*
|
||||
* No choice but to fall back to the recursive delta walk
|
||||
* with sha1_object_info() to find about the object type
|
||||
* with oid_object_info() to find about the object type
|
||||
* at this point...
|
||||
*/
|
||||
give_up:
|
||||
@ -1719,7 +1719,7 @@ static void drop_reused_delta(struct object_entry *entry)
|
||||
if (packed_object_info(the_repository, IN_PACK(entry), entry->in_pack_offset, &oi) < 0) {
|
||||
/*
|
||||
* We failed to get the info from this pack for some reason;
|
||||
* fall back to sha1_object_info, which may find another copy.
|
||||
* fall back to oid_object_info, which may find another copy.
|
||||
* And if that fails, the error will be recorded in oe_type(entry)
|
||||
* and dealt with in prepare_pack().
|
||||
*/
|
||||
|
@ -94,7 +94,7 @@ static int tree_is_complete(const struct object_id *oid)
|
||||
init_tree_desc(&desc, tree->buffer, tree->size);
|
||||
complete = 1;
|
||||
while (tree_entry(&desc, &entry)) {
|
||||
if (!has_sha1_file(entry.oid.hash) ||
|
||||
if (!has_object_file(&entry.oid) ||
|
||||
(S_ISDIR(entry.mode) && !tree_is_complete(&entry.oid))) {
|
||||
tree->object.flags |= INCOMPLETE;
|
||||
complete = 0;
|
||||
|
@ -23,7 +23,7 @@ static void show_one(const char *refname, const struct object_id *oid)
|
||||
const char *hex;
|
||||
struct object_id peeled;
|
||||
|
||||
if (!has_sha1_file(oid->hash))
|
||||
if (!has_object_file(oid))
|
||||
die("git show-ref: bad ref %s (%s)", refname,
|
||||
oid_to_hex(oid));
|
||||
|
||||
|
@ -67,7 +67,7 @@ static int already_written(struct bulk_checkin_state *state, struct object_id *o
|
||||
int i;
|
||||
|
||||
/* The object may already exist in the repository */
|
||||
if (has_sha1_file(oid->hash))
|
||||
if (has_object_file(oid))
|
||||
return 1;
|
||||
|
||||
/* Might want to keep the list sorted */
|
||||
|
@ -225,7 +225,7 @@ int cache_tree_fully_valid(struct cache_tree *it)
|
||||
int i;
|
||||
if (!it)
|
||||
return 0;
|
||||
if (it->entry_count < 0 || !has_sha1_file(it->oid.hash))
|
||||
if (it->entry_count < 0 || !has_object_file(&it->oid))
|
||||
return 0;
|
||||
for (i = 0; i < it->subtree_nr; i++) {
|
||||
if (!cache_tree_fully_valid(it->down[i]->cache_tree))
|
||||
@ -253,7 +253,7 @@ static int update_one(struct cache_tree *it,
|
||||
|
||||
*skip_count = 0;
|
||||
|
||||
if (0 <= it->entry_count && has_sha1_file(it->oid.hash))
|
||||
if (0 <= it->entry_count && has_object_file(&it->oid))
|
||||
return it->entry_count;
|
||||
|
||||
/*
|
||||
|
6
cache.h
6
cache.h
@ -1271,8 +1271,8 @@ extern char *xdg_cache_home(const char *filename);
|
||||
|
||||
extern int git_open_cloexec(const char *name, int flags);
|
||||
#define git_open(name) git_open_cloexec(name, O_RDONLY)
|
||||
extern int unpack_sha1_header(git_zstream *stream, unsigned char *map, unsigned long mapsize, void *buffer, unsigned long bufsiz);
|
||||
extern int parse_sha1_header(const char *hdr, unsigned long *sizep);
|
||||
extern int unpack_loose_header(git_zstream *stream, unsigned char *map, unsigned long mapsize, void *buffer, unsigned long bufsiz);
|
||||
extern int parse_loose_header(const char *hdr, unsigned long *sizep);
|
||||
|
||||
extern int check_object_signature(const struct object_id *oid, void *buf, unsigned long size, const char *type);
|
||||
|
||||
@ -1619,7 +1619,7 @@ extern int odb_mkstemp(struct strbuf *temp_filename, const char *pattern);
|
||||
extern int odb_pack_keep(const char *name);
|
||||
|
||||
/*
|
||||
* Set this to 0 to prevent sha1_object_info_extended() from fetching missing
|
||||
* Set this to 0 to prevent oid_object_info_extended() from fetching missing
|
||||
* blobs. This has a difference only if extensions.partialClone is set.
|
||||
*
|
||||
* Its default value is 1.
|
||||
|
@ -255,7 +255,7 @@ static void start_fetch_loose(struct transfer_request *request)
|
||||
struct active_request_slot *slot;
|
||||
struct http_object_request *obj_req;
|
||||
|
||||
obj_req = new_http_object_request(repo->url, request->obj->oid.hash);
|
||||
obj_req = new_http_object_request(repo->url, &request->obj->oid);
|
||||
if (obj_req == NULL) {
|
||||
request->state = ABORTED;
|
||||
return;
|
||||
|
@ -58,7 +58,7 @@ static void start_object_request(struct walker *walker,
|
||||
struct active_request_slot *slot;
|
||||
struct http_object_request *req;
|
||||
|
||||
req = new_http_object_request(obj_req->repo->base, obj_req->oid.hash);
|
||||
req = new_http_object_request(obj_req->repo->base, &obj_req->oid);
|
||||
if (req == NULL) {
|
||||
obj_req->state = ABORTED;
|
||||
return;
|
||||
@ -131,7 +131,7 @@ static int fill_active_slot(struct walker *walker)
|
||||
list_for_each_safe(pos, tmp, head) {
|
||||
obj_req = list_entry(pos, struct object_request, node);
|
||||
if (obj_req->state == WAITING) {
|
||||
if (has_sha1_file(obj_req->oid.hash))
|
||||
if (has_object_file(&obj_req->oid))
|
||||
obj_req->state = COMPLETE;
|
||||
else {
|
||||
start_object_request(walker, obj_req);
|
||||
@ -489,7 +489,7 @@ static int fetch_object(struct walker *walker, unsigned char *sha1)
|
||||
if (obj_req == NULL)
|
||||
return error("Couldn't find request for %s in the queue", hex);
|
||||
|
||||
if (has_sha1_file(obj_req->oid.hash)) {
|
||||
if (has_object_file(&obj_req->oid)) {
|
||||
if (obj_req->req != NULL)
|
||||
abort_http_object_request(obj_req->req);
|
||||
abort_object_request(obj_req);
|
||||
@ -543,11 +543,11 @@ static int fetch_object(struct walker *walker, unsigned char *sha1)
|
||||
} else if (req->zret != Z_STREAM_END) {
|
||||
walker->corrupt_object_found++;
|
||||
ret = error("File %s (%s) corrupt", hex, req->url);
|
||||
} else if (!hasheq(obj_req->oid.hash, req->real_sha1)) {
|
||||
} else if (!oideq(&obj_req->oid, &req->real_oid)) {
|
||||
ret = error("File %s has bad hash", hex);
|
||||
} else if (req->rename < 0) {
|
||||
struct strbuf buf = STRBUF_INIT;
|
||||
loose_object_path(the_repository, &buf, req->sha1);
|
||||
loose_object_path(the_repository, &buf, &req->oid);
|
||||
ret = error("unable to write sha1 filename %s", buf.buf);
|
||||
strbuf_release(&buf);
|
||||
}
|
||||
|
14
http.c
14
http.c
@ -2343,9 +2343,9 @@ static size_t fwrite_sha1_file(char *ptr, size_t eltsize, size_t nmemb,
|
||||
}
|
||||
|
||||
struct http_object_request *new_http_object_request(const char *base_url,
|
||||
unsigned char *sha1)
|
||||
const struct object_id *oid)
|
||||
{
|
||||
char *hex = sha1_to_hex(sha1);
|
||||
char *hex = oid_to_hex(oid);
|
||||
struct strbuf filename = STRBUF_INIT;
|
||||
struct strbuf prevfile = STRBUF_INIT;
|
||||
int prevlocal;
|
||||
@ -2356,10 +2356,10 @@ struct http_object_request *new_http_object_request(const char *base_url,
|
||||
|
||||
freq = xcalloc(1, sizeof(*freq));
|
||||
strbuf_init(&freq->tmpfile, 0);
|
||||
hashcpy(freq->sha1, sha1);
|
||||
oidcpy(&freq->oid, oid);
|
||||
freq->localfile = -1;
|
||||
|
||||
loose_object_path(the_repository, &filename, sha1);
|
||||
loose_object_path(the_repository, &filename, oid);
|
||||
strbuf_addf(&freq->tmpfile, "%s.temp", filename.buf);
|
||||
|
||||
strbuf_addf(&prevfile, "%s.prev", filename.buf);
|
||||
@ -2501,16 +2501,16 @@ int finish_http_object_request(struct http_object_request *freq)
|
||||
}
|
||||
|
||||
git_inflate_end(&freq->stream);
|
||||
git_SHA1_Final(freq->real_sha1, &freq->c);
|
||||
git_SHA1_Final(freq->real_oid.hash, &freq->c);
|
||||
if (freq->zret != Z_STREAM_END) {
|
||||
unlink_or_warn(freq->tmpfile.buf);
|
||||
return -1;
|
||||
}
|
||||
if (!hasheq(freq->sha1, freq->real_sha1)) {
|
||||
if (!oideq(&freq->oid, &freq->real_oid)) {
|
||||
unlink_or_warn(freq->tmpfile.buf);
|
||||
return -1;
|
||||
}
|
||||
loose_object_path(the_repository, &filename, freq->sha1);
|
||||
loose_object_path(the_repository, &filename, &freq->oid);
|
||||
freq->rename = finalize_object_file(freq->tmpfile.buf, filename.buf);
|
||||
strbuf_release(&filename);
|
||||
|
||||
|
6
http.h
6
http.h
@ -223,8 +223,8 @@ struct http_object_request {
|
||||
CURLcode curl_result;
|
||||
char errorstr[CURL_ERROR_SIZE];
|
||||
long http_code;
|
||||
unsigned char sha1[20];
|
||||
unsigned char real_sha1[20];
|
||||
struct object_id oid;
|
||||
struct object_id real_oid;
|
||||
git_SHA_CTX c;
|
||||
git_zstream stream;
|
||||
int zret;
|
||||
@ -233,7 +233,7 @@ struct http_object_request {
|
||||
};
|
||||
|
||||
extern struct http_object_request *new_http_object_request(
|
||||
const char *base_url, unsigned char *sha1);
|
||||
const char *base_url, const struct object_id *oid);
|
||||
extern void process_http_object_request(struct http_object_request *freq);
|
||||
extern int finish_http_object_request(struct http_object_request *freq);
|
||||
extern void abort_http_object_request(struct http_object_request *freq);
|
||||
|
@ -154,11 +154,13 @@ void raw_object_store_clear(struct raw_object_store *o);
|
||||
|
||||
/*
|
||||
* Put in `buf` the name of the file in the local object database that
|
||||
* would be used to store a loose object with the specified sha1.
|
||||
* would be used to store a loose object with the specified oid.
|
||||
*/
|
||||
const char *loose_object_path(struct repository *r, struct strbuf *buf, const unsigned char *sha1);
|
||||
const char *loose_object_path(struct repository *r, struct strbuf *buf,
|
||||
const struct object_id *oid);
|
||||
|
||||
void *map_sha1_file(struct repository *r, const unsigned char *sha1, unsigned long *size);
|
||||
void *map_loose_object(struct repository *r, const struct object_id *oid,
|
||||
unsigned long *size);
|
||||
|
||||
extern void *read_object_file_extended(struct repository *r,
|
||||
const struct object_id *oid,
|
||||
@ -206,19 +208,6 @@ int read_loose_object(const char *path,
|
||||
unsigned long *size,
|
||||
void **contents);
|
||||
|
||||
/*
|
||||
* Convenience for sha1_object_info_extended() with a NULL struct
|
||||
* object_info. OBJECT_INFO_SKIP_CACHED is automatically set; pass
|
||||
* nonzero flags to also set other flags.
|
||||
*/
|
||||
int repo_has_sha1_file_with_flags(struct repository *r,
|
||||
const unsigned char *sha1, int flags);
|
||||
static inline int repo_has_sha1_file(struct repository *r,
|
||||
const unsigned char *sha1)
|
||||
{
|
||||
return repo_has_sha1_file_with_flags(r, sha1, 0);
|
||||
}
|
||||
|
||||
#ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS
|
||||
#define has_sha1_file_with_flags(sha1, flags) repo_has_sha1_file_with_flags(the_repository, sha1, flags)
|
||||
#define has_sha1_file(sha1) repo_has_sha1_file(the_repository, sha1)
|
||||
|
4
object.c
4
object.c
@ -264,7 +264,7 @@ struct object *parse_object(struct repository *r, const struct object_id *oid)
|
||||
(!obj && repo_has_object_file(r, oid) &&
|
||||
oid_object_info(r, oid, NULL) == OBJ_BLOB)) {
|
||||
if (check_object_signature(repl, NULL, 0, NULL) < 0) {
|
||||
error(_("sha1 mismatch %s"), oid_to_hex(oid));
|
||||
error(_("hash mismatch %s"), oid_to_hex(oid));
|
||||
return NULL;
|
||||
}
|
||||
parse_blob_buffer(lookup_blob(r, oid), NULL, 0);
|
||||
@ -275,7 +275,7 @@ struct object *parse_object(struct repository *r, const struct object_id *oid)
|
||||
if (buffer) {
|
||||
if (check_object_signature(repl, buffer, size, type_name(type)) < 0) {
|
||||
free(buffer);
|
||||
error(_("sha1 mismatch %s"), oid_to_hex(repl));
|
||||
error(_("hash mismatch %s"), oid_to_hex(repl));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
2
refs.c
2
refs.c
@ -188,7 +188,7 @@ int ref_resolves_to_object(const char *refname,
|
||||
{
|
||||
if (flags & REF_ISBROKEN)
|
||||
return 0;
|
||||
if (!has_sha1_file(oid->hash)) {
|
||||
if (!has_object_file(oid)) {
|
||||
error(_("%s does not point to a valid object!"), refname);
|
||||
return 0;
|
||||
}
|
||||
|
@ -40,7 +40,7 @@ int option_parse_push_signed(const struct option *opt,
|
||||
|
||||
static void feed_object(const struct object_id *oid, FILE *fh, int negative)
|
||||
{
|
||||
if (negative && !has_sha1_file(oid->hash))
|
||||
if (negative && !has_object_file(oid))
|
||||
return;
|
||||
|
||||
if (negative)
|
||||
|
220
sha1-file.c
220
sha1-file.c
@ -192,7 +192,7 @@ int hash_algo_by_id(uint32_t format_id)
|
||||
|
||||
/*
|
||||
* This is meant to hold a *small* number of objects that you would
|
||||
* want read_sha1_file() to be able to return, but yet you do not want
|
||||
* want read_object_file() to be able to return, but yet you do not want
|
||||
* to write them into the object store (e.g. a browse-only
|
||||
* application).
|
||||
*/
|
||||
@ -401,12 +401,12 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void fill_sha1_path(struct strbuf *buf, const unsigned char *sha1)
|
||||
static void fill_loose_path(struct strbuf *buf, const struct object_id *oid)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < the_hash_algo->rawsz; i++) {
|
||||
static char hex[] = "0123456789abcdef";
|
||||
unsigned int val = sha1[i];
|
||||
unsigned int val = oid->hash[i];
|
||||
strbuf_addch(buf, hex[val >> 4]);
|
||||
strbuf_addch(buf, hex[val & 0xf]);
|
||||
if (!i)
|
||||
@ -416,19 +416,19 @@ static void fill_sha1_path(struct strbuf *buf, const unsigned char *sha1)
|
||||
|
||||
static const char *odb_loose_path(struct object_directory *odb,
|
||||
struct strbuf *buf,
|
||||
const unsigned char *sha1)
|
||||
const struct object_id *oid)
|
||||
{
|
||||
strbuf_reset(buf);
|
||||
strbuf_addstr(buf, odb->path);
|
||||
strbuf_addch(buf, '/');
|
||||
fill_sha1_path(buf, sha1);
|
||||
fill_loose_path(buf, oid);
|
||||
return buf->buf;
|
||||
}
|
||||
|
||||
const char *loose_object_path(struct repository *r, struct strbuf *buf,
|
||||
const unsigned char *sha1)
|
||||
const struct object_id *oid)
|
||||
{
|
||||
return odb_loose_path(r->objects->odb, buf, sha1);
|
||||
return odb_loose_path(r->objects->odb, buf, oid);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -789,7 +789,7 @@ static int check_and_freshen_odb(struct object_directory *odb,
|
||||
int freshen)
|
||||
{
|
||||
static struct strbuf path = STRBUF_INIT;
|
||||
odb_loose_path(odb, &path, oid->hash);
|
||||
odb_loose_path(odb, &path, oid);
|
||||
return check_and_freshen_file(path.buf, freshen);
|
||||
}
|
||||
|
||||
@ -866,8 +866,8 @@ void *xmmap(void *start, size_t length,
|
||||
|
||||
/*
|
||||
* With an in-core object data in "map", rehash it to make sure the
|
||||
* object name actually matches "sha1" to detect object corruption.
|
||||
* With "map" == NULL, try reading the object named with "sha1" using
|
||||
* object name actually matches "oid" to detect object corruption.
|
||||
* With "map" == NULL, try reading the object named with "oid" using
|
||||
* the streaming interface and rehash it to do the same.
|
||||
*/
|
||||
int check_object_signature(const struct object_id *oid, void *map,
|
||||
@ -940,22 +940,22 @@ int git_open_cloexec(const char *name, int flags)
|
||||
}
|
||||
|
||||
/*
|
||||
* Find "sha1" as a loose object in the local repository or in an alternate.
|
||||
* Find "oid" as a loose object in the local repository or in an alternate.
|
||||
* Returns 0 on success, negative on failure.
|
||||
*
|
||||
* The "path" out-parameter will give the path of the object we found (if any).
|
||||
* Note that it may point to static storage and is only valid until another
|
||||
* call to stat_sha1_file().
|
||||
* call to stat_loose_object().
|
||||
*/
|
||||
static int stat_sha1_file(struct repository *r, const unsigned char *sha1,
|
||||
struct stat *st, const char **path)
|
||||
static int stat_loose_object(struct repository *r, const struct object_id *oid,
|
||||
struct stat *st, const char **path)
|
||||
{
|
||||
struct object_directory *odb;
|
||||
static struct strbuf buf = STRBUF_INIT;
|
||||
|
||||
prepare_alt_odb(r);
|
||||
for (odb = r->objects->odb; odb; odb = odb->next) {
|
||||
*path = odb_loose_path(odb, &buf, sha1);
|
||||
*path = odb_loose_path(odb, &buf, oid);
|
||||
if (!lstat(*path, st))
|
||||
return 0;
|
||||
}
|
||||
@ -964,11 +964,11 @@ static int stat_sha1_file(struct repository *r, const unsigned char *sha1,
|
||||
}
|
||||
|
||||
/*
|
||||
* Like stat_sha1_file(), but actually open the object and return the
|
||||
* Like stat_loose_object(), but actually open the object and return the
|
||||
* descriptor. See the caveats on the "path" parameter above.
|
||||
*/
|
||||
static int open_sha1_file(struct repository *r,
|
||||
const unsigned char *sha1, const char **path)
|
||||
static int open_loose_object(struct repository *r,
|
||||
const struct object_id *oid, const char **path)
|
||||
{
|
||||
int fd;
|
||||
struct object_directory *odb;
|
||||
@ -977,7 +977,7 @@ static int open_sha1_file(struct repository *r,
|
||||
|
||||
prepare_alt_odb(r);
|
||||
for (odb = r->objects->odb; odb; odb = odb->next) {
|
||||
*path = odb_loose_path(odb, &buf, sha1);
|
||||
*path = odb_loose_path(odb, &buf, oid);
|
||||
fd = git_open(*path);
|
||||
if (fd >= 0)
|
||||
return fd;
|
||||
@ -990,16 +990,13 @@ static int open_sha1_file(struct repository *r,
|
||||
}
|
||||
|
||||
static int quick_has_loose(struct repository *r,
|
||||
const unsigned char *sha1)
|
||||
const struct object_id *oid)
|
||||
{
|
||||
struct object_id oid;
|
||||
struct object_directory *odb;
|
||||
|
||||
hashcpy(oid.hash, sha1);
|
||||
|
||||
prepare_alt_odb(r);
|
||||
for (odb = r->objects->odb; odb; odb = odb->next) {
|
||||
if (oid_array_lookup(odb_loose_cache(odb, &oid), &oid) >= 0)
|
||||
if (oid_array_lookup(odb_loose_cache(odb, oid), oid) >= 0)
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
@ -1007,10 +1004,10 @@ static int quick_has_loose(struct repository *r,
|
||||
|
||||
/*
|
||||
* Map the loose object at "path" if it is not NULL, or the path found by
|
||||
* searching for a loose object named "sha1".
|
||||
* searching for a loose object named "oid".
|
||||
*/
|
||||
static void *map_sha1_file_1(struct repository *r, const char *path,
|
||||
const unsigned char *sha1, unsigned long *size)
|
||||
static void *map_loose_object_1(struct repository *r, const char *path,
|
||||
const struct object_id *oid, unsigned long *size)
|
||||
{
|
||||
void *map;
|
||||
int fd;
|
||||
@ -1018,7 +1015,7 @@ static void *map_sha1_file_1(struct repository *r, const char *path,
|
||||
if (path)
|
||||
fd = git_open(path);
|
||||
else
|
||||
fd = open_sha1_file(r, sha1, &path);
|
||||
fd = open_loose_object(r, oid, &path);
|
||||
map = NULL;
|
||||
if (fd >= 0) {
|
||||
struct stat st;
|
||||
@ -1038,15 +1035,16 @@ static void *map_sha1_file_1(struct repository *r, const char *path,
|
||||
return map;
|
||||
}
|
||||
|
||||
void *map_sha1_file(struct repository *r,
|
||||
const unsigned char *sha1, unsigned long *size)
|
||||
void *map_loose_object(struct repository *r,
|
||||
const struct object_id *oid,
|
||||
unsigned long *size)
|
||||
{
|
||||
return map_sha1_file_1(r, NULL, sha1, size);
|
||||
return map_loose_object_1(r, NULL, oid, size);
|
||||
}
|
||||
|
||||
static int unpack_sha1_short_header(git_zstream *stream,
|
||||
unsigned char *map, unsigned long mapsize,
|
||||
void *buffer, unsigned long bufsiz)
|
||||
static int unpack_loose_short_header(git_zstream *stream,
|
||||
unsigned char *map, unsigned long mapsize,
|
||||
void *buffer, unsigned long bufsiz)
|
||||
{
|
||||
/* Get the data stream */
|
||||
memset(stream, 0, sizeof(*stream));
|
||||
@ -1059,12 +1057,12 @@ static int unpack_sha1_short_header(git_zstream *stream,
|
||||
return git_inflate(stream, 0);
|
||||
}
|
||||
|
||||
int unpack_sha1_header(git_zstream *stream,
|
||||
unsigned char *map, unsigned long mapsize,
|
||||
void *buffer, unsigned long bufsiz)
|
||||
int unpack_loose_header(git_zstream *stream,
|
||||
unsigned char *map, unsigned long mapsize,
|
||||
void *buffer, unsigned long bufsiz)
|
||||
{
|
||||
int status = unpack_sha1_short_header(stream, map, mapsize,
|
||||
buffer, bufsiz);
|
||||
int status = unpack_loose_short_header(stream, map, mapsize,
|
||||
buffer, bufsiz);
|
||||
|
||||
if (status < Z_OK)
|
||||
return status;
|
||||
@ -1075,13 +1073,13 @@ int unpack_sha1_header(git_zstream *stream,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int unpack_sha1_header_to_strbuf(git_zstream *stream, unsigned char *map,
|
||||
unsigned long mapsize, void *buffer,
|
||||
unsigned long bufsiz, struct strbuf *header)
|
||||
static int unpack_loose_header_to_strbuf(git_zstream *stream, unsigned char *map,
|
||||
unsigned long mapsize, void *buffer,
|
||||
unsigned long bufsiz, struct strbuf *header)
|
||||
{
|
||||
int status;
|
||||
|
||||
status = unpack_sha1_short_header(stream, map, mapsize, buffer, bufsiz);
|
||||
status = unpack_loose_short_header(stream, map, mapsize, buffer, bufsiz);
|
||||
if (status < Z_OK)
|
||||
return -1;
|
||||
|
||||
@ -1111,7 +1109,9 @@ static int unpack_sha1_header_to_strbuf(git_zstream *stream, unsigned char *map,
|
||||
return -1;
|
||||
}
|
||||
|
||||
static void *unpack_sha1_rest(git_zstream *stream, void *buffer, unsigned long size, const unsigned char *sha1)
|
||||
static void *unpack_loose_rest(git_zstream *stream,
|
||||
void *buffer, unsigned long size,
|
||||
const struct object_id *oid)
|
||||
{
|
||||
int bytes = strlen(buffer) + 1;
|
||||
unsigned char *buf = xmallocz(size);
|
||||
@ -1148,10 +1148,10 @@ static void *unpack_sha1_rest(git_zstream *stream, void *buffer, unsigned long s
|
||||
}
|
||||
|
||||
if (status < 0)
|
||||
error(_("corrupt loose object '%s'"), sha1_to_hex(sha1));
|
||||
error(_("corrupt loose object '%s'"), oid_to_hex(oid));
|
||||
else if (stream->avail_in)
|
||||
error(_("garbage at end of loose object '%s'"),
|
||||
sha1_to_hex(sha1));
|
||||
oid_to_hex(oid));
|
||||
free(buf);
|
||||
return NULL;
|
||||
}
|
||||
@ -1161,8 +1161,8 @@ static void *unpack_sha1_rest(git_zstream *stream, void *buffer, unsigned long s
|
||||
* too permissive for what we want to check. So do an anal
|
||||
* object header parse by hand.
|
||||
*/
|
||||
static int parse_sha1_header_extended(const char *hdr, struct object_info *oi,
|
||||
unsigned int flags)
|
||||
static int parse_loose_header_extended(const char *hdr, struct object_info *oi,
|
||||
unsigned int flags)
|
||||
{
|
||||
const char *type_buf = hdr;
|
||||
unsigned long size;
|
||||
@ -1222,17 +1222,17 @@ static int parse_sha1_header_extended(const char *hdr, struct object_info *oi,
|
||||
return *hdr ? -1 : type;
|
||||
}
|
||||
|
||||
int parse_sha1_header(const char *hdr, unsigned long *sizep)
|
||||
int parse_loose_header(const char *hdr, unsigned long *sizep)
|
||||
{
|
||||
struct object_info oi = OBJECT_INFO_INIT;
|
||||
|
||||
oi.sizep = sizep;
|
||||
return parse_sha1_header_extended(hdr, &oi, 0);
|
||||
return parse_loose_header_extended(hdr, &oi, 0);
|
||||
}
|
||||
|
||||
static int sha1_loose_object_info(struct repository *r,
|
||||
const unsigned char *sha1,
|
||||
struct object_info *oi, int flags)
|
||||
static int loose_object_info(struct repository *r,
|
||||
const struct object_id *oid,
|
||||
struct object_info *oi, int flags)
|
||||
{
|
||||
int status = 0;
|
||||
unsigned long mapsize;
|
||||
@ -1257,15 +1257,15 @@ static int sha1_loose_object_info(struct repository *r,
|
||||
const char *path;
|
||||
struct stat st;
|
||||
if (!oi->disk_sizep && (flags & OBJECT_INFO_QUICK))
|
||||
return quick_has_loose(r, sha1) ? 0 : -1;
|
||||
if (stat_sha1_file(r, sha1, &st, &path) < 0)
|
||||
return quick_has_loose(r, oid) ? 0 : -1;
|
||||
if (stat_loose_object(r, oid, &st, &path) < 0)
|
||||
return -1;
|
||||
if (oi->disk_sizep)
|
||||
*oi->disk_sizep = st.st_size;
|
||||
return 0;
|
||||
}
|
||||
|
||||
map = map_sha1_file(r, sha1, &mapsize);
|
||||
map = map_loose_object(r, oid, &mapsize);
|
||||
if (!map)
|
||||
return -1;
|
||||
|
||||
@ -1275,24 +1275,24 @@ static int sha1_loose_object_info(struct repository *r,
|
||||
if (oi->disk_sizep)
|
||||
*oi->disk_sizep = mapsize;
|
||||
if ((flags & OBJECT_INFO_ALLOW_UNKNOWN_TYPE)) {
|
||||
if (unpack_sha1_header_to_strbuf(&stream, map, mapsize, hdr, sizeof(hdr), &hdrbuf) < 0)
|
||||
if (unpack_loose_header_to_strbuf(&stream, map, mapsize, hdr, sizeof(hdr), &hdrbuf) < 0)
|
||||
status = error(_("unable to unpack %s header with --allow-unknown-type"),
|
||||
sha1_to_hex(sha1));
|
||||
} else if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0)
|
||||
oid_to_hex(oid));
|
||||
} else if (unpack_loose_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0)
|
||||
status = error(_("unable to unpack %s header"),
|
||||
sha1_to_hex(sha1));
|
||||
oid_to_hex(oid));
|
||||
if (status < 0)
|
||||
; /* Do nothing */
|
||||
else if (hdrbuf.len) {
|
||||
if ((status = parse_sha1_header_extended(hdrbuf.buf, oi, flags)) < 0)
|
||||
if ((status = parse_loose_header_extended(hdrbuf.buf, oi, flags)) < 0)
|
||||
status = error(_("unable to parse %s header with --allow-unknown-type"),
|
||||
sha1_to_hex(sha1));
|
||||
} else if ((status = parse_sha1_header_extended(hdr, oi, flags)) < 0)
|
||||
status = error(_("unable to parse %s header"), sha1_to_hex(sha1));
|
||||
oid_to_hex(oid));
|
||||
} else if ((status = parse_loose_header_extended(hdr, oi, flags)) < 0)
|
||||
status = error(_("unable to parse %s header"), oid_to_hex(oid));
|
||||
|
||||
if (status >= 0 && oi->contentp) {
|
||||
*oi->contentp = unpack_sha1_rest(&stream, hdr,
|
||||
*oi->sizep, sha1);
|
||||
*oi->contentp = unpack_loose_rest(&stream, hdr,
|
||||
*oi->sizep, oid);
|
||||
if (!*oi->contentp) {
|
||||
git_inflate_end(&stream);
|
||||
status = -1;
|
||||
@ -1358,7 +1358,7 @@ int oid_object_info_extended(struct repository *r, const struct object_id *oid,
|
||||
return -1;
|
||||
|
||||
/* Most likely it's a loose object. */
|
||||
if (!sha1_loose_object_info(r, real->hash, oi, flags))
|
||||
if (!loose_object_info(r, real, oi, flags))
|
||||
return 0;
|
||||
|
||||
/* Not a loose object; someone else may have just packed it. */
|
||||
@ -1422,20 +1422,16 @@ int oid_object_info(struct repository *r,
|
||||
}
|
||||
|
||||
static void *read_object(struct repository *r,
|
||||
const unsigned char *sha1,
|
||||
enum object_type *type,
|
||||
const struct object_id *oid, enum object_type *type,
|
||||
unsigned long *size)
|
||||
{
|
||||
struct object_id oid;
|
||||
struct object_info oi = OBJECT_INFO_INIT;
|
||||
void *content;
|
||||
oi.typep = type;
|
||||
oi.sizep = size;
|
||||
oi.contentp = &content;
|
||||
|
||||
hashcpy(oid.hash, sha1);
|
||||
|
||||
if (oid_object_info_extended(r, &oid, &oi, 0) < 0)
|
||||
if (oid_object_info_extended(r, oid, &oi, 0) < 0)
|
||||
return NULL;
|
||||
return content;
|
||||
}
|
||||
@ -1446,7 +1442,7 @@ int pretend_object_file(void *buf, unsigned long len, enum object_type type,
|
||||
struct cached_object *co;
|
||||
|
||||
hash_object_file(buf, len, type_name(type), oid);
|
||||
if (has_sha1_file(oid->hash) || find_cached_object(oid))
|
||||
if (has_object_file(oid) || find_cached_object(oid))
|
||||
return 0;
|
||||
ALLOC_GROW(cached_objects, cached_object_nr + 1, cached_object_alloc);
|
||||
co = &cached_objects[cached_object_nr++];
|
||||
@ -1477,7 +1473,7 @@ void *read_object_file_extended(struct repository *r,
|
||||
lookup_replace_object(r, oid) : oid;
|
||||
|
||||
errno = 0;
|
||||
data = read_object(r, repl->hash, type, size);
|
||||
data = read_object(r, repl, type, size);
|
||||
if (data)
|
||||
return data;
|
||||
|
||||
@ -1489,7 +1485,7 @@ void *read_object_file_extended(struct repository *r,
|
||||
die(_("replacement %s not found for %s"),
|
||||
oid_to_hex(repl), oid_to_hex(oid));
|
||||
|
||||
if (!stat_sha1_file(r, repl->hash, &st, &path))
|
||||
if (!stat_loose_object(r, repl, &st, &path))
|
||||
die(_("loose object %s (stored in %s) is corrupt"),
|
||||
oid_to_hex(repl), path);
|
||||
|
||||
@ -1596,7 +1592,7 @@ int finalize_object_file(const char *tmpfile, const char *filename)
|
||||
unlink_or_warn(tmpfile);
|
||||
if (ret) {
|
||||
if (ret != EEXIST) {
|
||||
return error_errno(_("unable to write sha1 filename %s"), filename);
|
||||
return error_errno(_("unable to write file %s"), filename);
|
||||
}
|
||||
/* FIXME!!! Collision check here ? */
|
||||
}
|
||||
@ -1624,12 +1620,12 @@ int hash_object_file(const void *buf, unsigned long len, const char *type,
|
||||
}
|
||||
|
||||
/* Finalize a file on disk, and close it. */
|
||||
static void close_sha1_file(int fd)
|
||||
static void close_loose_object(int fd)
|
||||
{
|
||||
if (fsync_object_files)
|
||||
fsync_or_die(fd, "sha1 file");
|
||||
fsync_or_die(fd, "loose object file");
|
||||
if (close(fd) != 0)
|
||||
die_errno(_("error when closing sha1 file"));
|
||||
die_errno(_("error when closing loose object file"));
|
||||
}
|
||||
|
||||
/* Size of directory component, including the ending '/' */
|
||||
@ -1689,7 +1685,7 @@ static int write_loose_object(const struct object_id *oid, char *hdr,
|
||||
static struct strbuf tmp_file = STRBUF_INIT;
|
||||
static struct strbuf filename = STRBUF_INIT;
|
||||
|
||||
loose_object_path(the_repository, &filename, oid->hash);
|
||||
loose_object_path(the_repository, &filename, oid);
|
||||
|
||||
fd = create_tmpfile(&tmp_file, filename.buf);
|
||||
if (fd < 0) {
|
||||
@ -1720,7 +1716,7 @@ static int write_loose_object(const struct object_id *oid, char *hdr,
|
||||
ret = git_deflate(&stream, Z_FINISH);
|
||||
the_hash_algo->update_fn(&c, in0, stream.next_in - in0);
|
||||
if (write_buffer(fd, compressed, stream.next_out - compressed) < 0)
|
||||
die(_("unable to write sha1 file"));
|
||||
die(_("unable to write loose object file"));
|
||||
stream.next_out = compressed;
|
||||
stream.avail_out = sizeof(compressed);
|
||||
} while (ret == Z_OK);
|
||||
@ -1737,7 +1733,7 @@ static int write_loose_object(const struct object_id *oid, char *hdr,
|
||||
die(_("confused by unstable object source data for %s"),
|
||||
oid_to_hex(oid));
|
||||
|
||||
close_sha1_file(fd);
|
||||
close_loose_object(fd);
|
||||
|
||||
if (mtime) {
|
||||
struct utimbuf utb;
|
||||
@ -1817,9 +1813,9 @@ int force_object_loose(const struct object_id *oid, time_t mtime)
|
||||
|
||||
if (has_loose_object(oid))
|
||||
return 0;
|
||||
buf = read_object(the_repository, oid->hash, &type, &len);
|
||||
buf = read_object(the_repository, oid, &type, &len);
|
||||
if (!buf)
|
||||
return error(_("cannot read sha1_file for %s"), oid_to_hex(oid));
|
||||
return error(_("cannot read object for %s"), oid_to_hex(oid));
|
||||
hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %"PRIuMAX , type_name(type), (uintmax_t)len) + 1;
|
||||
ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime);
|
||||
free(buf);
|
||||
@ -1827,27 +1823,19 @@ int force_object_loose(const struct object_id *oid, time_t mtime)
|
||||
return ret;
|
||||
}
|
||||
|
||||
int repo_has_sha1_file_with_flags(struct repository *r,
|
||||
const unsigned char *sha1, int flags)
|
||||
int repo_has_object_file_with_flags(struct repository *r,
|
||||
const struct object_id *oid, int flags)
|
||||
{
|
||||
struct object_id oid;
|
||||
if (!startup_info->have_repository)
|
||||
return 0;
|
||||
hashcpy(oid.hash, sha1);
|
||||
return oid_object_info_extended(r, &oid, NULL,
|
||||
return oid_object_info_extended(r, oid, NULL,
|
||||
flags | OBJECT_INFO_SKIP_CACHED) >= 0;
|
||||
}
|
||||
|
||||
int repo_has_object_file(struct repository *r,
|
||||
const struct object_id *oid)
|
||||
{
|
||||
return repo_has_sha1_file(r, oid->hash);
|
||||
}
|
||||
|
||||
int repo_has_object_file_with_flags(struct repository *r,
|
||||
const struct object_id *oid, int flags)
|
||||
{
|
||||
return repo_has_sha1_file_with_flags(r, oid->hash, flags);
|
||||
return repo_has_object_file_with_flags(r, oid, 0);
|
||||
}
|
||||
|
||||
static void check_tree(const void *buf, size_t size)
|
||||
@ -2258,14 +2246,14 @@ void odb_clear_loose_cache(struct object_directory *odb)
|
||||
sizeof(odb->loose_objects_subdir_seen));
|
||||
}
|
||||
|
||||
static int check_stream_sha1(git_zstream *stream,
|
||||
const char *hdr,
|
||||
unsigned long size,
|
||||
const char *path,
|
||||
const unsigned char *expected_sha1)
|
||||
static int check_stream_oid(git_zstream *stream,
|
||||
const char *hdr,
|
||||
unsigned long size,
|
||||
const char *path,
|
||||
const struct object_id *expected_oid)
|
||||
{
|
||||
git_hash_ctx c;
|
||||
unsigned char real_sha1[GIT_MAX_RAWSZ];
|
||||
struct object_id real_oid;
|
||||
unsigned char buf[4096];
|
||||
unsigned long total_read;
|
||||
int status = Z_OK;
|
||||
@ -2281,7 +2269,7 @@ static int check_stream_sha1(git_zstream *stream,
|
||||
|
||||
/*
|
||||
* This size comparison must be "<=" to read the final zlib packets;
|
||||
* see the comment in unpack_sha1_rest for details.
|
||||
* see the comment in unpack_loose_rest for details.
|
||||
*/
|
||||
while (total_read <= size &&
|
||||
(status == Z_OK ||
|
||||
@ -2297,19 +2285,19 @@ static int check_stream_sha1(git_zstream *stream,
|
||||
git_inflate_end(stream);
|
||||
|
||||
if (status != Z_STREAM_END) {
|
||||
error(_("corrupt loose object '%s'"), sha1_to_hex(expected_sha1));
|
||||
error(_("corrupt loose object '%s'"), oid_to_hex(expected_oid));
|
||||
return -1;
|
||||
}
|
||||
if (stream->avail_in) {
|
||||
error(_("garbage at end of loose object '%s'"),
|
||||
sha1_to_hex(expected_sha1));
|
||||
oid_to_hex(expected_oid));
|
||||
return -1;
|
||||
}
|
||||
|
||||
the_hash_algo->final_fn(real_sha1, &c);
|
||||
if (!hasheq(expected_sha1, real_sha1)) {
|
||||
error(_("sha1 mismatch for %s (expected %s)"), path,
|
||||
sha1_to_hex(expected_sha1));
|
||||
the_hash_algo->final_fn(real_oid.hash, &c);
|
||||
if (!oideq(expected_oid, &real_oid)) {
|
||||
error(_("hash mismatch for %s (expected %s)"), path,
|
||||
oid_to_hex(expected_oid));
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -2330,18 +2318,18 @@ int read_loose_object(const char *path,
|
||||
|
||||
*contents = NULL;
|
||||
|
||||
map = map_sha1_file_1(the_repository, path, NULL, &mapsize);
|
||||
map = map_loose_object_1(the_repository, path, NULL, &mapsize);
|
||||
if (!map) {
|
||||
error_errno(_("unable to mmap %s"), path);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0) {
|
||||
if (unpack_loose_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0) {
|
||||
error(_("unable to unpack header of %s"), path);
|
||||
goto out;
|
||||
}
|
||||
|
||||
*type = parse_sha1_header(hdr, size);
|
||||
*type = parse_loose_header(hdr, size);
|
||||
if (*type < 0) {
|
||||
error(_("unable to parse header of %s"), path);
|
||||
git_inflate_end(&stream);
|
||||
@ -2349,10 +2337,10 @@ int read_loose_object(const char *path,
|
||||
}
|
||||
|
||||
if (*type == OBJ_BLOB && *size > big_file_threshold) {
|
||||
if (check_stream_sha1(&stream, hdr, *size, path, expected_oid->hash) < 0)
|
||||
if (check_stream_oid(&stream, hdr, *size, path, expected_oid) < 0)
|
||||
goto out;
|
||||
} else {
|
||||
*contents = unpack_sha1_rest(&stream, hdr, *size, expected_oid->hash);
|
||||
*contents = unpack_loose_rest(&stream, hdr, *size, expected_oid);
|
||||
if (!*contents) {
|
||||
error(_("unable to unpack contents of %s"), path);
|
||||
git_inflate_end(&stream);
|
||||
@ -2360,7 +2348,7 @@ int read_loose_object(const char *path,
|
||||
}
|
||||
if (check_object_signature(expected_oid, *contents,
|
||||
*size, type_name(*type))) {
|
||||
error(_("sha1 mismatch for %s (expected %s)"), path,
|
||||
error(_("hash mismatch for %s (expected %s)"), path,
|
||||
oid_to_hex(expected_oid));
|
||||
free(*contents);
|
||||
goto out;
|
||||
|
16
streaming.c
16
streaming.c
@ -338,16 +338,16 @@ static struct stream_vtbl loose_vtbl = {
|
||||
|
||||
static open_method_decl(loose)
|
||||
{
|
||||
st->u.loose.mapped = map_sha1_file(the_repository,
|
||||
oid->hash, &st->u.loose.mapsize);
|
||||
st->u.loose.mapped = map_loose_object(the_repository,
|
||||
oid, &st->u.loose.mapsize);
|
||||
if (!st->u.loose.mapped)
|
||||
return -1;
|
||||
if ((unpack_sha1_header(&st->z,
|
||||
st->u.loose.mapped,
|
||||
st->u.loose.mapsize,
|
||||
st->u.loose.hdr,
|
||||
sizeof(st->u.loose.hdr)) < 0) ||
|
||||
(parse_sha1_header(st->u.loose.hdr, &st->size) < 0)) {
|
||||
if ((unpack_loose_header(&st->z,
|
||||
st->u.loose.mapped,
|
||||
st->u.loose.mapsize,
|
||||
st->u.loose.hdr,
|
||||
sizeof(st->u.loose.hdr)) < 0) ||
|
||||
(parse_loose_header(st->u.loose.hdr, &st->size) < 0)) {
|
||||
git_inflate_end(&st->z);
|
||||
munmap(st->u.loose.mapped, st->u.loose.mapsize);
|
||||
return -1;
|
||||
|
@ -406,7 +406,7 @@ test_expect_success 'rev-list --verify-objects with bad sha1' '
|
||||
|
||||
test_might_fail git rev-list --verify-objects refs/heads/bogus >/dev/null 2>out &&
|
||||
cat out &&
|
||||
test_i18ngrep -q "error: sha1 mismatch 63ffffffffffffffffffffffffffffffffffffff" out
|
||||
test_i18ngrep -q "error: hash mismatch 63ffffffffffffffffffffffffffffffffffffff" out
|
||||
'
|
||||
|
||||
test_expect_success 'force fsck to ignore double author' '
|
||||
|
Loading…
Reference in New Issue
Block a user