Mirror of https://github.com/git/git.git (synced 2024-11-23)
cocci: apply the "object-store.h" part of "the_repository.pending"
Apply the part of "the_repository.pending.cocci" pertaining to "object-store.h".

Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
parent 085390328f
commit bc726bd075
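For context, the object-store.h part of the rule being applied (it appears verbatim in the contrib/coccinelle hunks further down in this diff) is a small SmPL disjunction that renames each single-repository wrapper to its repo_* counterpart and prepends the_repository as the first argument. A condensed sketch of that rule, assuming the layout shown in the cocci hunks below:

    @@
    @@
    (
    // object-store.h
    - read_object_file
    + repo_read_object_file
    |
    - has_object_file
    + repo_has_object_file
    |
    - has_object_file_with_flags
    + repo_has_object_file_with_flags
    )
      (
    + the_repository,
      ...)

Rules like this are applied mechanically with Coccinelle's spatch (for instance via git's "make coccicheck" tooling); the call-site churn in the files below is the result of running that rule over the tree.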
apply.c: 9 changed lines

@@ -3201,7 +3201,8 @@ static int apply_binary(struct apply_state *state,
     unsigned long size;
     char *result;

-    result = read_object_file(&oid, &type, &size);
+    result = repo_read_object_file(the_repository, &oid, &type,
+                                   &size);
     if (!result)
         return error(_("the necessary postimage %s for "
                        "'%s' cannot be read"),
@@ -3264,7 +3265,8 @@ static int read_blob_object(struct strbuf *buf, const struct object_id *oid, uns
     unsigned long sz;
     char *result;

-    result = read_object_file(oid, &type, &sz);
+    result = repo_read_object_file(the_repository, oid, &type,
+                                   &sz);
     if (!result)
         return -1;
     /* XXX read_sha1_file NUL-terminates */
@@ -3492,7 +3494,8 @@ static int resolve_to(struct image *image, const struct object_id *result_id)

     clear_image(image);

-    image->buf = read_object_file(result_id, &type, &size);
+    image->buf = repo_read_object_file(the_repository, result_id, &type,
+                                       &size);
     if (!image->buf || type != OBJ_BLOB)
         die("unable to read blob object %s", oid_to_hex(result_id));
     image->len = size;
@@ -84,7 +84,7 @@ static void *object_file_to_archive(const struct archiver_args *args,
                 (args->tree ? &args->tree->object.oid : NULL), oid);

     path += args->baselen;
-    buffer = read_object_file(oid, type, sizep);
+    buffer = repo_read_object_file(the_repository, oid, type, sizep);
     if (buffer && S_ISREG(mode)) {
         struct strbuf buf = STRBUF_INIT;
         size_t size = 0;
bisect.c: 5 changed lines

@@ -148,8 +148,9 @@ static void show_list(const char *debug, int counted, int nr,
         unsigned commit_flags = commit->object.flags;
         enum object_type type;
         unsigned long size;
-        char *buf = read_object_file(&commit->object.oid, &type,
-                                     &size);
+        char *buf = repo_read_object_file(the_repository,
+                                          &commit->object.oid, &type,
+                                          &size);
         const char *subject_start;
         int subject_len;
blame.c: 11 changed lines

@@ -1028,8 +1028,9 @@ static void fill_origin_blob(struct diff_options *opt,
                     &o->blob_oid, 1, &file->ptr, &file_size))
             ;
         else
-            file->ptr = read_object_file(&o->blob_oid, &type,
-                                         &file_size);
+            file->ptr = repo_read_object_file(the_repository,
+                                              &o->blob_oid, &type,
+                                              &file_size);
         file->size = file_size;

         if (!file->ptr)
@@ -2838,8 +2839,10 @@ void setup_scoreboard(struct blame_scoreboard *sb,
                     &sb->final_buf_size))
             ;
         else
-            sb->final_buf = read_object_file(&o->blob_oid, &type,
-                                             &sb->final_buf_size);
+            sb->final_buf = repo_read_object_file(the_repository,
+                                                  &o->blob_oid,
+                                                  &type,
+                                                  &sb->final_buf_size);

         if (!sb->final_buf)
             die(_("cannot read blob %s for path %s"),
@@ -60,7 +60,7 @@ static int filter_object(const char *path, unsigned mode,
 {
     enum object_type type;

-    *buf = read_object_file(oid, &type, size);
+    *buf = repo_read_object_file(the_repository, oid, &type, size);
     if (!*buf)
         return error(_("cannot read object %s '%s'"),
                      oid_to_hex(oid), path);
@@ -152,7 +152,7 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
         goto cleanup;

     case 'e':
-        return !has_object_file(&oid);
+        return !repo_has_object_file(the_repository, &oid);

     case 'w':

@@ -187,7 +187,8 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
         ret = stream_blob(&oid);
         goto cleanup;
     }
-    buf = read_object_file(&oid, &type, &size);
+    buf = repo_read_object_file(the_repository, &oid, &type,
+                                &size);
     if (!buf)
         die("Cannot read object %s", obj_name);

@@ -207,8 +208,10 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
     if (exp_type_id == OBJ_BLOB) {
         struct object_id blob_oid;
         if (oid_object_info(the_repository, &oid, NULL) == OBJ_TAG) {
-            char *buffer = read_object_file(&oid, &type,
-                                            &size);
+            char *buffer = repo_read_object_file(the_repository,
+                                                 &oid,
+                                                 &type,
+                                                 &size);
             const char *target;
             if (!skip_prefix(buffer, "object ", &target) ||
                 get_oid_hex(target, &blob_oid))
@@ -383,9 +386,10 @@ static void print_object_or_die(struct batch_options *opt, struct expand_data *d
         if (!textconv_object(the_repository,
                              data->rest, 0100644, oid,
                              1, &contents, &size))
-            contents = read_object_file(oid,
-                                        &type,
-                                        &size);
+            contents = repo_read_object_file(the_repository,
+                                             oid,
+                                             &type,
+                                             &size);
         if (!contents)
             die("could not convert '%s' %s",
                 oid_to_hex(oid), data->rest);
@@ -402,7 +406,8 @@ static void print_object_or_die(struct batch_options *opt, struct expand_data *d
         unsigned long size;
         void *contents;

-        contents = read_object_file(oid, &type, &size);
+        contents = repo_read_object_file(the_repository, oid, &type,
+                                         &size);

         if (use_mailmap) {
             size_t s = size;
@@ -547,9 +547,9 @@ static void write_followtags(const struct ref *refs, const char *msg)
             continue;
         if (ends_with(ref->name, "^{}"))
             continue;
-        if (!has_object_file_with_flags(&ref->old_oid,
-                                        OBJECT_INFO_QUICK |
-                                        OBJECT_INFO_SKIP_FETCH_OBJECT))
+        if (!repo_has_object_file_with_flags(the_repository, &ref->old_oid,
+                                             OBJECT_INFO_QUICK |
+                                             OBJECT_INFO_SKIP_FETCH_OBJECT))
             continue;
         update_ref(msg, ref->name, &ref->old_oid, NULL, 0,
                    UPDATE_REFS_DIE_ON_ERR);
@@ -295,7 +295,8 @@ static char *get_symlink(const struct object_id *oid, const char *path)
     } else {
         enum object_type type;
         unsigned long size;
-        data = read_object_file(oid, &type, &size);
+        data = repo_read_object_file(the_repository, oid, &type,
+                                     &size);
         if (!data)
             die(_("could not read object %s for symlink %s"),
                 oid_to_hex(oid), path);
@@ -296,7 +296,7 @@ static void export_blob(const struct object_id *oid)
         object = (struct object *)lookup_blob(the_repository, oid);
         eaten = 0;
     } else {
-        buf = read_object_file(oid, &type, &size);
+        buf = repo_read_object_file(the_repository, oid, &type, &size);
         if (!buf)
             die("could not read blob %s", oid_to_hex(oid));
         if (check_object_signature(the_repository, oid, buf, size,
@@ -766,7 +766,8 @@ static void handle_tag(const char *name, struct tag *tag)
         return;
     }

-    buf = read_object_file(&tag->object.oid, &type, &size);
+    buf = repo_read_object_file(the_repository, &tag->object.oid, &type,
+                                &size);
     if (!buf)
         die("could not read tag %s", oid_to_hex(&tag->object.oid));
     message = memmem(buf, size, "\n\n", 2);
@@ -1265,7 +1265,7 @@ static void load_tree(struct tree_entry *root)
             die("Can't load tree %s", oid_to_hex(oid));
     } else {
         enum object_type type;
-        buf = read_object_file(oid, &type, &size);
+        buf = repo_read_object_file(the_repository, oid, &type, &size);
         if (!buf || type != OBJ_TREE)
             die("Can't load tree %s", oid_to_hex(oid));
     }
@@ -2936,7 +2936,7 @@ static void cat_blob(struct object_entry *oe, struct object_id *oid)
     char *buf;

     if (!oe || oe->pack_id == MAX_PACK_ID) {
-        buf = read_object_file(oid, &type, &size);
+        buf = repo_read_object_file(the_repository, oid, &type, &size);
     } else {
         type = oe->type;
         buf = gfi_unpack_entry(oe, &size);
@@ -3044,7 +3044,8 @@ static struct object_entry *dereference(struct object_entry *oe,
         buf = gfi_unpack_entry(oe, &size);
     } else {
         enum object_type unused;
-        buf = read_object_file(oid, &unused, &size);
+        buf = repo_read_object_file(the_repository, oid, &unused,
+                                    &size);
     }
     if (!buf)
         die("Can't load object %s", oid_to_hex(oid));
@@ -407,9 +407,9 @@ static void find_non_local_tags(const struct ref *refs,
          */
         if (ends_with(ref->name, "^{}")) {
             if (item &&
-                !has_object_file_with_flags(&ref->old_oid, quick_flags) &&
+                !repo_has_object_file_with_flags(the_repository, &ref->old_oid, quick_flags) &&
                 !oidset_contains(&fetch_oids, &ref->old_oid) &&
-                !has_object_file_with_flags(&item->oid, quick_flags) &&
+                !repo_has_object_file_with_flags(the_repository, &item->oid, quick_flags) &&
                 !oidset_contains(&fetch_oids, &item->oid))
                 clear_item(item);
             item = NULL;
@@ -423,7 +423,7 @@ static void find_non_local_tags(const struct ref *refs,
          * fetch.
          */
         if (item &&
-            !has_object_file_with_flags(&item->oid, quick_flags) &&
+            !repo_has_object_file_with_flags(the_repository, &item->oid, quick_flags) &&
             !oidset_contains(&fetch_oids, &item->oid))
             clear_item(item);

@@ -444,7 +444,7 @@ static void find_non_local_tags(const struct ref *refs,
      * checked to see if it needs fetching.
      */
     if (item &&
-        !has_object_file_with_flags(&item->oid, quick_flags) &&
+        !repo_has_object_file_with_flags(the_repository, &item->oid, quick_flags) &&
         !oidset_contains(&fetch_oids, &item->oid))
         clear_item(item);

@@ -1320,8 +1320,8 @@ static int check_exist_and_connected(struct ref *ref_map)
      * we need all direct targets to exist.
      */
     for (r = rm; r; r = r->next) {
-        if (!has_object_file_with_flags(&r->old_oid,
-                                        OBJECT_INFO_SKIP_FETCH_OBJECT))
+        if (!repo_has_object_file_with_flags(the_repository, &r->old_oid,
+                                             OBJECT_INFO_SKIP_FETCH_OBJECT))
             return -1;
     }
@@ -560,7 +560,8 @@ static int grep_cache(struct grep_opt *opt,
             void *data;
             unsigned long size;

-            data = read_object_file(&ce->oid, &type, &size);
+            data = repo_read_object_file(the_repository, &ce->oid,
+                                         &type, &size);
             init_tree_desc(&tree, data, size);

             hit |= grep_tree(opt, pathspec, &tree, &name, 0, 0);
@@ -650,7 +651,8 @@ static int grep_tree(struct grep_opt *opt, const struct pathspec *pathspec,
             void *data;
             unsigned long size;

-            data = read_object_file(&entry.oid, &type, &size);
+            data = repo_read_object_file(the_repository,
+                                         &entry.oid, &type, &size);
             if (!data)
                 die(_("unable to read tree (%s)"),
                     oid_to_hex(&entry.oid));
@@ -801,7 +801,8 @@ static void sha1_object(const void *data, struct object_entry *obj_entry,
     if (startup_info->have_repository) {
         read_lock();
         collision_test_needed =
-            has_object_file_with_flags(oid, OBJECT_INFO_QUICK);
+            repo_has_object_file_with_flags(the_repository, oid,
+                                            OBJECT_INFO_QUICK);
         read_unlock();
     }

@@ -821,7 +822,8 @@ static void sha1_object(const void *data, struct object_entry *obj_entry,
             die(_("cannot read existing object info %s"), oid_to_hex(oid));
         if (has_type != type || has_size != size)
             die(_("SHA1 COLLISION FOUND WITH %s !"), oid_to_hex(oid));
-        has_data = read_object_file(oid, &has_type, &has_size);
+        has_data = repo_read_object_file(the_repository, oid,
+                                         &has_type, &has_size);
         read_unlock();
         if (!data)
             data = new_data = get_data_from_pack(obj_entry);
@@ -1414,7 +1416,8 @@ static void fix_unresolved_deltas(struct hashfile *f)

         if (objects[d->obj_no].real_type != OBJ_REF_DELTA)
             continue;
-        data = read_object_file(&d->oid, &type, &size);
+        data = repo_read_object_file(the_repository, &d->oid, &type,
+                                     &size);
         if (!data)
             continue;
@@ -675,7 +675,7 @@ static int show_tag_object(const struct object_id *oid, struct rev_info *rev)
 {
     unsigned long size;
     enum object_type type;
-    char *buf = read_object_file(oid, &type, &size);
+    char *buf = repo_read_object_file(the_repository, oid, &type, &size);
     int offset = 0;

     if (!buf)
@@ -69,7 +69,9 @@ static void *result(struct merge_list *entry, unsigned long *size)
     const char *path = entry->path;

     if (!entry->stage)
-        return read_object_file(&entry->blob->object.oid, &type, size);
+        return repo_read_object_file(the_repository,
+                                     &entry->blob->object.oid, &type,
+                                     size);
     base = NULL;
     if (entry->stage == 1) {
         base = entry->blob;
@@ -92,8 +94,9 @@ static void *origin(struct merge_list *entry, unsigned long *size)
     enum object_type type;
     while (entry) {
         if (entry->stage == 2)
-            return read_object_file(&entry->blob->object.oid,
-                                    &type, size);
+            return repo_read_object_file(the_repository,
+                                         &entry->blob->object.oid,
+                                         &type, size);
         entry = entry->link;
     }
     return NULL;
@@ -51,7 +51,8 @@ static int verify_object_in_tag(struct object_id *tagged_oid, int *tagged_type)
     void *buffer;
     const struct object_id *repl;

-    buffer = read_object_file(tagged_oid, &type, &size);
+    buffer = repo_read_object_file(the_repository, tagged_oid, &type,
+                                   &size);
     if (!buffer)
         die(_("could not read tagged object '%s'"),
             oid_to_hex(tagged_oid));
@@ -124,7 +124,7 @@ static void copy_obj_to_fd(int fd, const struct object_id *oid)
 {
     unsigned long size;
     enum object_type type;
-    char *buf = read_object_file(oid, &type, &size);
+    char *buf = repo_read_object_file(the_repository, oid, &type, &size);
     if (buf) {
         if (size)
             write_or_die(fd, buf, size);
@@ -259,7 +259,7 @@ static int parse_reuse_arg(const struct option *opt, const char *arg, int unset)

     if (repo_get_oid(the_repository, arg, &object))
         die(_("failed to resolve '%s' as a valid ref."), arg);
-    if (!(buf = read_object_file(&object, &type, &len)))
+    if (!(buf = repo_read_object_file(the_repository, &object, &type, &len)))
         die(_("failed to read object '%s'."), arg);
     if (type != OBJ_BLOB) {
         free(buf);
@@ -616,7 +616,8 @@ static int append_edit(int argc, const char **argv, const char *prefix)
         /* Append buf to previous note contents */
         unsigned long size;
         enum object_type type;
-        char *prev_buf = read_object_file(note, &type, &size);
+        char *prev_buf = repo_read_object_file(the_repository, note,
+                                               &type, &size);

         strbuf_grow(&d.buf, size + 1);
         if (d.buf.len && prev_buf && size)
@@ -288,11 +288,13 @@ static void *get_delta(struct object_entry *entry)
     void *buf, *base_buf, *delta_buf;
     enum object_type type;

-    buf = read_object_file(&entry->idx.oid, &type, &size);
+    buf = repo_read_object_file(the_repository, &entry->idx.oid, &type,
+                                &size);
     if (!buf)
         die(_("unable to read %s"), oid_to_hex(&entry->idx.oid));
-    base_buf = read_object_file(&DELTA(entry)->idx.oid, &type,
-                                &base_size);
+    base_buf = repo_read_object_file(the_repository,
+                                     &DELTA(entry)->idx.oid, &type,
+                                     &base_size);
     if (!base_buf)
         die("unable to read %s",
             oid_to_hex(&DELTA(entry)->idx.oid));
@@ -454,7 +456,9 @@ static unsigned long write_no_reuse_object(struct hashfile *f, struct object_ent
                                        &size, NULL)) != NULL)
         buf = NULL;
     else {
-        buf = read_object_file(&entry->idx.oid, &type, &size);
+        buf = repo_read_object_file(the_repository,
+                                    &entry->idx.oid, &type,
+                                    &size);
         if (!buf)
             die(_("unable to read %s"),
                 oid_to_hex(&entry->idx.oid));
@@ -1665,7 +1669,7 @@ static struct pbase_tree_cache *pbase_tree_get(const struct object_id *oid)
     /* Did not find one. Either we got a bogus request or
      * we need to read and perhaps cache.
      */
-    data = read_object_file(oid, &type, &size);
+    data = repo_read_object_file(the_repository, oid, &type, &size);
     if (!data)
         return NULL;
     if (type != OBJ_TREE) {
@@ -2525,7 +2529,9 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
     /* Load data if not already done */
     if (!trg->data) {
         packing_data_lock(&to_pack);
-        trg->data = read_object_file(&trg_entry->idx.oid, &type, &sz);
+        trg->data = repo_read_object_file(the_repository,
+                                          &trg_entry->idx.oid, &type,
+                                          &sz);
         packing_data_unlock(&to_pack);
         if (!trg->data)
             die(_("object %s cannot be read"),
@@ -2538,7 +2544,9 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
     }
     if (!src->data) {
         packing_data_lock(&to_pack);
-        src->data = read_object_file(&src_entry->idx.oid, &type, &sz);
+        src->data = repo_read_object_file(the_repository,
+                                          &src_entry->idx.oid, &type,
+                                          &sz);
         packing_data_unlock(&to_pack);
         if (!src->data) {
             if (src_entry->preferred_base) {
@@ -1494,7 +1494,7 @@ static const char *update(struct command *cmd, struct shallow_info *si)
         }
     }

-    if (!is_null_oid(new_oid) && !has_object_file(new_oid)) {
+    if (!is_null_oid(new_oid) && !repo_has_object_file(the_repository, new_oid)) {
         error("unpack should have generated %s, "
               "but I can't find it!", oid_to_hex(new_oid));
         ret = "bad pack";
@@ -443,7 +443,7 @@ static int get_push_ref_states(const struct ref *remote_refs,
             info->status = PUSH_STATUS_UPTODATE;
         else if (is_null_oid(&ref->old_oid))
             info->status = PUSH_STATUS_CREATE;
-        else if (has_object_file(&ref->old_oid) &&
+        else if (repo_has_object_file(the_repository, &ref->old_oid) &&
                  ref_newer(&ref->new_oid, &ref->old_oid))
             info->status = PUSH_STATUS_FASTFORWARD;
         else
@@ -26,7 +26,7 @@ static void show_one(const char *refname, const struct object_id *oid)
     const char *hex;
     struct object_id peeled;

-    if (!has_object_file(oid))
+    if (!repo_has_object_file(the_repository, oid))
         die("git show-ref: bad ref %s (%s)", refname,
             oid_to_hex(oid));
@@ -215,7 +215,7 @@ static void write_tag_body(int fd, const struct object_id *oid)
     struct strbuf payload = STRBUF_INIT;
     struct strbuf signature = STRBUF_INIT;

-    orig = buf = read_object_file(oid, &type, &size);
+    orig = buf = repo_read_object_file(the_repository, oid, &type, &size);
     if (!buf)
         return;
     if (parse_signature(buf, size, &payload, &signature)) {
@@ -366,7 +366,7 @@ static void create_reflog_msg(const struct object_id *oid, struct strbuf *sb)
         strbuf_addstr(sb, "object of unknown type");
         break;
     case OBJ_COMMIT:
-        if ((buf = read_object_file(oid, &type, &size))) {
+        if ((buf = repo_read_object_file(the_repository, oid, &type, &size))) {
             subject_len = find_commit_subject(buf, &subject_start);
             strbuf_insert(sb, sb->len, subject_start, subject_len);
         } else {
@@ -10,7 +10,7 @@ static char *create_temp_file(struct object_id *oid)
     unsigned long size;
     int fd;

-    buf = read_object_file(oid, &type, &size);
+    buf = repo_read_object_file(the_repository, oid, &type, &size);
     if (!buf || type != OBJ_BLOB)
         die("unable to read blob object %s", oid_to_hex(oid));
@@ -442,7 +442,7 @@ static void unpack_delta_entry(enum object_type type, unsigned long delta_size,
     delta_data = get_data(delta_size);
     if (!delta_data)
         return;
-    if (has_object_file(&base_oid))
+    if (repo_has_object_file(the_repository, &base_oid))
         ; /* Ok we have this one */
     else if (resolve_against_held(nr, &base_oid,
                                   delta_data, delta_size))
@@ -508,7 +508,8 @@ static void unpack_delta_entry(enum object_type type, unsigned long delta_size,
         if (resolve_against_held(nr, &base_oid, delta_data, delta_size))
             return;

-    base = read_object_file(&base_oid, &type, &base_size);
+    base = repo_read_object_file(the_repository, &base_oid, &type,
+                                 &base_size);
     if (!base) {
         error("failed to read delta-pack base object %s",
               oid_to_hex(&base_oid));
@@ -124,7 +124,7 @@ static int already_written(struct bulk_checkin_packfile *state, struct object_id
     int i;

     /* The object may already exist in the repository */
-    if (has_object_file(oid))
+    if (repo_has_object_file(the_repository, oid))
         return 1;

     /* Might want to keep the list sorted */
bundle.c: 2 changed lines

@@ -293,7 +293,7 @@ static int is_tag_in_date_range(struct object *tag, struct rev_info *revs)
     if (revs->max_age == -1 && revs->min_age == -1)
         goto out;

-    buf = read_object_file(&tag->oid, &type, &size);
+    buf = repo_read_object_file(the_repository, &tag->oid, &type, &size);
     if (!buf)
         goto out;
     line = memmem(buf, size, "\ntagger ", 8);
@@ -229,7 +229,7 @@ int cache_tree_fully_valid(struct cache_tree *it)
     int i;
     if (!it)
         return 0;
-    if (it->entry_count < 0 || !has_object_file(&it->oid))
+    if (it->entry_count < 0 || !repo_has_object_file(the_repository, &it->oid))
         return 0;
     for (i = 0; i < it->subtree_nr; i++) {
         if (!cache_tree_fully_valid(it->down[i]->cache_tree))
@@ -280,7 +280,7 @@ static int update_one(struct cache_tree *it,
         }
     }

-    if (0 <= it->entry_count && has_object_file(&it->oid))
+    if (0 <= it->entry_count && repo_has_object_file(the_repository, &it->oid))
         return it->entry_count;

     /*
@@ -386,7 +386,7 @@ static int update_one(struct cache_tree *it,
         ce_missing_ok = mode == S_IFGITLINK || missing_ok ||
             !must_check_existence(ce);
         if (is_null_oid(oid) ||
-            (!ce_missing_ok && !has_object_file(oid))) {
+            (!ce_missing_ok && !repo_has_object_file(the_repository, oid))) {
             strbuf_release(&buffer);
             if (expected_missing)
                 return -1;
@@ -434,7 +434,7 @@ static int update_one(struct cache_tree *it,
         struct object_id oid;
         hash_object_file(the_hash_algo, buffer.buf, buffer.len,
                          OBJ_TREE, &oid);
-        if (has_object_file_with_flags(&oid, OBJECT_INFO_SKIP_FETCH_OBJECT))
+        if (repo_has_object_file_with_flags(the_repository, &oid, OBJECT_INFO_SKIP_FETCH_OBJECT))
             oidcpy(&it->oid, &oid);
         else
             to_invalidate = 1;
@@ -332,7 +332,7 @@ static char *grab_blob(struct repository *r,
         *size = fill_textconv(r, textconv, df, &blob);
         free_filespec(df);
     } else {
-        blob = read_object_file(oid, &type, size);
+        blob = repo_read_object_file(the_repository, oid, &type, size);
         if (type != OBJ_BLOB)
             die("object '%s' is not a blob!", oid_to_hex(oid));
     }
commit.c: 3 changed lines

@@ -1212,7 +1212,8 @@ static void handle_signed_tag(struct commit *parent, struct commit_extra_header
     desc = merge_remote_util(parent);
     if (!desc || !desc->obj)
         return;
-    buf = read_object_file(&desc->obj->oid, &type, &size);
+    buf = repo_read_object_file(the_repository, &desc->obj->oid, &type,
+                                &size);
     if (!buf || type != OBJ_TAG)
         goto free_return;
     if (!parse_signature(buf, size, &payload, &signature))
@@ -77,6 +77,16 @@
 |
 - diff_setup
 + repo_diff_setup
+// object-store.h
+|
+- read_object_file
++ repo_read_object_file
+|
+- has_object_file
++ repo_has_object_file
+|
+- has_object_file_with_flags
++ repo_has_object_file_with_flags
 )
   (
 + the_repository,
@@ -5,17 +5,7 @@
 @@
 @@
 (
-// object-store.h
-- read_object_file
-+ repo_read_object_file
-|
-- has_object_file
-+ repo_has_object_file
-|
-- has_object_file_with_flags
-+ repo_has_object_file_with_flags
 // pretty.h
-|
 - format_commit_message
 + repo_format_commit_message
 // packfile.h
dir.c: 2 changed lines

@@ -267,7 +267,7 @@ static int do_read_blob(const struct object_id *oid, struct oid_stat *oid_stat,
     *size_out = 0;
     *data_out = NULL;

-    data = read_object_file(oid, &type, &sz);
+    data = repo_read_object_file(the_repository, oid, &type, &sz);
     if (!data || type != OBJ_BLOB) {
         free(data);
         return -1;
entry.c: 3 changed lines

@@ -86,7 +86,8 @@ void *read_blob_entry(const struct cache_entry *ce, size_t *size)
 {
     enum object_type type;
     unsigned long ul;
-    void *blob_data = read_object_file(&ce->oid, &type, &ul);
+    void *blob_data = repo_read_object_file(the_repository, &ce->oid,
+                                            &type, &ul);

     *size = ul;
     if (blob_data) {
@@ -762,9 +762,9 @@ static void mark_complete_and_common_ref(struct fetch_negotiator *negotiator,
         if (!commit) {
             struct object *o;

-            if (!has_object_file_with_flags(&ref->old_oid,
-                                            OBJECT_INFO_QUICK |
-                                            OBJECT_INFO_SKIP_FETCH_OBJECT))
+            if (!repo_has_object_file_with_flags(the_repository, &ref->old_oid,
+                                                 OBJECT_INFO_QUICK |
+                                                 OBJECT_INFO_SKIP_FETCH_OBJECT))
                 continue;
             o = parse_object(the_repository, &ref->old_oid);
             if (!o || o->type != OBJ_COMMIT)
@@ -1963,7 +1963,7 @@ static void update_shallow(struct fetch_pack_args *args,
         struct oid_array extra = OID_ARRAY_INIT;
         struct object_id *oid = si->shallow->oid;
         for (i = 0; i < si->shallow->nr; i++)
-            if (has_object_file(&oid[i]))
+            if (repo_has_object_file(the_repository, &oid[i]))
                 oid_array_append(&extra, &oid[i]);
         if (extra.nr) {
             setup_alternate_shallow(&shallow_lock,
@@ -520,7 +520,8 @@ static void fmt_merge_msg_sigs(struct strbuf *out)
         struct object_id *oid = origins.items[i].util;
         enum object_type type;
         unsigned long size;
-        char *buf = read_object_file(oid, &type, &size);
+        char *buf = repo_read_object_file(the_repository, oid, &type,
+                                          &size);
         char *origbuf = buf;
         unsigned long len = size;
         struct signature_check sigc = { NULL };
fsck.c: 2 changed lines

@@ -1332,7 +1332,7 @@ static int fsck_blobs(struct oidset *blobs_found, struct oidset *blobs_done,
         if (oidset_contains(blobs_done, oid))
             continue;

-        buf = read_object_file(oid, &type, &size);
+        buf = repo_read_object_file(the_repository, oid, &type, &size);
         if (!buf) {
             if (is_promisor_object(oid))
                 continue;
http-push.c: 11 changed lines

@@ -362,7 +362,8 @@ static void start_put(struct transfer_request *request)
     ssize_t size;
     git_zstream stream;

-    unpacked = read_object_file(&request->obj->oid, &type, &len);
+    unpacked = repo_read_object_file(the_repository, &request->obj->oid,
+                                     &type, &len);
     hdrlen = format_object_header(hdr, sizeof(hdr), type, len);

     /* Set it up */
@@ -1427,7 +1428,7 @@ static void one_remote_ref(const char *refname)
     * Fetch a copy of the object if it doesn't exist locally - it
     * may be required for updating server info later.
     */
-    if (repo->can_update_info_refs && !has_object_file(&ref->old_oid)) {
+    if (repo->can_update_info_refs && !repo_has_object_file(the_repository, &ref->old_oid)) {
        obj = lookup_unknown_object(the_repository, &ref->old_oid);
        fprintf(stderr, " fetch %s for %s\n",
                oid_to_hex(&ref->old_oid), refname);
@@ -1628,14 +1629,14 @@ static int delete_remote_branch(const char *pattern, int force)
        return error("Remote HEAD symrefs too deep");
    if (is_null_oid(&head_oid))
        return error("Unable to resolve remote HEAD");
-   if (!has_object_file(&head_oid))
+   if (!repo_has_object_file(the_repository, &head_oid))
        return error("Remote HEAD resolves to object %s\nwhich does not exist locally, perhaps you need to fetch?", oid_to_hex(&head_oid));

    /* Remote branch must resolve to a known object */
    if (is_null_oid(&remote_ref->old_oid))
        return error("Unable to resolve remote branch %s",
                     remote_ref->name);
-   if (!has_object_file(&remote_ref->old_oid))
+   if (!repo_has_object_file(the_repository, &remote_ref->old_oid))
        return error("Remote branch %s resolves to object %s\nwhich does not exist locally, perhaps you need to fetch?", remote_ref->name, oid_to_hex(&remote_ref->old_oid));

    /* Remote branch must be an ancestor of remote HEAD */
@@ -1855,7 +1856,7 @@ int cmd_main(int argc, const char **argv)
        if (!force_all &&
            !is_null_oid(&ref->old_oid) &&
            !ref->force) {
-           if (!has_object_file(&ref->old_oid) ||
+           if (!repo_has_object_file(the_repository, &ref->old_oid) ||
                !ref_newer(&ref->peer_ref->new_oid,
                           &ref->old_oid)) {
                /*
@@ -135,7 +135,7 @@ static int fill_active_slot(struct walker *walker)
     list_for_each_safe(pos, tmp, head) {
         obj_req = list_entry(pos, struct object_request, node);
         if (obj_req->state == WAITING) {
-            if (has_object_file(&obj_req->oid))
+            if (repo_has_object_file(the_repository, &obj_req->oid))
                 obj_req->state = COMPLETE;
             else {
                 start_object_request(walker, obj_req);
@@ -492,7 +492,7 @@ static int fetch_object(struct walker *walker, unsigned char *hash)
     if (!obj_req)
         return error("Couldn't find request for %s in the queue", hex);

-    if (has_object_file(&obj_req->oid)) {
+    if (repo_has_object_file(the_repository, &obj_req->oid)) {
         if (obj_req->req)
             abort_http_object_request(obj_req->req);
         abort_object_request(obj_req);
@@ -64,7 +64,7 @@ static void process_blob(struct traversal_context *ctx,
      * of missing objects.
      */
     if (ctx->revs->exclude_promisor_objects &&
-        !has_object_file(&obj->oid) &&
+        !repo_has_object_file(the_repository, &obj->oid) &&
         is_promisor_object(&obj->oid))
         return;
@@ -216,7 +216,7 @@ static int read_mailmap_blob(struct string_list *map, const char *name)
     if (repo_get_oid(the_repository, name, &oid) < 0)
         return 0;

-    buf = read_object_file(&oid, &type, &size);
+    buf = repo_read_object_file(the_repository, &oid, &type, &size);
     if (!buf)
         return error("unable to read mailmap object at %s", name);
     if (type != OBJ_BLOB)
@@ -55,7 +55,7 @@ static void *fill_tree_desc_strict(struct tree_desc *desc,
     enum object_type type;
     unsigned long size;

-    buffer = read_object_file(hash, &type, &size);
+    buffer = repo_read_object_file(the_repository, hash, &type, &size);
     if (!buffer)
         die("unable to read tree (%s)", oid_to_hex(hash));
     if (type != OBJ_TREE)
@@ -188,7 +188,7 @@ static int splice_tree(const struct object_id *oid1, const char *prefix,
     if (*subpath)
         subpath++;

-    buf = read_object_file(oid1, &type, &sz);
+    buf = repo_read_object_file(the_repository, oid1, &type, &sz);
     if (!buf)
         die("cannot read tree %s", oid_to_hex(oid1));
     init_tree_desc(&desc, buf, sz);
@@ -12,7 +12,8 @@ static int fill_mmfile_blob(mmfile_t *f, struct blob *obj)
     unsigned long size;
     enum object_type type;

-    buf = read_object_file(&obj->object.oid, &type, &size);
+    buf = repo_read_object_file(the_repository, &obj->object.oid, &type,
+                                &size);
     if (!buf)
         return -1;
     if (type != OBJ_BLOB) {
@@ -78,7 +79,8 @@ void *merge_blobs(struct index_state *istate, const char *path,
             return NULL;
         if (!our)
             our = their;
-        return read_object_file(&our->object.oid, &type, size);
+        return repo_read_object_file(the_repository, &our->object.oid,
+                                     &type, size);
     }

     if (fill_mmfile_blob(&f1, our) < 0)
@@ -3505,7 +3505,7 @@ static int read_oid_strbuf(struct merge_options *opt,
     void *buf;
     enum object_type type;
     unsigned long size;
-    buf = read_object_file(oid, &type, &size);
+    buf = repo_read_object_file(the_repository, oid, &type, &size);
     if (!buf)
         return err(opt, _("cannot read object %s"), oid_to_hex(oid));
     if (type != OBJ_BLOB) {
@@ -951,7 +951,8 @@ static int update_file_flags(struct merge_options *opt,
             goto update_index;
         }

-        buf = read_object_file(&contents->oid, &type, &size);
+        buf = repo_read_object_file(the_repository, &contents->oid,
+                                    &type, &size);
         if (!buf) {
             ret = err(opt, _("cannot read object %s '%s'"),
                       oid_to_hex(&contents->oid), path);
@@ -3021,7 +3022,7 @@ static int read_oid_strbuf(struct merge_options *opt,
     void *buf;
     enum object_type type;
     unsigned long size;
-    buf = read_object_file(oid, &type, &size);
+    buf = repo_read_object_file(the_repository, oid, &type, &size);
     if (!buf)
         return err(opt, _("cannot read object %s"), oid_to_hex(oid));
     if (type != OBJ_BLOB) {
@@ -81,7 +81,7 @@ char *notes_cache_get(struct notes_cache *c, struct object_id *key_oid,
     value_oid = get_note(&c->tree, key_oid);
     if (!value_oid)
         return NULL;
-    value = read_object_file(value_oid, &type, &size);
+    value = repo_read_object_file(the_repository, value_oid, &type, &size);

     *outsize = size;
     return value;
@@ -326,7 +326,7 @@ static void write_note_to_worktree(const struct object_id *obj,
 {
     enum object_type type;
     unsigned long size;
-    void *buf = read_object_file(note, &type, &size);
+    void *buf = repo_read_object_file(the_repository, note, &type, &size);

     if (!buf)
         die("cannot read note %s for object %s",
notes.c: 12 changed lines

@@ -786,7 +786,7 @@ static int prune_notes_helper(const struct object_id *object_oid,
     struct note_delete_list **l = (struct note_delete_list **) cb_data;
     struct note_delete_list *n;

-    if (has_object_file(object_oid))
+    if (repo_has_object_file(the_repository, object_oid))
         return 0; /* nothing to do for this note */

     /* failed to find object => prune this note */
@@ -807,13 +807,15 @@ int combine_notes_concatenate(struct object_id *cur_oid,

     /* read in both note blob objects */
     if (!is_null_oid(new_oid))
-        new_msg = read_object_file(new_oid, &new_type, &new_len);
+        new_msg = repo_read_object_file(the_repository, new_oid,
+                                        &new_type, &new_len);
     if (!new_msg || !new_len || new_type != OBJ_BLOB) {
         free(new_msg);
         return 0;
     }
     if (!is_null_oid(cur_oid))
-        cur_msg = read_object_file(cur_oid, &cur_type, &cur_len);
+        cur_msg = repo_read_object_file(the_repository, cur_oid,
+                                        &cur_type, &cur_len);
     if (!cur_msg || !cur_len || cur_type != OBJ_BLOB) {
         free(cur_msg);
         free(new_msg);
@@ -869,7 +871,7 @@ static int string_list_add_note_lines(struct string_list *list,
         return 0;

     /* read_sha1_file NUL-terminates */
-    data = read_object_file(oid, &t, &len);
+    data = repo_read_object_file(the_repository, oid, &t, &len);
     if (t != OBJ_BLOB || !data || !len) {
         free(data);
         return t != OBJ_BLOB || !data;
@@ -1264,7 +1266,7 @@ static void format_note(struct notes_tree *t, const struct object_id *object_oid
     if (!oid)
         return;

-    if (!(msg = read_object_file(oid, &type, &msglen)) || type != OBJ_BLOB) {
+    if (!(msg = repo_read_object_file(the_repository, oid, &type, &msglen)) || type != OBJ_BLOB) {
         free(msg);
         return;
     }
@@ -1678,7 +1678,7 @@ int pretend_object_file(void *buf, unsigned long len, enum object_type type,
     struct cached_object *co;

     hash_object_file(the_hash_algo, buf, len, type, oid);
-    if (has_object_file_with_flags(oid, OBJECT_INFO_QUICK | OBJECT_INFO_SKIP_FETCH_OBJECT) ||
+    if (repo_has_object_file_with_flags(the_repository, oid, OBJECT_INFO_QUICK | OBJECT_INFO_SKIP_FETCH_OBJECT) ||
         find_cached_object(oid))
         return 0;
     ALLOC_GROW(cached_objects, cached_object_nr + 1, cached_object_alloc);
@@ -245,9 +245,6 @@ void *repo_read_object_file(struct repository *r,
                             const struct object_id *oid,
                             enum object_type *type,
                             unsigned long *size);
-#ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS
-#define read_object_file(oid, type, size) repo_read_object_file(the_repository, oid, type, size)
-#endif

 /* Read and unpack an object file into memory, write memory to an object file */
 int oid_object_info(struct repository *r, const struct object_id *, unsigned long *);
@@ -324,10 +321,6 @@ int has_object(struct repository *r, const struct object_id *oid,
 int repo_has_object_file(struct repository *r, const struct object_id *oid);
 int repo_has_object_file_with_flags(struct repository *r,
                                     const struct object_id *oid, int flags);
-#ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS
-#define has_object_file(oid) repo_has_object_file(the_repository, oid)
-#define has_object_file_with_flags(oid, flags) repo_has_object_file_with_flags(the_repository, oid, flags)
-#endif

 /*
  * Return true iff an alternate object database has a loose object
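The hunks just above also delete the NO_THE_REPOSITORY_COMPATIBILITY_MACROS wrappers, so the old single-repository spellings stop existing once all callers are converted. As a rough standalone illustration (not git code; every name below is a stand-in stub), the deleted macros simply forwarded to the repo_* functions with the_repository filled in:

    #include <stdio.h>

    struct repository { const char *gitdir; };
    static struct repository the_repo_instance = { ".git" };
    static struct repository *the_repository = &the_repo_instance;

    static int repo_has_object_file(struct repository *r, const char *oid_hex)
    {
        /* stand-in for the real object-store lookup */
        printf("looking up %s in %s\n", oid_hex, r->gitdir);
        return 1;
    }

    /* the kind of forwarding macro these hunks delete */
    #define has_object_file(oid) repo_has_object_file(the_repository, oid)

    int main(void)
    {
        /* old spelling, resolved through the compatibility macro */
        has_object_file("deadbeef");
        /* new spelling, what the semantic patch rewrites callers to */
        return !repo_has_object_file(the_repository, "deadbeef");
    }

With the macros gone, only the explicit repo_* form remains, which is exactly the form the cocci rule produces at every call site in this commit.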
@@ -263,7 +263,7 @@ static int ce_compare_link(const struct cache_entry *ce, size_t expected_size)
     if (strbuf_readlink(&sb, ce->name, expected_size))
         return -1;

-    buffer = read_object_file(&ce->oid, &type, &size);
+    buffer = repo_read_object_file(the_repository, &ce->oid, &type, &size);
     if (buffer) {
         if (size == sb.len)
             match = memcmp(buffer, sb.buf, size);
@@ -3553,7 +3553,8 @@ void *read_blob_data_from_index(struct index_state *istate,
     }
     if (pos < 0)
         return NULL;
-    data = read_object_file(&istate->cache[pos]->oid, &type, &sz);
+    data = repo_read_object_file(the_repository, &istate->cache[pos]->oid,
+                                 &type, &sz);
     if (!data || type != OBJ_BLOB) {
         free(data);
         return NULL;
reflog.c: 5 changed lines

@@ -28,7 +28,8 @@ static int tree_is_complete(const struct object_id *oid)
     if (!tree->buffer) {
         enum object_type type;
         unsigned long size;
-        void *data = read_object_file(oid, &type, &size);
+        void *data = repo_read_object_file(the_repository, oid, &type,
+                                           &size);
         if (!data) {
             tree->object.flags |= INCOMPLETE;
             return 0;
@@ -39,7 +40,7 @@ static int tree_is_complete(const struct object_id *oid)
     init_tree_desc(&desc, tree->buffer, tree->size);
     complete = 1;
     while (tree_entry(&desc, &entry)) {
-        if (!has_object_file(&entry.oid) ||
+        if (!repo_has_object_file(the_repository, &entry.oid) ||
             (S_ISDIR(entry.mode) && !tree_is_complete(&entry.oid))) {
             tree->object.flags |= INCOMPLETE;
             complete = 0;
remote.c: 2 changed lines

@@ -1759,7 +1759,7 @@ void set_ref_status_for_push(struct ref *remote_refs, int send_mirror,
         if (!reject_reason && !ref->deletion && !is_null_oid(&ref->old_oid)) {
             if (starts_with(ref->name, "refs/tags/"))
                 reject_reason = REF_STATUS_REJECT_ALREADY_EXISTS;
-            else if (!has_object_file(&ref->old_oid))
+            else if (!repo_has_object_file(the_repository, &ref->old_oid))
                 reject_reason = REF_STATUS_REJECT_FETCH_FIRST;
             else if (!lookup_commit_reference_gently(the_repository, &ref->old_oid, 1) ||
                      !lookup_commit_reference_gently(the_repository, &ref->new_oid, 1))
rerere.c: 5 changed lines

@@ -965,8 +965,9 @@ static int handle_cache(struct index_state *istate,
             break;
         i = ce_stage(ce) - 1;
         if (!mmfile[i].ptr) {
-            mmfile[i].ptr = read_object_file(&ce->oid, &type,
-                                             &size);
+            mmfile[i].ptr = repo_read_object_file(the_repository,
+                                                  &ce->oid, &type,
+                                                  &size);
             mmfile[i].size = size;
         }
     }
|
@ -42,9 +42,9 @@ int option_parse_push_signed(const struct option *opt,
|
||||
static void feed_object(const struct object_id *oid, FILE *fh, int negative)
|
||||
{
|
||||
if (negative &&
|
||||
!has_object_file_with_flags(oid,
|
||||
OBJECT_INFO_SKIP_FETCH_OBJECT |
|
||||
OBJECT_INFO_QUICK))
|
||||
!repo_has_object_file_with_flags(the_repository, oid,
|
||||
OBJECT_INFO_SKIP_FETCH_OBJECT |
|
||||
OBJECT_INFO_QUICK))
|
||||
return;
|
||||
|
||||
if (negative)
|
||||
|
@@ -301,7 +301,7 @@ static int write_one_shallow(const struct commit_graft *graft, void *cb_data)
     if (graft->nr_parent != -1)
         return 0;
     if (data->flags & QUICK) {
-        if (!has_object_file(&graft->oid))
+        if (!repo_has_object_file(the_repository, &graft->oid))
             return 0;
     } else if (data->flags & SEEN_ONLY) {
         struct commit *c = lookup_commit(the_repository, &graft->oid);
@@ -466,7 +466,7 @@ void prepare_shallow_info(struct shallow_info *info, struct oid_array *sa)
     ALLOC_ARRAY(info->ours, sa->nr);
     ALLOC_ARRAY(info->theirs, sa->nr);
     for (i = 0; i < sa->nr; i++) {
-        if (has_object_file(sa->oid + i)) {
+        if (repo_has_object_file(the_repository, sa->oid + i)) {
             struct commit_graft *graft;
             graft = lookup_commit_graft(the_repository,
                                         &sa->oid[i]);
@@ -494,7 +494,7 @@ void remove_nonexistent_theirs_shallow(struct shallow_info *info)
     for (i = dst = 0; i < info->nr_theirs; i++) {
         if (i != dst)
             info->theirs[dst] = info->theirs[i];
-        if (has_object_file(oid + info->theirs[i]))
+        if (repo_has_object_file(the_repository, oid + info->theirs[i]))
             dst++;
     }
     info->nr_theirs = dst;
@@ -588,7 +588,8 @@ static const struct submodule *config_from(struct submodule_cache *cache,
     if (submodule)
         goto out;

-    config = read_object_file(&oid, &type, &config_size);
+    config = repo_read_object_file(the_repository, &oid, &type,
+                                   &config_size);
     if (!config || type != OBJ_BLOB)
         goto out;
tag.c: 5 changed lines

@@ -54,7 +54,7 @@ int gpg_verify_tag(const struct object_id *oid, const char *name_to_report,
                      repo_find_unique_abbrev(the_repository, oid, DEFAULT_ABBREV),
                      type_name(type));

-    buf = read_object_file(oid, &type, &size);
+    buf = repo_read_object_file(the_repository, oid, &type, &size);
     if (!buf)
         return error("%s: unable to read file.",
                      name_to_report ?
@@ -216,7 +216,8 @@ int parse_tag(struct tag *item)

     if (item->object.parsed)
         return 0;
-    data = read_object_file(&item->object.oid, &type, &size);
+    data = repo_read_object_file(the_repository, &item->object.oid, &type,
+                                 &size);
     if (!data)
         return error("Could not read %s",
                      oid_to_hex(&item->object.oid));
tree.c: 3 changed lines

@@ -129,7 +129,8 @@ int parse_tree_gently(struct tree *item, int quiet_on_missing)

     if (item->object.parsed)
         return 0;
-    buffer = read_object_file(&item->object.oid, &type, &size);
+    buffer = repo_read_object_file(the_repository, &item->object.oid,
+                                   &type, &size);
     if (!buffer)
         return quiet_on_missing ? -1 :
             error("Could not read %s",
@@ -499,8 +499,8 @@ static int got_oid(struct upload_pack_data *data,
 {
     if (get_oid_hex(hex, oid))
         die("git upload-pack: expected SHA1 object, got '%s'", hex);
-    if (!has_object_file_with_flags(oid,
-                                    OBJECT_INFO_QUICK | OBJECT_INFO_SKIP_FETCH_OBJECT))
+    if (!repo_has_object_file_with_flags(the_repository, oid,
+                                         OBJECT_INFO_QUICK | OBJECT_INFO_SKIP_FETCH_OBJECT))
         return -1;
     return do_got_oid(data, oid);
 }
@@ -1600,8 +1600,8 @@ static int process_haves(struct upload_pack_data *data, struct oid_array *common
     for (i = 0; i < data->haves.nr; i++) {
         const struct object_id *oid = &data->haves.oid[i];

-        if (!has_object_file_with_flags(oid,
-                                        OBJECT_INFO_QUICK | OBJECT_INFO_SKIP_FETCH_OBJECT))
+        if (!repo_has_object_file_with_flags(the_repository, oid,
+                                             OBJECT_INFO_QUICK | OBJECT_INFO_SKIP_FETCH_OBJECT))
             continue;

         oid_array_append(common, oid);
walker.c: 2 changed lines

@@ -145,7 +145,7 @@ static int process(struct walker *walker, struct object *obj)
         return 0;
     obj->flags |= SEEN;

-    if (has_object_file(&obj->oid)) {
+    if (repo_has_object_file(the_repository, &obj->oid)) {
         /* We already have it, so we should scan it now. */
         obj->flags |= TO_SCAN;
     }
@@ -183,7 +183,7 @@ void read_mmblob(mmfile_t *ptr, const struct object_id *oid)
         return;
     }

-    ptr->ptr = read_object_file(oid, &type, &size);
+    ptr->ptr = repo_read_object_file(the_repository, oid, &type, &size);
     if (!ptr->ptr || type != OBJ_BLOB)
         die("unable to read blob object %s", oid_to_hex(oid));
     ptr->size = size;