#include "cache.h"
#include "tag.h"
#include "commit.h"
#include "tree.h"
#include "blob.h"
#include "gpg-interface.h"

const char *tag_type = "tag";

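/*
 * Split the tag buffer into its payload and trailing signature and run
 * the signature check. Returns the result of check_signature(), or an
 * error when no signature is found.
 */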
static int run_gpg_verify(const char *buf, unsigned long size, unsigned flags)
{
	struct signature_check sigc;
	size_t payload_size;
	int ret;

	memset(&sigc, 0, sizeof(sigc));

	payload_size = parse_signature(buf, size);

	if (size == payload_size) {
		if (flags & GPG_VERIFY_VERBOSE)
			write_in_full(1, buf, payload_size);
		return error("no signature found");
	}

	ret = check_signature(buf, payload_size, buf + payload_size,
			      size - payload_size, &sigc);

	if (!(flags & GPG_VERIFY_OMIT_STATUS))
		print_signature_buffer(&sigc, flags);

	signature_check_clear(&sigc);
	return ret;
}

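/*
 * Verify the GPG signature of the tag object named by `oid`: look the
 * object up, insist that it is a tag, read its contents and hand them
 * to run_gpg_verify(). `name_to_report`, if non-NULL, is used in error
 * messages in place of the abbreviated object id.
 */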
int gpg_verify_tag(const struct object_id *oid, const char *name_to_report,
		unsigned flags)
{
	enum object_type type;
	char *buf;
	unsigned long size;
	int ret;

	type = oid_object_info(oid, NULL);
	if (type != OBJ_TAG)
		return error("%s: cannot verify a non-tag object of type %s.",
				name_to_report ?
				name_to_report :
				find_unique_abbrev(oid, DEFAULT_ABBREV),
				type_name(type));

	buf = read_object_file(oid, &type, &size);
	if (!buf)
		return error("%s: unable to read file.",
				name_to_report ?
				name_to_report :
				find_unique_abbrev(oid, DEFAULT_ABBREV));

	ret = run_gpg_verify(buf, size, flags);

	free(buf);
	return ret;
}

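/*
 * Peel a (possibly nested) tag until a non-tag object is reached,
 * parsing each pointed-to object along the way. If the chain cannot be
 * followed and `warn` is non-NULL, report an error naming it (using
 * `warnlen`, or its string length when warnlen is 0) and return NULL.
 */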
struct object *deref_tag(struct object *o, const char *warn, int warnlen)
{
	while (o && o->type == OBJ_TAG)
		if (((struct tag *)o)->tagged)
			o = parse_object(&((struct tag *)o)->tagged->oid);
		else
			o = NULL;
	if (!o && warn) {
		if (!warnlen)
			warnlen = strlen(warn);
		error("missing object referenced by '%.*s'", warnlen, warn);
	}
	return o;
}

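/*
 * Like deref_tag(), but only the tags themselves are parsed while
 * peeling; the final pointed-to object is returned without being
 * loaded, so its existence and type are not verified here.
 */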
struct object *deref_tag_noverify(struct object *o)
{
	while (o && o->type == OBJ_TAG) {
		o = parse_object(&o->oid);
		if (o && o->type == OBJ_TAG && ((struct tag *)o)->tagged)
			o = ((struct tag *)o)->tagged;
		else
			o = NULL;
	}
	return o;
}

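/*
 * Return the in-core object for `oid` as a struct tag, creating an
 * empty one if it is not yet known; returns NULL if the object is
 * already known to be of a different type.
 */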
struct tag *lookup_tag(const struct object_id *oid)
{
	struct object *obj = lookup_object(oid->hash);
	if (!obj)
		return create_object(oid->hash, alloc_tag_node());
	return object_as_type(obj, OBJ_TAG, 0);
}

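/*
 * Parse the timestamp out of a "tagger" line: skip everything up to and
 * including the '>' that closes the e-mail address, then read the
 * decimal timestamp that follows. Returns 0 if the line is truncated.
 */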
static timestamp_t parse_tag_date(const char *buf, const char *tail)
{
	const char *dateptr;

	while (buf < tail && *buf++ != '>')
		/* nada */;
	if (buf >= tail)
		return 0;
	dateptr = buf;
	while (buf < tail && *buf++ != '\n')
		/* nada */;
	if (buf >= tail)
		return 0;
	/* dateptr < buf && buf[-1] == '\n', so parsing will stop at buf-1 */
	return parse_timestamp(dateptr, NULL, 10);
}

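/*
 * Parse the "object", "type", "tag" and (optional) "tagger" headers of
 * the tag object held in `data` and fill in `item`. Returns 0 on
 * success and -1 on a malformed buffer; a tag that is already parsed
 * is left alone.
 */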
int parse_tag_buffer(struct tag *item, const void *data, unsigned long size)
{
	struct object_id oid;
	char type[20];
	const char *bufptr = data;
	const char *tail = bufptr + size;
	const char *nl;

	if (item->object.parsed)
		return 0;
	item->object.parsed = 1;

	if (size < GIT_SHA1_HEXSZ + 24)
		return -1;
	if (memcmp("object ", bufptr, 7) || parse_oid_hex(bufptr + 7, &oid, &bufptr) || *bufptr++ != '\n')
		return -1;

	if (!starts_with(bufptr, "type "))
		return -1;
	bufptr += 5;
	nl = memchr(bufptr, '\n', tail - bufptr);
	if (!nl || sizeof(type) <= (nl - bufptr))
		return -1;
	memcpy(type, bufptr, nl - bufptr);
	type[nl - bufptr] = '\0';
	bufptr = nl + 1;

	if (!strcmp(type, blob_type)) {
		item->tagged = (struct object *)lookup_blob(&oid);
	} else if (!strcmp(type, tree_type)) {
		item->tagged = (struct object *)lookup_tree(&oid);
	} else if (!strcmp(type, commit_type)) {
		item->tagged = (struct object *)lookup_commit(&oid);
	} else if (!strcmp(type, tag_type)) {
		item->tagged = (struct object *)lookup_tag(&oid);
	} else {
		error("Unknown type %s", type);
		item->tagged = NULL;
	}

	if (bufptr + 4 < tail && starts_with(bufptr, "tag "))
		; /* good */
	else
		return -1;
	bufptr += 4;
	nl = memchr(bufptr, '\n', tail - bufptr);
	if (!nl)
		return -1;
	item->tag = xmemdupz(bufptr, nl - bufptr);
	bufptr = nl + 1;

	if (bufptr + 7 < tail && starts_with(bufptr, "tagger "))
		item->date = parse_tag_date(bufptr, tail);
	else
		item->date = 0;

	return 0;
}

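/*
 * Read the tag object from the object database and parse it via
 * parse_tag_buffer(). Returns 0 on success (or when the tag was
 * already parsed) and a negative value on error.
 */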
int parse_tag(struct tag *item)
{
	enum object_type type;
	void *data;
	unsigned long size;
	int ret;

	if (item->object.parsed)
		return 0;
	data = read_object_file(&item->object.oid, &type, &size);
	if (!data)
		return error("Could not read %s",
			     oid_to_hex(&item->object.oid));
	if (type != OBJ_TAG) {
		free(data);
		return error("Object %s not a tag",
			     oid_to_hex(&item->object.oid));
	}
	ret = parse_tag_buffer(item, data, size);
	free(data);
	return ret;
}