global: ensure that object IDs are always padded

The `oidcmp()` and `oideq()` functions compare only the hash prefix
whose length is specified by the given hash algorithm. This mandates
that the object IDs have a valid hash algorithm set, as otherwise we
wouldn't be able to determine that prefix length. Because we do not
have a hash algorithm in many cases, for example when handling null
object IDs, this assumption cannot always be fulfilled. We thus have a
fallback in place that instead uses `the_repository` to derive the hash
function. This implicit dependency is hidden away from callers and can
be quite surprising, especially in contexts where there may be no
repository.
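
For illustration, a minimal sketch of that fallback pattern, using the
declarations from hash.h (`the_hash_algo` is a macro resolving to
`the_repository`'s hash algorithm); the helper name is hypothetical and
this is not the verbatim implementation:

    /*
     * Sketch: compare only the prefix used by the hash algorithm,
     * falling back to the_repository when no algorithm is set.
     */
    static inline int oidcmp_sketch(const struct object_id *oid1,
                                    const struct object_id *oid2)
    {
            const struct git_hash_algo *algop;

            if (!oid1->algo)
                    algop = the_hash_algo; /* implicit the_repository dependency */
            else
                    algop = &hash_algos[oid1->algo];

            return memcmp(oid1->hash, oid2->hash, algop->rawsz);
    }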

In theory, we could adapt those functions to always memcmp(3P) the
whole length of their hash arrays. But there exist a couple of sites
where we fill in a `struct object_id` such that only the prefix of its
hash array that is actually used by the hash algorithm is initialized,
while the remaining bytes are left uninitialized. Those uninitialized
bytes also lead to warnings under Valgrind in places where we copy them
around.
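
If the full hash array were always initialized, the comparison could
drop the algorithm lookup entirely. A sketch of that shape (an
illustration of the direction this enables, not code from this patch):

    /*
     * With all GIT_MAX_RAWSZ bytes initialized, comparing the whole
     * array is safe; with an uninitialized tail, this would read
     * indeterminate bytes and trip Valgrind.
     */
    static inline int oideq_fullwidth(const struct object_id *a,
                                      const struct object_id *b)
    {
            return !memcmp(a->hash, b->hash, GIT_MAX_RAWSZ);
    }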

Refactor callsites where we populate object IDs to always initialize
all bytes. This also allows us to get rid of `oidcpy_with_padding()`,
both because the input is now fully initialized and because `oidcpy()`
now always copies the whole hash array.
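
The invariant established at those callsites, condensed into an
illustrative helper (hypothetical name; it mirrors the `oidread()`
change below):

    static void set_oid_padded(struct object_id *oid, const unsigned char *bytes,
                               const struct git_hash_algo *algop)
    {
            memcpy(oid->hash, bytes, algop->rawsz);
            /* Zero the tail so all GIT_MAX_RAWSZ bytes are initialized. */
            if (algop->rawsz < GIT_MAX_RAWSZ)
                    memset(oid->hash + algop->rawsz, 0,
                           GIT_MAX_RAWSZ - algop->rawsz);
            oid->algo = hash_algo_by_ptr(algop);
    }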

Signed-off-by: Patrick Steinhardt <ps@pks.im>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
Author:    Patrick Steinhardt, 2024-06-14 08:49:59 +02:00
Committer: Junio C Hamano
Commit:    c98d762ed9 (parent 9da95bda74)

8 changed files with 15 additions and 26 deletions

@@ -288,6 +288,8 @@ static inline void oidread(struct object_id *oid, const unsigned char *hash,
                            const struct git_hash_algo *algop)
 {
         memcpy(oid->hash, hash, algop->rawsz);
+        if (algop->rawsz < GIT_MAX_RAWSZ)
+                memset(oid->hash + algop->rawsz, 0, GIT_MAX_RAWSZ - algop->rawsz);
         oid->algo = hash_algo_by_ptr(algop);
 }

hash.h

@@ -31,22 +31,6 @@ static inline int is_null_oid(const struct object_id *oid)
         return oideq(oid, null_oid());
 }
 
-/* Like oidcpy() but zero-pads the unused bytes in dst's hash array. */
-static inline void oidcpy_with_padding(struct object_id *dst,
-                                       const struct object_id *src)
-{
-        size_t hashsz;
-
-        if (!src->algo)
-                hashsz = the_hash_algo->rawsz;
-        else
-                hashsz = hash_algos[src->algo].rawsz;
-
-        memcpy(dst->hash, src->hash, hashsz);
-        memset(dst->hash + hashsz, 0, GIT_MAX_RAWSZ - hashsz);
-        dst->algo = src->algo;
-}
-
 static inline int is_empty_blob_oid(const struct object_id *oid)
 {
         return oideq(oid, the_hash_algo->empty_blob);

hex.c

@@ -25,8 +25,12 @@ int get_oid_hex_algop(const char *hex, struct object_id *oid,
                       const struct git_hash_algo *algop)
 {
         int ret = get_hash_hex_algop(hex, oid->hash, algop);
-        if (!ret)
+        if (!ret) {
                 oid_set_algo(oid, algop);
+                if (algop->rawsz != GIT_MAX_RAWSZ)
+                        memset(oid->hash + algop->rawsz, 0,
+                               GIT_MAX_RAWSZ - algop->rawsz);
+        }
         return ret;
 }
 

@@ -1016,6 +1016,7 @@ static void remote_ls(const char *path, int flags,
 /* extract hex from sharded "xx/x{38}" filename */
 static int get_oid_hex_from_objpath(const char *path, struct object_id *oid)
 {
+        memset(oid->hash, 0, GIT_MAX_RAWSZ);
         oid->algo = hash_algo_by_ptr(the_hash_algo);
 
         if (strlen(path) != the_hash_algo->hexsz + 1)

@@ -427,6 +427,8 @@ static void load_subtree(struct notes_tree *t, struct leaf_node *subtree,
                                  hashsz - prefix_len))
                         goto handle_non_note; /* entry.path is not a SHA1 */
 
+                memset(object_oid.hash + hashsz, 0, GIT_MAX_RAWSZ - hashsz);
+
                 type = PTR_TYPE_NOTE;
         } else if (path_len == 2) {
                 /* This is potentially an internal node */

@@ -2743,6 +2743,8 @@ int for_each_file_in_obj_subdir(unsigned int subdir_nr,
                     !hex_to_bytes(oid.hash + 1, de->d_name,
                                   the_hash_algo->rawsz - 1)) {
                         oid_set_algo(&oid, the_hash_algo);
+                        memset(oid.hash + the_hash_algo->rawsz, 0,
+                               GIT_MAX_RAWSZ - the_hash_algo->rawsz);
                         if (obj_cb) {
                                 r = obj_cb(&oid, path->buf, data);
                                 if (r)

@@ -42,7 +42,7 @@ void oidtree_insert(struct oidtree *ot, const struct object_id *oid)
          * Clear the padding and copy the result in separate steps to
          * respect the 4-byte alignment needed by struct object_id.
          */
-        oidcpy_with_padding(&k, oid);
+        oidcpy(&k, oid);
         memcpy(on->k, &k, sizeof(k));
 
         /*
@@ -60,7 +60,7 @@ int oidtree_contains(struct oidtree *ot, const struct object_id *oid)
         struct object_id k;
         size_t klen = sizeof(k);
 
-        oidcpy_with_padding(&k, oid);
+        oidcpy(&k, oid);
 
         if (oid->algo == GIT_HASH_UNKNOWN)
                 klen -= sizeof(oid->algo);

@@ -429,13 +429,7 @@ static void send_one_item(int fd, struct parallel_checkout_item *pc_item)
         fixed_portion->ident = pc_item->ca.ident;
         fixed_portion->name_len = name_len;
         fixed_portion->working_tree_encoding_len = working_tree_encoding_len;
-        /*
-         * We pad the unused bytes in the hash array because, otherwise,
-         * Valgrind would complain about passing uninitialized bytes to a
-         * write() syscall. The warning doesn't represent any real risk here,
-         * but it could hinder the detection of actual errors.
-         */
-        oidcpy_with_padding(&fixed_portion->oid, &pc_item->ce->oid);
+        oidcpy(&fixed_portion->oid, &pc_item->ce->oid);
 
         variant = data + sizeof(*fixed_portion);
         if (working_tree_encoding_len) {