#include "cache.h"
#include "object.h"
#include "pack.h"
#include "pack-objects.h"
#include "packfile.h"
#include "config.h"
static uint32_t locate_object_entry_hash(struct packing_data *pdata,
					 const struct object_id *oid,
					 int *found)
{
	uint32_t i, mask = (pdata->index_size - 1);

	i = oidhash(oid) & mask;

	while (pdata->index[i] > 0) {
		uint32_t pos = pdata->index[i] - 1;

		if (oideq(oid, &pdata->objects[pos].idx.oid)) {
			*found = 1;
			return i;
		}

		i = (i + 1) & mask;
	}

	*found = 0;
	return i;
}
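
/*
 * Round v up to the next power of two; v is returned unchanged if it is
 * already a power of two.
 */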
static inline uint32_t closest_pow2(uint32_t v)
{
	v = v - 1;
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;
	return v + 1;
}
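
/*
 * Rebuild the hash table from scratch: size it to a power of two that is
 * at least three times the number of objects (and at least 1024 slots),
 * then re-insert every entry.
 */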
static void rehash_objects(struct packing_data *pdata)
{
	uint32_t i;
	struct object_entry *entry;

	pdata->index_size = closest_pow2(pdata->nr_objects * 3);
	if (pdata->index_size < 1024)
		pdata->index_size = 1024;

	free(pdata->index);
	CALLOC_ARRAY(pdata->index, pdata->index_size);

	entry = pdata->objects;

	for (i = 0; i < pdata->nr_objects; i++) {
		int found;
		uint32_t ix = locate_object_entry_hash(pdata,
						       &entry->idx.oid,
						       &found);

		if (found)
			BUG("Duplicate object in hash");

		pdata->index[ix] = i + 1;
		entry++;
	}
}
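
/*
 * Look up "oid" in the packing list.  Returns NULL if the object has not
 * been added yet (or if no hash table exists because the list is empty).
 */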
struct object_entry *packlist_find(struct packing_data *pdata,
				   const struct object_id *oid)
{
	uint32_t i;
	int found;

	if (!pdata->index_size)
		return NULL;

	i = locate_object_entry_hash(pdata, oid, &found);

	if (!found)
		return NULL;

	return &pdata->objects[pdata->index[i] - 1];
}
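
/*
 * Build the table that lets an object_entry refer to its pack by a small
 * index (OE_IN_PACK_BITS wide) rather than a full pointer.  Slot 0 is
 * reserved so that an all-zero entry maps to "no pack".  If there are more
 * packs than the index can address, the table is not installed and callers
 * fall back to the per-object in_pack[] array.
 */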
static void prepare_in_pack_by_idx(struct packing_data *pdata)
{
	struct packed_git **mapping, *p;
	int cnt = 0, nr = 1U << OE_IN_PACK_BITS;

	ALLOC_ARRAY(mapping, nr);
	/*
	 * oe_in_pack() on an all-zero'd object_entry
	 * (i.e. in_pack_idx also zero) should return NULL.
	 */
	mapping[cnt++] = NULL;
	for (p = get_all_packs(pdata->repo); p; p = p->next, cnt++) {
		if (cnt == nr) {
			free(mapping);
			return;
		}
		p->index = cnt;
		mapping[cnt] = p;
	}
	pdata->in_pack_by_idx = mapping;
}

/*
 * A new pack appeared after prepare_in_pack_by_idx() was run; this is
 * likely a race.
 *
 * We could map the new pack into the in_pack_by_idx[] array, but then we
 * would have to deal with a full array anyway, and since this fallback is
 * hard to test, just stay simple and fall back to using the in_pack[]
 * array.
 */
void oe_map_new_pack(struct packing_data *pack)
{
	uint32_t i;

	if (pack->in_pack)
		BUG("packing_data has already been converted to pack array");

	ALLOC_ARRAY(pack->in_pack, pack->nr_alloc);

	for (i = 0; i < pack->nr_objects; i++)
		pack->in_pack[i] = oe_in_pack(pack, pack->objects + i);

	FREE_AND_NULL(pack->in_pack_by_idx);
}
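
/*
 * One-time setup of a packing_data.  The GIT_TEST_* knobs below let the
 * test suite exercise the fallback code paths: a full in_pack[] array
 * instead of in_pack_by_idx[], and smaller limits for the object/delta
 * sizes that fit in the packed fields of struct object_entry.
 */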
/* assume pdata is already zero'd by caller */
void prepare_packing_data(struct repository *r, struct packing_data *pdata)
{
	pdata->repo = r;

	if (git_env_bool("GIT_TEST_FULL_IN_PACK_ARRAY", 0)) {
		/*
		 * do not initialize in_pack_by_idx[] to force the
		 * slow path in oe_in_pack()
		 */
	} else {
		prepare_in_pack_by_idx(pdata);
	}

	pdata->oe_size_limit = git_env_ulong("GIT_TEST_OE_SIZE",
					     1U << OE_SIZE_BITS);
	pdata->oe_delta_size_limit = git_env_ulong("GIT_TEST_OE_DELTA_SIZE",
						   1UL << OE_DELTA_SIZE_BITS);
	init_recursive_mutex(&pdata->odb_lock);
}
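
/*
 * Append a new entry for "oid" to the packing list, growing objects[] (and
 * any parallel arrays in use) as needed, and rehash once the table reaches
 * roughly 3/4 load.
 *
 * Callers typically probe first and allocate only on a miss, roughly:
 *
 *	entry = packlist_find(pdata, oid);
 *	if (!entry)
 *		entry = packlist_alloc(pdata, oid);
 */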
struct object_entry *packlist_alloc(struct packing_data *pdata,
				    const struct object_id *oid)
{
	struct object_entry *new_entry;

	if (pdata->nr_objects >= pdata->nr_alloc) {
		pdata->nr_alloc = (pdata->nr_alloc + 1024) * 3 / 2;
		REALLOC_ARRAY(pdata->objects, pdata->nr_alloc);

		if (!pdata->in_pack_by_idx)
			REALLOC_ARRAY(pdata->in_pack, pdata->nr_alloc);
		if (pdata->delta_size)
			REALLOC_ARRAY(pdata->delta_size, pdata->nr_alloc);

		if (pdata->tree_depth)
			REALLOC_ARRAY(pdata->tree_depth, pdata->nr_alloc);

		if (pdata->layer)
			REALLOC_ARRAY(pdata->layer, pdata->nr_alloc);
	}

	new_entry = pdata->objects + pdata->nr_objects++;

	memset(new_entry, 0, sizeof(*new_entry));
	oidcpy(&new_entry->idx.oid, oid);

	if (pdata->index_size * 3 <= pdata->nr_objects * 4)
		rehash_objects(pdata);
	else {
		int found;
		uint32_t pos = locate_object_entry_hash(pdata,
							&new_entry->idx.oid,
							&found);
		if (found)
			BUG("duplicate object inserted into hash");
		pdata->index[pos] = pdata->nr_objects;
	}

	if (pdata->in_pack)
		pdata->in_pack[pdata->nr_objects - 1] = NULL;

	if (pdata->tree_depth)
		pdata->tree_depth[pdata->nr_objects - 1] = 0;

	if (pdata->layer)
		pdata->layer[pdata->nr_objects - 1] = 0;

	return new_entry;
}
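
/*
 * Record that "delta" is stored as a delta against a base object that is
 * not part of the packing list itself (e.g. an object the receiving side
 * already has, when building a thin pack).  The base gets a dummy entry in
 * the separate ext_bases[] array, flagged so it is never emitted, and
 * delta->delta_idx points into that array (1-based) with ext_base set to
 * distinguish it from a normal in-list base.
 */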
void oe_set_delta_ext(struct packing_data *pdata,
		      struct object_entry *delta,
		      const struct object_id *oid)
{
	struct object_entry *base;

	ALLOC_GROW(pdata->ext_bases, pdata->nr_ext + 1, pdata->alloc_ext);
	base = &pdata->ext_bases[pdata->nr_ext++];
	memset(base, 0, sizeof(*base));
	oidcpy(&base->idx.oid, oid);

	/* These flags mark that we are not part of the actual pack output. */
	base->preferred_base = 1;
	base->filled = 1;

	delta->ext_base = 1;
	delta->delta_idx = base - pdata->ext_bases + 1;
}