git/fetch-pack.c

#include "cache.h"
#include "refs.h"
#include "pkt-line.h"
#include "commit.h"
#include "tag.h"
#include "exec_cmd.h"
#include "pack.h"
#include "sideband.h"
#include "fetch-pack.h"
#include "remote.h"
#include "run-command.h"
#include "connect.h"
#include "transport.h"
#include "version.h"
#include "prio-queue.h"
static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
static int unpack_limit = 100;
static int prefer_ofs_delta = 1;
static int no_done;
static int fetch_fsck_objects = -1;
static int transfer_fsck_objects = -1;
static int agent_supported;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;
#define COMPLETE (1U << 0)
#define COMMON (1U << 1)
#define COMMON_REF (1U << 2)
#define SEEN (1U << 3)
#define POPPED (1U << 4)
static int marked;
/*
* After sending this many "have"s, if we do not get any new ACK, we
* give up traversing our history.
*/
#define MAX_IN_VAIN 256
static struct prio_queue rev_list = { compare_commits_by_commit_date };
static int non_common_revs, multi_ack, use_sideband, allow_tip_sha1_in_want;
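/*
* Queue a commit for the negotiation walk: set the given mark, parse
* the commit if necessary, and put it on the rev_list priority queue.
* non_common_revs counts queued commits not yet known to be common.
*/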
static void rev_list_push(struct commit *commit, int mark)
{
if (!(commit->object.flags & mark)) {
commit->object.flags |= mark;
if (!(commit->object.parsed))
if (parse_commit(commit))
return;
prio_queue_put(&rev_list, commit);
if (!(commit->object.flags & COMMON))
non_common_revs++;
}
}
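/*
* for_each_ref() callback: peel the ref to a commit (through tags, if
* any) and push it onto the rev_list queue as a negotiation tip.
*/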
static int rev_list_insert_ref(const char *refname, const unsigned char *sha1, int flag, void *cb_data)
{
struct object *o = deref_tag(parse_object(sha1), refname, 0);
if (o && o->type == OBJ_COMMIT)
rev_list_push((struct commit *)o, SEEN);
return 0;
}
static int clear_marks(const char *refname, const unsigned char *sha1, int flag, void *cb_data)
{
struct object *o = deref_tag(parse_object(sha1), refname, 0);
if (o && o->type == OBJ_COMMIT)
clear_commit_marks((struct commit *)o,
COMMON | COMMON_REF | SEEN | POPPED);
return 0;
}
/*
This function marks a rev and its ancestors as common.
In some cases, it is desirable to mark only the ancestors (for example
when only the server does not yet know that they are common).
*/
static void mark_common(struct commit *commit,
int ancestors_only, int dont_parse)
{
if (commit != NULL && !(commit->object.flags & COMMON)) {
struct object *o = (struct object *)commit;
if (!ancestors_only)
o->flags |= COMMON;
if (!(o->flags & SEEN))
rev_list_push(commit, SEEN);
else {
struct commit_list *parents;
if (!ancestors_only && !(o->flags & POPPED))
non_common_revs--;
if (!o->parsed && !dont_parse)
if (parse_commit(commit))
return;
for (parents = commit->parents;
parents;
parents = parents->next)
mark_common(parents->item, 0, dont_parse);
}
}
}
/*
Get the next rev to send, ignoring the common.
*/
static const unsigned char *get_rev(void)
{
struct commit *commit = NULL;
while (commit == NULL) {
unsigned int mark;
struct commit_list *parents;
if (rev_list.nr == 0 || non_common_revs == 0)
return NULL;
commit = prio_queue_get(&rev_list);
if (!commit->object.parsed)
parse_commit(commit);
parents = commit->parents;
commit->object.flags |= POPPED;
if (!(commit->object.flags & COMMON))
non_common_revs--;
if (commit->object.flags & COMMON) {
/* do not send "have", and ignore ancestors */
commit = NULL;
mark = COMMON | SEEN;
} else if (commit->object.flags & COMMON_REF)
/* send "have", and ignore ancestors */
mark = COMMON | SEEN;
else
/* send "have", also for its ancestors */
mark = SEEN;
while (parents) {
if (!(parents->item->object.flags & SEEN))
rev_list_push(parents->item, mark);
if (mark & COMMON)
mark_common(parents->item, 1, 0);
parents = parents->next;
}
}
return commit->object.sha1;
}
enum ack_type {
NAK = 0,
ACK,
ACK_continue,
ACK_common,
ACK_ready
};
static void consume_shallow_list(struct fetch_pack_args *args, int fd)
{
if (args->stateless_rpc && args->depth > 0) {
/* If we sent a depth, we will get back "duplicate"
* shallow and unshallow commands every time there
* is a block of "have" lines exchanged.
*/
char *line;
while ((line = packet_read_line(fd, NULL))) {
if (!prefixcmp(line, "shallow "))
continue;
if (!prefixcmp(line, "unshallow "))
continue;
die("git fetch-pack: expected shallow list");
}
}
}
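/*
* Helpers to emit our shallow grafts, either as pkt-line "shallow"
* commands (pack protocol) or as plain hex lines.
*/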
struct write_shallow_data {
struct strbuf *out;
int use_pack_protocol;
int count;
};
static int write_one_shallow(const struct commit_graft *graft, void *cb_data)
{
struct write_shallow_data *data = cb_data;
const char *hex = sha1_to_hex(graft->sha1);
data->count++;
if (data->use_pack_protocol)
packet_buf_write(data->out, "shallow %s", hex);
else {
strbuf_addstr(data->out, hex);
strbuf_addch(data->out, '\n');
}
return 0;
}
static int write_shallow_commits(struct strbuf *out, int use_pack_protocol)
{
struct write_shallow_data data;
data.out = out;
data.use_pack_protocol = use_pack_protocol;
data.count = 0;
for_each_commit_graft(write_one_shallow, &data);
return data.count;
}
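/*
* Read one ACK/NAK packet from the server and classify it; for ACKs
* the acknowledged object name is stored in result_sha1.
*/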
static enum ack_type get_ack(int fd, unsigned char *result_sha1)
{
int len;
char *line = packet_read_line(fd, &len);
if (!len)
die("git fetch-pack: expected ACK/NAK, got EOF");
if (!strcmp(line, "NAK"))
return NAK;
if (!prefixcmp(line, "ACK ")) {
if (!get_sha1_hex(line+4, result_sha1)) {
if (len < 45)
return ACK;
if (strstr(line+45, "continue"))
return ACK_continue;
if (strstr(line+45, "common"))
return ACK_common;
if (strstr(line+45, "ready"))
return ACK_ready;
return ACK;
}
}
die("git fetch_pack: expected ACK/NAK, got '%s'", line);
}
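/*
* Flush the buffered request to the server.  Over stateless RPC the
* buffer is sent in chunks of at most LARGE_PACKET_MAX followed by a
* flush packet; otherwise it is written straight to the connection.
*/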
static void send_request(struct fetch_pack_args *args,
int fd, struct strbuf *buf)
{
if (args->stateless_rpc) {
send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
packet_flush(fd);
} else
write_or_die(fd, buf->buf, buf->len);
}
static void insert_one_alternate_ref(const struct ref *ref, void *unused)
{
rev_list_insert_ref(NULL, ref->old_sha1, 0, NULL);
}
#define INITIAL_FLUSH 16
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 1024
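/*
* Compute how many "have"s to send before the next flush: the window
* doubles until it reaches the flush limit (PIPESAFE_FLUSH, or
* LARGE_FLUSH for stateless RPC), and then grows linearly.
*/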
static int next_flush(struct fetch_pack_args *args, int count)
{
int flush_limit = args->stateless_rpc ? LARGE_FLUSH : PIPESAFE_FLUSH;
if (count < flush_limit)
count <<= 1;
else
count += flush_limit;
return count;
}
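/*
* Negotiate with the server: send "want" lines (capabilities ride on
* the first one), then walk our history emitting "have" lines and
* reading ACKs until enough common commits are found or we give up.
* A negative return value means no common commits were found.
*/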
static int find_common(struct fetch_pack_args *args,
int fd[2], unsigned char *result_sha1,
struct ref *refs)
{
int fetching;
int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
const unsigned char *sha1;
unsigned in_vain = 0;
int got_continue = 0;
int got_ready = 0;
struct strbuf req_buf = STRBUF_INIT;
size_t state_len = 0;
if (args->stateless_rpc && multi_ack == 1)
die("--stateless-rpc requires multi_ack_detailed");
if (marked)
for_each_ref(clear_marks, NULL);
marked = 1;
for_each_ref(rev_list_insert_ref, NULL);
for_each_alternate_ref(insert_one_alternate_ref, NULL);
fetching = 0;
for ( ; refs ; refs = refs->next) {
unsigned char *remote = refs->old_sha1;
const char *remote_hex;
struct object *o;
/*
* If that object is complete (i.e. it is an ancestor of a
* local ref), we tell them we have it but do not have to
* tell them about its ancestors, which they already know
* about.
*
* We use lookup_object here because we are only
* interested in the case we *know* the object is
* reachable and we have already scanned it.
*/
if (((o = lookup_object(remote)) != NULL) &&
(o->flags & COMPLETE)) {
continue;
}
remote_hex = sha1_to_hex(remote);
if (!fetching) {
struct strbuf c = STRBUF_INIT;
if (multi_ack == 2) strbuf_addstr(&c, " multi_ack_detailed");
if (multi_ack == 1) strbuf_addstr(&c, " multi_ack");
if (no_done) strbuf_addstr(&c, " no-done");
if (use_sideband == 2) strbuf_addstr(&c, " side-band-64k");
if (use_sideband == 1) strbuf_addstr(&c, " side-band");
if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
if (args->no_progress) strbuf_addstr(&c, " no-progress");
if (args->include_tag) strbuf_addstr(&c, " include-tag");
if (prefer_ofs_delta) strbuf_addstr(&c, " ofs-delta");
if (agent_supported) strbuf_addf(&c, " agent=%s",
git_user_agent_sanitized());
packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
strbuf_release(&c);
} else
packet_buf_write(&req_buf, "want %s\n", remote_hex);
fetching++;
}
if (!fetching) {
strbuf_release(&req_buf);
packet_flush(fd[1]);
return 1;
}
if (is_repository_shallow())
write_shallow_commits(&req_buf, 1);
if (args->depth > 0)
packet_buf_write(&req_buf, "deepen %d", args->depth);
packet_buf_flush(&req_buf);
state_len = req_buf.len;
if (args->depth > 0) {
char *line;
unsigned char sha1[20];
send_request(args, fd[1], &req_buf);
while ((line = packet_read_line(fd[0], NULL))) {
if (!prefixcmp(line, "shallow ")) {
if (get_sha1_hex(line + 8, sha1))
die("invalid shallow line: %s", line);
register_shallow(sha1);
continue;
}
if (!prefixcmp(line, "unshallow ")) {
if (get_sha1_hex(line + 10, sha1))
die("invalid unshallow line: %s", line);
if (!lookup_object(sha1))
die("object not found: %s", line);
/* make sure that it is parsed as shallow */
if (!parse_object(sha1))
die("error in object: %s", line);
if (unregister_shallow(sha1))
die("no shallow found: %s", line);
continue;
}
die("expected shallow/unshallow, got %s", line);
}
} else if (!args->stateless_rpc)
send_request(args, fd[1], &req_buf);
if (!args->stateless_rpc) {
/* If we aren't using the stateless-rpc interface
* we don't need to retain the headers.
*/
strbuf_setlen(&req_buf, 0);
state_len = 0;
}
flushes = 0;
retval = -1;
while ((sha1 = get_rev())) {
packet_buf_write(&req_buf, "have %s\n", sha1_to_hex(sha1));
if (args->verbose)
fprintf(stderr, "have %s\n", sha1_to_hex(sha1));
in_vain++;
if (flush_at <= ++count) {
int ack;
packet_buf_flush(&req_buf);
send_request(args, fd[1], &req_buf);
strbuf_setlen(&req_buf, state_len);
flushes++;
flush_at = next_flush(args, count);
/*
* We keep one window "ahead" of the other side, and
* will wait for an ACK only on the next one
*/
if (!args->stateless_rpc && count == INITIAL_FLUSH)
continue;
consume_shallow_list(args, fd[0]);
do {
ack = get_ack(fd[0], result_sha1);
if (args->verbose && ack)
fprintf(stderr, "got ack %d %s\n", ack,
sha1_to_hex(result_sha1));
switch (ack) {
case ACK:
flushes = 0;
multi_ack = 0;
retval = 0;
goto done;
case ACK_common:
case ACK_ready:
case ACK_continue: {
struct commit *commit =
lookup_commit(result_sha1);
if (!commit)
die("invalid commit %s", sha1_to_hex(result_sha1));
if (args->stateless_rpc
&& ack == ACK_common
&& !(commit->object.flags & COMMON)) {
/* We need to replay the have for this object
* on the next RPC request so the peer knows
* it is in common with us.
*/
const char *hex = sha1_to_hex(result_sha1);
packet_buf_write(&req_buf, "have %s\n", hex);
state_len = req_buf.len;
}
mark_common(commit, 0, 1);
retval = 0;
in_vain = 0;
got_continue = 1;
if (ack == ACK_ready) {
clear_prio_queue(&rev_list);
got_ready = 1;
}
break;
}
}
} while (ack);
flushes--;
if (got_continue && MAX_IN_VAIN < in_vain) {
if (args->verbose)
fprintf(stderr, "giving up\n");
break; /* give up */
}
}
}
done:
if (!got_ready || !no_done) {
packet_buf_write(&req_buf, "done\n");
send_request(args, fd[1], &req_buf);
}
if (args->verbose)
fprintf(stderr, "done\n");
if (retval != 0) {
multi_ack = 0;
flushes++;
}
strbuf_release(&req_buf);
consume_shallow_list(args, fd[0]);
while (flushes || multi_ack) {
int ack = get_ack(fd[0], result_sha1);
if (ack) {
if (args->verbose)
fprintf(stderr, "got ack (%d) %s\n", ack,
sha1_to_hex(result_sha1));
if (ack == ACK)
return 0;
multi_ack = 1;
continue;
}
flushes--;
}
/* it is no error to fetch into a completely empty repo */
return count ? retval : 0;
}
static struct commit_list *complete;
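/*
* for_each_ref() callback: peel the ref, marking any tags COMPLETE on
* the way, and collect the commit it points at on the "complete" list.
*/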
static int mark_complete(const char *refname, const unsigned char *sha1, int flag, void *cb_data)
{
struct object *o = parse_object(sha1);
while (o && o->type == OBJ_TAG) {
struct tag *t = (struct tag *) o;
if (!t->tagged)
break; /* broken repository */
o->flags |= COMPLETE;
o = parse_object(t->tagged->sha1);
}
if (o && o->type == OBJ_COMMIT) {
struct commit *commit = (struct commit *)o;
if (!(commit->object.flags & COMPLETE)) {
commit->object.flags |= COMPLETE;
commit_list_insert(commit, &complete);
}
}
return 0;
}
static void mark_recent_complete_commits(struct fetch_pack_args *args,
unsigned long cutoff)
{
while (complete && cutoff <= complete->item->date) {
if (args->verbose)
fprintf(stderr, "Marking %s as complete\n",
sha1_to_hex(complete->item->object.sha1));
pop_most_recent_commit(&complete, COMPLETE);
}
}
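/*
* Reduce the advertised refs to the ones we were asked for: refs with
* malformed names are dropped, refs matching the sorted "sought" array
* are kept, and with fetch_all everything (except tags when a depth is
* given) is kept.  If the server allows it, unmatched requests naming
* a raw SHA-1 are appended to the list as well.
*/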
static void filter_refs(struct fetch_pack_args *args,
struct ref **refs,
struct ref **sought, int nr_sought)
{
struct ref *newlist = NULL;
struct ref **newtail = &newlist;
struct ref *ref, *next;
int i;
i = 0;
for (ref = *refs; ref; ref = next) {
int keep = 0;
next = ref->next;
if (!memcmp(ref->name, "refs/", 5) &&
check_refname_format(ref->name + 5, 0))
; /* trash */
else {
while (i < nr_sought) {
int cmp = strcmp(ref->name, sought[i]->name);
if (cmp < 0)
break; /* definitely do not have it */
else if (cmp == 0) {
keep = 1; /* definitely have it */
sought[i]->matched = 1;
}
i++;
}
}
if (!keep && args->fetch_all &&
(!args->depth || prefixcmp(ref->name, "refs/tags/")))
keep = 1;
if (keep) {
*newtail = ref;
ref->next = NULL;
newtail = &ref->next;
} else {
free(ref);
}
}
/* Append unmatched requests to the list */
if (allow_tip_sha1_in_want) {
for (i = 0; i < nr_sought; i++) {
ref = sought[i];
if (ref->matched)
continue;
if (get_sha1_hex(ref->name, ref->old_sha1))
continue;
ref->matched = 1;
*newtail = ref;
ref->next = NULL;
newtail = &ref->next;
}
}
*refs = newlist;
}
static void mark_alternate_complete(const struct ref *ref, void *unused)
{
mark_complete(NULL, ref->old_sha1, 0, NULL);
}
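/*
* Check whether everything we are asked to fetch is already present
* locally.  Complete local refs are marked, complete remote refs are
* pushed as common starting points, and the ref list is filtered;
* returns non-zero when there is nothing left to fetch.
*/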
static int everything_local(struct fetch_pack_args *args,
struct ref **refs,
struct ref **sought, int nr_sought)
{
struct ref *ref;
int retval;
unsigned long cutoff = 0;
save_commit_buffer = 0;
for (ref = *refs; ref; ref = ref->next) {
struct object *o;
if (!has_sha1_file(ref->old_sha1))
continue;
o = parse_object(ref->old_sha1);
if (!o)
continue;
/* We already have it -- which may mean that we were
* in sync with the other side at some time after
* that (it is OK if we guess wrong here).
*/
if (o->type == OBJ_COMMIT) {
struct commit *commit = (struct commit *)o;
if (!cutoff || cutoff < commit->date)
cutoff = commit->date;
}
}
if (!args->depth) {
for_each_ref(mark_complete, NULL);
for_each_alternate_ref(mark_alternate_complete, NULL);
commit_list_sort_by_date(&complete);
if (cutoff)
mark_recent_complete_commits(args, cutoff);
}
/*
* Mark all complete remote refs as COMMON_REF starting points, but do
* not mark them COMMON yet; the server has to be told about them first,
* so only their ancestors are marked common here.
*/
for (ref = *refs; ref; ref = ref->next) {
struct object *o = deref_tag(lookup_object(ref->old_sha1),
NULL, 0);
if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
continue;
if (!(o->flags & SEEN)) {
rev_list_push((struct commit *)o, COMMON_REF | SEEN);
mark_common((struct commit *)o, 1, 1);
}
}
filter_refs(args, refs, sought, nr_sought);
for (retval = 1, ref = *refs; ref ; ref = ref->next) {
const unsigned char *remote = ref->old_sha1;
struct object *o;
o = lookup_object(remote);
if (!o || !(o->flags & COMPLETE)) {
retval = 0;
if (!args->verbose)
continue;
fprintf(stderr,
"want %s (%s)\n", sha1_to_hex(remote),
ref->name);
continue;
}
hashcpy(ref->new_sha1, local);
if (!args->verbose)
continue;
fprintf(stderr,
"already have %s (%s)\n", sha1_to_hex(remote),
ref->name);
}
return retval;
}
static int sideband_demux(int in, int out, void *data)
{
int *xd = data;
int ret = recv_sideband("fetch-pack", xd[0], out);
close(out);
return ret;
}
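/*
* Receive the pack stream from the server, optionally through a
* sideband demultiplexer, and hand it to index-pack or unpack-objects
* depending on the keep settings and the unpack limit.
*/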
static int get_pack(struct fetch_pack_args *args,
int xd[2], char **pack_lockfile)
{
struct async demux;
const char *argv[22];
char keep_arg[256];
char hdr_arg[256];
const char **av;
int do_keep = args->keep_pack;
struct child_process cmd;
int ret;
memset(&demux, 0, sizeof(demux));
if (use_sideband) {
/* xd[] is talking with upload-pack; subprocess reads from
* xd[0], spits out band#2 to stderr, and feeds us band#1
* through demux->out.
*/
demux.proc = sideband_demux;
demux.data = xd;
demux.out = -1;
if (start_async(&demux))
die("fetch-pack: unable to fork off sideband"
" demultiplexer");
}
else
demux.out = xd[0];
memset(&cmd, 0, sizeof(cmd));
cmd.argv = argv;
av = argv;
*hdr_arg = 0;
if (!args->keep_pack && unpack_limit) {
struct pack_header header;
if (read_pack_header(demux.out, &header))
die("protocol error: bad pack header");
snprintf(hdr_arg, sizeof(hdr_arg),
"--pack_header=%"PRIu32",%"PRIu32,
ntohl(header.hdr_version), ntohl(header.hdr_entries));
if (ntohl(header.hdr_entries) < unpack_limit)
do_keep = 0;
else
do_keep = 1;
}
if (alternate_shallow_file) {
*av++ = "--shallow-file";
*av++ = alternate_shallow_file;
}
if (do_keep) {
if (pack_lockfile)
cmd.out = -1;
*av++ = "index-pack";
*av++ = "--stdin";
if (!args->quiet && !args->no_progress)
*av++ = "-v";
if (args->use_thin_pack)
*av++ = "--fix-thin";
if (args->lock_pack || unpack_limit) {
int s = sprintf(keep_arg,
"--keep=fetch-pack %"PRIuMAX " on ", (uintmax_t) getpid());
if (gethostname(keep_arg + s, sizeof(keep_arg) - s))
strcpy(keep_arg + s, "localhost");
*av++ = keep_arg;
}
if (args->check_self_contained_and_connected)
*av++ = "--check-self-contained-and-connected";
}
else {
*av++ = "unpack-objects";
if (args->quiet || args->no_progress)
*av++ = "-q";
args->check_self_contained_and_connected = 0;
}
if (*hdr_arg)
*av++ = hdr_arg;
if (fetch_fsck_objects >= 0
? fetch_fsck_objects
: transfer_fsck_objects >= 0
? transfer_fsck_objects
: 0)
*av++ = "--strict";
*av++ = NULL;
cmd.in = demux.out;
cmd.git_cmd = 1;
if (start_command(&cmd))
die("fetch-pack: unable to fork off %s", argv[0]);
if (do_keep && pack_lockfile) {
*pack_lockfile = index_pack_lockfile(cmd.out);
close(cmd.out);
}
ret = finish_command(&cmd);
if (!ret || (args->check_self_contained_and_connected && ret == 1))
args->self_contained_and_connected =
args->check_self_contained_and_connected &&
ret == 0;
else
die("%s failed", argv[0]);
if (use_sideband && finish_async(&demux))
die("error in sideband demultiplexer");
return 0;
}
static int cmp_ref_by_name(const void *a_, const void *b_)
{
const struct ref *a = *((const struct ref **)a_);
const struct ref *b = *((const struct ref **)b_);
return strcmp(a->name, b->name);
}
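/*
* Write the current shallow boundary to a locked temporary file and
* point alternate_shallow_file at it; an empty string means "no
* shallow file" to is_repository_shallow().
*/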
static void setup_alternate_shallow(void)
{
struct strbuf sb = STRBUF_INIT;
int fd;
check_shallow_file_for_update();
fd = hold_lock_file_for_update(&shallow_lock, git_path("shallow"),
LOCK_DIE_ON_ERROR);
if (write_shallow_commits(&sb, 0)) {
if (write_in_full(fd, sb.buf, sb.len) != sb.len)
die_errno("failed to write to %s", shallow_lock.filename);
alternate_shallow_file = shallow_lock.filename;
} else
/*
* is_repository_shallow() sees empty string as "no
* shallow file".
*/
alternate_shallow_file = "";
strbuf_release(&sb);
}
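/*
* Drive a single fetch over an established connection: parse the
* server's capability advertisement, short-circuit if everything is
* already local, negotiate common commits and finally receive the pack.
*/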
static struct ref *do_fetch_pack(struct fetch_pack_args *args,
int fd[2],
const struct ref *orig_ref,
struct ref **sought, int nr_sought,
char **pack_lockfile)
{
struct ref *ref = copy_ref_list(orig_ref);
unsigned char sha1[20];
const char *agent_feature;
int agent_len;
sort_ref_list(&ref, ref_compare_name);
qsort(sought, nr_sought, sizeof(*sought), cmp_ref_by_name);
if (is_repository_shallow() && !server_supports("shallow"))
die("Server does not support shallow clients");
if (server_supports("multi_ack_detailed")) {
if (args->verbose)
fprintf(stderr, "Server supports multi_ack_detailed\n");
multi_ack = 2;
if (server_supports("no-done")) {
if (args->verbose)
fprintf(stderr, "Server supports no-done\n");
if (args->stateless_rpc)
no_done = 1;
}
}
else if (server_supports("multi_ack")) {
if (args->verbose)
fprintf(stderr, "Server supports multi_ack\n");
multi_ack = 1;
}
if (server_supports("side-band-64k")) {
if (args->verbose)
fprintf(stderr, "Server supports side-band-64k\n");
use_sideband = 2;
}
else if (server_supports("side-band")) {
if (args->verbose)
fprintf(stderr, "Server supports side-band\n");
use_sideband = 1;
}
if (server_supports("allow-tip-sha1-in-want")) {
if (args->verbose)
fprintf(stderr, "Server supports allow-tip-sha1-in-want\n");
allow_tip_sha1_in_want = 1;
}
if (!server_supports("thin-pack"))
args->use_thin_pack = 0;
if (!server_supports("no-progress"))
args->no_progress = 0;
if (!server_supports("include-tag"))
args->include_tag = 0;
if (server_supports("ofs-delta")) {
if (args->verbose)
fprintf(stderr, "Server supports ofs-delta\n");
} else
prefer_ofs_delta = 0;
if ((agent_feature = server_feature_value("agent", &agent_len))) {
agent_supported = 1;
if (args->verbose && agent_len)
fprintf(stderr, "Server version is %.*s\n",
agent_len, agent_feature);
}
if (everything_local(args, &ref, sought, nr_sought)) {
packet_flush(fd[1]);
goto all_done;
}
if (find_common(args, fd, sha1, ref) < 0)
if (!args->keep_pack)
/* When cloning, it is not unusual to have
* no common commit.
*/
warning("no common commits");
if (args->stateless_rpc)
packet_flush(fd[1]);
if (args->depth > 0)
setup_alternate_shallow();
else
alternate_shallow_file = NULL;
if (get_pack(args, fd, pack_lockfile))
die("git fetch-pack: fetch failed.");
all_done:
return ref;
}
static int fetch_pack_config(const char *var, const char *value, void *cb)
{
if (strcmp(var, "fetch.unpacklimit") == 0) {
fetch_unpack_limit = git_config_int(var, value);
return 0;
}
if (strcmp(var, "transfer.unpacklimit") == 0) {
transfer_unpack_limit = git_config_int(var, value);
return 0;
}
if (strcmp(var, "repack.usedeltabaseoffset") == 0) {
prefer_ofs_delta = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "fetch.fsckobjects")) {
fetch_fsck_objects = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "transfer.fsckobjects")) {
transfer_fsck_objects = git_config_bool(var, value);
return 0;
}
return git_default_config(var, value, cb);
}
static void fetch_pack_setup(void)
{
static int did_setup;
if (did_setup)
return;
git_config(fetch_pack_config, NULL);
if (0 <= transfer_unpack_limit)
unpack_limit = transfer_unpack_limit;
else if (0 <= fetch_unpack_limit)
unpack_limit = fetch_unpack_limit;
did_setup = 1;
}
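/*
* Collapse duplicate names in the sought array, keeping the first
* occurrence of each ref; returns the new number of entries.
*/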
static int remove_duplicates_in_refs(struct ref **ref, int nr)
{
struct string_list names = STRING_LIST_INIT_NODUP;
int src, dst;
for (src = dst = 0; src < nr; src++) {
struct string_list_item *item;
item = string_list_insert(&names, ref[src]->name);
if (item->util)
continue; /* already have it */
item->util = ref[src];
if (src != dst)
ref[dst] = ref[src];
dst++;
}
for (src = dst; src < nr; src++)
ref[src] = NULL;
string_list_clear(&names, 0);
return dst;
}
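/*
* Public entry point: fetch the requested refs over the given
* connection and return the list of refs that were fetched.  When a
* depth was used, the shallow file lock is committed or rolled back
* before returning.
*/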
struct ref *fetch_pack(struct fetch_pack_args *args,
int fd[], struct child_process *conn,
const struct ref *ref,
const char *dest,
struct ref **sought, int nr_sought,
char **pack_lockfile)
{
struct ref *ref_cpy;
fetch_pack_setup();
if (nr_sought)
nr_sought = remove_duplicates_in_refs(sought, nr_sought);
if (!ref) {
packet_flush(fd[1]);
die("no matching remote head");
}
ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought, pack_lockfile);
if (args->depth > 0 && alternate_shallow_file) {
if (*alternate_shallow_file == '\0') { /* --unshallow */
unlink_or_warn(git_path("shallow"));
rollback_lock_file(&shallow_lock);
} else
commit_lock_file(&shallow_lock);
}
reprepare_packed_git();
return ref_cpy;
}