git/upload-pack.c

#include "cache.h"
#include "refs.h"
#include "pkt-line.h"
#include "sideband.h"
#include "tag.h"
#include "object.h"
#include "commit.h"
#include "exec_cmd.h"
#include "diff.h"
#include "revision.h"
#include "list-objects.h"
#include "run-command.h"

static const char upload_pack_usage[] = "git upload-pack [--strict] [--timeout=<n>] <dir>";

/* bits #0..7 in revision.h, #8..10 in commit.c */
#define THEY_HAVE (1u << 11)
#define OUR_REF (1u << 12)
#define WANTED (1u << 13)
#define COMMON_KNOWN (1u << 14)
#define REACHABLE (1u << 15)
#define SHALLOW (1u << 16)
#define NOT_SHALLOW (1u << 17)
#define CLIENT_SHALLOW (1u << 18)

static unsigned long oldest_have;
static int multi_ack, nr_our_refs;
static int no_done;
static int use_thin_pack, use_ofs_delta, use_include_tag;
static int no_progress, daemon_mode;
static int shallow_nr;
static struct object_array have_obj;
static struct object_array want_obj;
static struct object_array extra_edge_obj;
static unsigned int timeout;
/* 0 for no sideband,
 * otherwise maximum packet size (up to 65520 bytes).
 */
static int use_sideband;
static int debug_fd;
static int advertise_refs;
static int stateless_rpc;
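
/*
 * Rearm the per-request inactivity alarm; when no --timeout was
 * given, timeout is 0 and alarm(0) leaves the alarm disabled.
 */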
static void reset_timeout(void)
{
	alarm(timeout);
}

static int strip(char *line, int len)
{
	if (len && line[len-1] == '\n')
		line[--len] = 0;
	return len;
}
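
/*
 * Ship data to the client on the given channel: 1 is pack data,
 * 2 is progress/error chatter, 3 is the abort message.  With a
 * sideband in use everything is multiplexed over stdout; without
 * one, channel 3 is demoted to stderr since there is no in-band
 * way to signal the failure.
 */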
static ssize_t send_client_data(int fd, const char *data, ssize_t sz)
{
	if (use_sideband)
		return send_sideband(1, fd, data, sz, use_sideband);
	if (fd == 3)
		/* emergency quit */
		fd = 2;
	if (fd == 2) {
		/* XXX: are we happy to lose stuff here? */
		xwrite(fd, data, sz);
		return sz;
	}
	return safe_write(fd, data, sz);
}

static FILE *pack_pipe = NULL;

static void show_commit(struct commit *commit, void *data)
{
	if (commit->object.flags & BOUNDARY)
		fputc('-', pack_pipe);
	if (fputs(sha1_to_hex(commit->object.sha1), pack_pipe) < 0)
		die("broken output pipe");
	fputc('\n', pack_pipe);
	fflush(pack_pipe);
	free(commit->buffer);
	commit->buffer = NULL;
}

static void show_object(struct object *obj, const struct name_path *path, const char *component)
{
	/* An object with name "foo\n0000000..." can be used to
	 * confuse downstream git-pack-objects very badly.
	 */
	const char *name = path_name(path, component);
	const char *ep = strchr(name, '\n');
	if (ep) {
		fprintf(pack_pipe, "%s %.*s\n", sha1_to_hex(obj->sha1),
			(int) (ep - name),
			name);
	}
	else
		fprintf(pack_pipe, "%s %s\n",
			sha1_to_hex(obj->sha1), name);
	free((char *)name);
}

static void show_edge(struct commit *commit)
{
	fprintf(pack_pipe, "-%s\n", sha1_to_hex(commit->object.sha1));
}
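
/*
 * Async half of the shallow code path: walk the wants minus the
 * haves and feed the resulting object list (boundary commits
 * prefixed with '-') to git-pack-objects through pack_pipe.
 */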
static int do_rev_list(int in, int out, void *user_data)
{
	int i;
	struct rev_info revs;

	pack_pipe = xfdopen(out, "w");
	init_revisions(&revs, NULL);
	revs.tag_objects = 1;
	revs.tree_objects = 1;
	revs.blob_objects = 1;
	if (use_thin_pack)
		revs.edge_hint = 1;

	for (i = 0; i < want_obj.nr; i++) {
		struct object *o = want_obj.objects[i].item;
		/* why??? */
		o->flags &= ~UNINTERESTING;
		add_pending_object(&revs, o, NULL);
	}
	for (i = 0; i < have_obj.nr; i++) {
		struct object *o = have_obj.objects[i].item;
		o->flags |= UNINTERESTING;
		add_pending_object(&revs, o, NULL);
	}
	setup_revisions(0, NULL, &revs, NULL);
	if (prepare_revision_walk(&revs))
		die("revision walk setup failed");
	mark_edges_uninteresting(revs.commits, &revs, show_edge);
	if (use_thin_pack)
		for (i = 0; i < extra_edge_obj.nr; i++)
			fprintf(pack_pipe, "-%s\n", sha1_to_hex(
					extra_edge_obj.objects[i].item->sha1));
	traverse_commit_list(&revs, show_commit, show_object, NULL);
	fflush(pack_pipe);
	fclose(pack_pipe);
	return 0;
}
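
/*
 * Spawn git-pack-objects and stream its pack data to the client,
 * relaying its stderr as progress messages.  The pack-objects
 * reader is deliberately started before the async rev-list
 * writer: the fflush(NULL) inside start_command() could otherwise
 * deadlock against a rev-list thread blocked writing into a full,
 * as-yet-unread pipe.
 */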
static void create_pack_file(void)
{
	struct async rev_list;
	struct child_process pack_objects;
	int create_full_pack = (nr_our_refs == want_obj.nr && !have_obj.nr);
	char data[8193], progress[128];
	char abort_msg[] = "aborting due to possible repository "
		"corruption on the remote side.";
	int buffered = -1;
	ssize_t sz;
	const char *argv[10];
	int arg = 0;

	argv[arg++] = "pack-objects";
	if (!shallow_nr) {
		argv[arg++] = "--revs";
		if (create_full_pack)
			argv[arg++] = "--all";
		else if (use_thin_pack)
			argv[arg++] = "--thin";
	}
	argv[arg++] = "--stdout";
	if (!no_progress)
		argv[arg++] = "--progress";
	if (use_ofs_delta)
		argv[arg++] = "--delta-base-offset";
	if (use_include_tag)
		argv[arg++] = "--include-tag";
	argv[arg++] = NULL;

	memset(&pack_objects, 0, sizeof(pack_objects));
	pack_objects.in = -1;
	pack_objects.out = -1;
	pack_objects.err = -1;
	pack_objects.git_cmd = 1;
	pack_objects.argv = argv;

	if (start_command(&pack_objects))
		die("git upload-pack: unable to fork git-pack-objects");

	if (shallow_nr) {
		memset(&rev_list, 0, sizeof(rev_list));
		rev_list.proc = do_rev_list;
		rev_list.out = pack_objects.in;
		if (start_async(&rev_list))
			die("git upload-pack: unable to fork git-rev-list");
	}
	else {
		FILE *pipe_fd = xfdopen(pack_objects.in, "w");
		if (!create_full_pack) {
			int i;
			for (i = 0; i < want_obj.nr; i++)
				fprintf(pipe_fd, "%s\n", sha1_to_hex(want_obj.objects[i].item->sha1));
			fprintf(pipe_fd, "--not\n");
			for (i = 0; i < have_obj.nr; i++)
				fprintf(pipe_fd, "%s\n", sha1_to_hex(have_obj.objects[i].item->sha1));
		}
		fprintf(pipe_fd, "\n");
		fflush(pipe_fd);
		fclose(pipe_fd);
	}

	/* We read from pack_objects.err to capture stderr output for
	 * progress bar, and pack_objects.out to capture the pack data.
	 */
	while (1) {
		struct pollfd pfd[2];
		int pe, pu, pollsize;

		reset_timeout();

		pollsize = 0;
		pe = pu = -1;

		if (0 <= pack_objects.out) {
			pfd[pollsize].fd = pack_objects.out;
			pfd[pollsize].events = POLLIN;
			pu = pollsize;
			pollsize++;
		}
		if (0 <= pack_objects.err) {
			pfd[pollsize].fd = pack_objects.err;
			pfd[pollsize].events = POLLIN;
			pe = pollsize;
			pollsize++;
		}

		if (!pollsize)
			break;

		if (poll(pfd, pollsize, -1) < 0) {
			if (errno != EINTR) {
				error("poll failed, resuming: %s",
				      strerror(errno));
				sleep(1);
			}
			continue;
		}
		if (0 <= pe && (pfd[pe].revents & (POLLIN|POLLHUP))) {
			/* Status ready; we ship that in the side-band
			 * or dump to the standard error.
			 */
			sz = xread(pack_objects.err, progress,
				   sizeof(progress));
			if (0 < sz)
				send_client_data(2, progress, sz);
			else if (sz == 0) {
				close(pack_objects.err);
				pack_objects.err = -1;
			}
			else
				goto fail;
			/* give priority to status messages */
			continue;
		}
		if (0 <= pu && (pfd[pu].revents & (POLLIN|POLLHUP))) {
			/* Data ready; we keep the last byte to ourselves
			 * in case we detect broken rev-list, so that we
			 * can leave the stream corrupted.  This is
			 * unfortunate -- unpack-objects would happily
			 * accept a valid packdata with trailing garbage,
			 * so appending garbage after we pass all the
			 * pack data is not good enough to signal
			 * breakage to downstream.
			 */
			char *cp = data;
			ssize_t outsz = 0;
			if (0 <= buffered) {
				*cp++ = buffered;
				outsz++;
			}
			sz = xread(pack_objects.out, cp,
				   sizeof(data) - outsz);
			if (0 < sz)
				;
			else if (sz == 0) {
				close(pack_objects.out);
				pack_objects.out = -1;
			}
			else
				goto fail;
			sz += outsz;
			if (1 < sz) {
				buffered = data[sz-1] & 0xFF;
				sz--;
			}
			else
				buffered = -1;
			sz = send_client_data(1, data, sz);
			if (sz < 0)
				goto fail;
		}
	}

	if (finish_command(&pack_objects)) {
		error("git upload-pack: git-pack-objects died with error.");
		goto fail;
	}
	if (shallow_nr && finish_async(&rev_list))
		goto fail;	/* error was already reported */

	/* flush the data */
	if (0 <= buffered) {
		data[0] = buffered;
		sz = send_client_data(1, data, 1);
		if (sz < 0)
			goto fail;
		fprintf(stderr, "flushed.\n");
	}
	if (use_sideband)
		packet_flush(1);
	return;

 fail:
	send_client_data(3, abort_msg, sizeof(abort_msg));
	die("git upload-pack: %s", abort_msg);
}
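
/*
 * Process one "have" line: returns -1 if we lack the object,
 * 1 if it is a new common object (recorded in have_obj), and
 * 0 if we already knew the client had it.
 */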
static int got_sha1(char *hex, unsigned char *sha1)
{
	struct object *o;
	int we_knew_they_have = 0;

	if (get_sha1_hex(hex, sha1))
		die("git upload-pack: expected SHA1 object, got '%s'", hex);
	if (!has_sha1_file(sha1))
		return -1;

	o = lookup_object(sha1);
	if (!(o && o->parsed))
		o = parse_object(sha1);
	if (!o)
		die("oops (%s)", sha1_to_hex(sha1));
	if (o->type == OBJ_COMMIT) {
		struct commit_list *parents;
		struct commit *commit = (struct commit *)o;
		if (o->flags & THEY_HAVE)
			we_knew_they_have = 1;
		else
			o->flags |= THEY_HAVE;
		if (!oldest_have || (commit->date < oldest_have))
			oldest_have = commit->date;
		for (parents = commit->parents;
		     parents;
		     parents = parents->next)
			parents->item->object.flags |= THEY_HAVE;
	}
	if (!we_knew_they_have) {
		add_object_array(o, NULL, &have_obj);
		return 1;
	}
	return 0;
}
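
/*
 * Walk the ancestry of "want", newest first, looking for a commit
 * the client has told us about.  Commits older than oldest_have
 * are not expanded further, keeping the walk bounded.
 */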
static int reachable(struct commit *want)
{
	struct commit_list *work = NULL;

	commit_list_insert_by_date(want, &work);
	while (work) {
		struct commit_list *list = work->next;
		struct commit *commit = work->item;
		free(work);
		work = list;

		if (commit->object.flags & THEY_HAVE) {
			want->object.flags |= COMMON_KNOWN;
			break;
		}
		if (!commit->object.parsed)
			parse_object(commit->object.sha1);
		if (commit->object.flags & REACHABLE)
			continue;
		commit->object.flags |= REACHABLE;
		if (commit->date < oldest_have)
			continue;
		for (list = commit->parents; list; list = list->next) {
			struct commit *parent = list->item;
			if (!(parent->object.flags & REACHABLE))
				commit_list_insert_by_date(parent, &work);
		}
	}
	want->object.flags |= REACHABLE;
	clear_commit_marks(want, REACHABLE);
	free_commit_list(work);
	return (want->object.flags & COMMON_KNOWN);
}
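
/*
 * True once every want is known to be reachable from something
 * the client already has (non-commit wants are exempted, since
 * ancestry alone cannot prove their reachability).
 */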
static int ok_to_give_up(void)
{
	int i;

	if (!have_obj.nr)
		return 0;

	for (i = 0; i < want_obj.nr; i++) {
		struct object *want = want_obj.objects[i].item;

		if (want->flags & COMMON_KNOWN)
			continue;
		want = deref_tag(want, "a want line", 0);
		if (!want || want->type != OBJ_COMMIT) {
			/* no way to tell if this is reachable by
			 * looking at the ancestry chain alone, so
			 * leave a note to ourselves not to worry about
			 * this object anymore.
			 */
			want_obj.objects[i].item->flags |= COMMON_KNOWN;
			continue;
		}
		if (!reachable((struct commit *)want))
			return 0;
	}
	return 1;
}
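
/*
 * The negotiation loop: read "have" lines from the client and
 * answer with ACK/NAK according to the multi_ack level agreed on
 * earlier; with no-done, a final ACK is sent as soon as we are
 * ready, without waiting for the client's "done".
 */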
static int get_common_commits(void)
{
static char line[1000];
unsigned char sha1[20];
Add multi_ack_detailed capability to fetch-pack/upload-pack When multi_ack_detailed is enabled the ACK continue messages returned by the remote upload-pack are broken out to describe the different states within the peer. This permits the client to better understand the server's in-memory state. The fetch-pack/upload-pack protocol now looks like: NAK --------------------------------- Always sent in response to "done" if there was no common base selected from the "have" lines (or no have lines were sent). * no multi_ack or multi_ack_detailed: Sent when the client has sent a pkt-line flush ("0000") and the server has not yet found a common base object. * either multi_ack or multi_ack_detailed: Always sent in response to a pkt-line flush. ACK %s ----------------------------------- * no multi_ack or multi_ack_detailed: Sent in response to "have" when the object exists on the remote side and is therefore an object in common between the peers. The argument is the SHA-1 of the common object. * either multi_ack or multi_ack_detailed: Sent in response to "done" if there are common objects. The argument is the last SHA-1 determined to be common. ACK %s continue ----------------------------------- * multi_ack only: Sent in response to "have". The remote side wants the client to consider this object as common, and immediately stop transmitting additional "have" lines for objects that are reachable from it. The reason the client should stop is not given, but is one of the two cases below available under multi_ack_detailed. ACK %s common ----------------------------------- * multi_ack_detailed only: Sent in response to "have". Both sides have this object. Like with "ACK %s continue" above the client should stop sending have lines reachable for objects from the argument. ACK %s ready ----------------------------------- * multi_ack_detailed only: Sent in response to "have". The client should stop transmitting objects which are reachable from the argument, and send "done" soon to get the objects. If the remote side has the specified object, it should first send an "ACK %s common" message prior to sending "ACK %s ready". Clients may still submit additional "have" lines if there are more side branches for the client to explore that might be added to the common set and reduce the number of objects to transfer. Signed-off-by: Shawn O. Pearce <spearce@spearce.org> Signed-off-by: Junio C Hamano <gitster@pobox.com>
2009-10-31 08:47:25 +08:00
char last_hex[41];
int got_common = 0;
int got_other = 0;
int sent_ready = 0;
save_commit_buffer = 0;
for (;;) {
int len = packet_read_line(0, line, sizeof(line));
reset_timeout();
if (!len) {
if (multi_ack == 2 && got_common
&& !got_other && ok_to_give_up()) {
sent_ready = 1;
packet_write(1, "ACK %s ready\n", last_hex);
}
if (have_obj.nr == 0 || multi_ack)
packet_write(1, "NAK\n");
if (no_done && sent_ready) {
packet_write(1, "ACK %s\n", last_hex);
return 0;
}
if (stateless_rpc)
exit(0);
got_common = 0;
got_other = 0;
continue;
}
strip(line, len);
if (!prefixcmp(line, "have ")) {
switch (got_sha1(line+5, sha1)) {
case -1: /* they have what we do not */
got_other = 1;
if (multi_ack && ok_to_give_up()) {
const char *hex = sha1_to_hex(sha1);
if (multi_ack == 2) {
sent_ready = 1;
packet_write(1, "ACK %s ready\n", hex);
} else
packet_write(1, "ACK %s continue\n", hex);
}
break;
default:
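/* the object is known to both sides; remember it as the
 * most recent common point
 */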
got_common = 1;
memcpy(last_hex, sha1_to_hex(sha1), 41);
if (multi_ack == 2)
packet_write(1, "ACK %s common\n", last_hex);
else if (multi_ack)
packet_write(1, "ACK %s continue\n", last_hex);
else if (have_obj.nr == 1)
packet_write(1, "ACK %s\n", last_hex);
break;
}
continue;
}
if (!strcmp(line, "done")) {
if (have_obj.nr > 0) {
if (multi_ack)
packet_write(1, "ACK %s\n", last_hex);
return 0;
}
packet_write(1, "NAK\n");
return -1;
}
die("git upload-pack: expected SHA1 list, got '%s'", line);
}
}
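
/* Read the client's "want", "shallow" and "deepen" lines (plus the
 * capability list carried on the first "want") and record what was
 * asked for.
 */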
static void receive_needs(void)
{
struct object_array shallows = OBJECT_ARRAY_INIT;
static char line[1000];
int len, depth = 0;
shallow_nr = 0;
if (debug_fd)
write_str_in_full(debug_fd, "#S\n");
for (;;) {
struct object *o;
unsigned char sha1_buf[20];
len = packet_read_line(0, line, sizeof(line));
reset_timeout();
if (!len)
break;
if (debug_fd)
write_in_full(debug_fd, line, len);
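/* "shallow <sha1>": the client tells us about the shallow
 * boundaries of its existing, partial history.
 */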
if (!prefixcmp(line, "shallow ")) {
unsigned char sha1[20];
struct object *object;
if (get_sha1(line + 8, sha1))
die("invalid shallow line: %s", line);
object = parse_object(sha1);
if (!object)
die("did not find object for %s", line);
object->flags |= CLIENT_SHALLOW;
add_object_array(object, NULL, &shallows);
continue;
}
if (!prefixcmp(line, "deepen ")) {
char *end;
depth = strtol(line + 7, &end, 0);
if (end == line + 7 || depth <= 0)
die("Invalid deepen: %s", line);
continue;
}
if (prefixcmp(line, "want ") ||
get_sha1_hex(line+5, sha1_buf))
die("git upload-pack: protocol error, "
"expected to get sha, not '%s'", line);
if (strstr(line+45, "multi_ack_detailed"))
multi_ack = 2;
else if (strstr(line+45, "multi_ack"))
multi_ack = 1;
if (strstr(line+45, "no-done"))
no_done = 1;
if (strstr(line+45, "thin-pack"))
use_thin_pack = 1;
if (strstr(line+45, "ofs-delta"))
use_ofs_delta = 1;
if (strstr(line+45, "side-band-64k"))
use_sideband = LARGE_PACKET_MAX;
else if (strstr(line+45, "side-band"))
use_sideband = DEFAULT_PACKET_MAX;
if (strstr(line+45, "no-progress"))
no_progress = 1;
if (strstr(line+45, "include-tag"))
use_include_tag = 1;
/* We have sent all our refs already, and the other end
* should have chosen out of them; otherwise they are
* asking for nonsense.
*
* Hmph. We may later want to allow a "want" line that
* asks for something like "master~10" (symbolic)...
* would it make sense? I don't know.
*/
o = lookup_object(sha1_buf);
if (!o || !(o->flags & OUR_REF))
die("git upload-pack: not our ref %s",
sha1_to_hex(sha1_buf));
if (!(o->flags & WANTED)) {
o->flags |= WANTED;
add_object_array(o, NULL, &want_obj);
}
}
if (debug_fd)
write_str_in_full(debug_fd, "#E\n");
if (!use_sideband && daemon_mode)
no_progress = 1;
if (depth == 0 && shallows.nr == 0)
return;
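/* The client asked for a shallow fetch; compute which commits
 * become the new shallow boundary at the requested depth and
 * advertise them with "shallow" lines.
 */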
if (depth > 0) {
struct commit_list *result, *backup;
int i;
backup = result = get_shallow_commits(&want_obj, depth,
SHALLOW, NOT_SHALLOW);
while (result) {
struct object *object = &result->item->object;
if (!(object->flags & (CLIENT_SHALLOW|NOT_SHALLOW))) {
packet_write(1, "shallow %s",
sha1_to_hex(object->sha1));
register_shallow(object->sha1);
shallow_nr++;
}
result = result->next;
}
free_commit_list(backup);
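/* Commits the client had marked shallow but that now fall
 * within the requested depth become complete again: tell the
 * client to "unshallow" them and pull in their parents.
 */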
for (i = 0; i < shallows.nr; i++) {
struct object *object = shallows.objects[i].item;
if (object->flags & NOT_SHALLOW) {
struct commit_list *parents;
packet_write(1, "unshallow %s",
sha1_to_hex(object->sha1));
object->flags &= ~CLIENT_SHALLOW;
/* make sure the real parents are parsed */
unregister_shallow(object->sha1);
object->parsed = 0;
if (parse_commit((struct commit *)object))
die("invalid commit");
parents = ((struct commit *)object)->parents;
while (parents) {
add_object_array(&parents->item->object,
NULL, &want_obj);
parents = parents->next;
}
add_object_array(object, NULL, &extra_edge_obj);
}
/* make sure commit traversal conforms to client */
register_shallow(object->sha1);
}
packet_flush(1);
} else if (shallows.nr > 0) {
int i;
for (i = 0; i < shallows.nr; i++)
register_shallow(shallows.objects[i].item->sha1);
}
shallow_nr += shallows.nr;
free(shallows.objects);
}
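
/* Advertise one ref to the client.  The very first ref line also
 * carries our capability list, separated from the ref name by a NUL
 * byte; an annotated tag is followed by a peeled "^{}" line.
 */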
static int send_ref(const char *refname, const unsigned char *sha1, int flag, void *cb_data)
{
static const char *capabilities = "multi_ack thin-pack side-band"
" side-band-64k ofs-delta shallow no-progress"
" include-tag multi_ack_detailed";
struct object *o = parse_object(sha1);
if (!o)
die("git upload-pack: cannot find object %s:", sha1_to_hex(sha1));
if (capabilities)
packet_write(1, "%s %s%c%s%s\n", sha1_to_hex(sha1), refname,
0, capabilities,
stateless_rpc ? " no-done" : "");
else
packet_write(1, "%s %s\n", sha1_to_hex(sha1), refname);
capabilities = NULL;
if (!(o->flags & OUR_REF)) {
o->flags |= OUR_REF;
nr_our_refs++;
}
if (o->type == OBJ_TAG) {
o = deref_tag(o, refname, 0);
if (o)
packet_write(1, "%s %s^{}\n", sha1_to_hex(o->sha1), refname);
}
return 0;
}
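
/* Like send_ref() but only flags the ref as ours without writing
 * anything; used on the stateless-rpc follow-up request, where the
 * advertisement already happened in an earlier round trip.
 */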
static int mark_our_ref(const char *refname, const unsigned char *sha1, int flag, void *cb_data)
{
struct object *o = parse_object(sha1);
if (!o)
die("git upload-pack: cannot find object %s:", sha1_to_hex(sha1));
if (!(o->flags & OUR_REF)) {
o->flags |= OUR_REF;
nr_our_refs++;
}
return 0;
}
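
/* Drive the whole exchange: advertise (or merely mark) our refs,
 * read the client's wants, negotiate a common base, and stream the
 * resulting pack.
 */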
static void upload_pack(void)
{
if (advertise_refs || !stateless_rpc) {
reset_timeout();
head_ref(send_ref, NULL);
for_each_ref(send_ref, NULL);
packet_flush(1);
} else {
head_ref(mark_our_ref, NULL);
for_each_ref(mark_our_ref, NULL);
}
if (advertise_refs)
return;
receive_needs();
if (want_obj.nr) {
get_common_commits();
create_pack_file();
}
}
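
/* Entry point: parse the command line, enter the repository and
 * serve the fetch.
 */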
int main(int argc, char **argv)
{
char *dir;
int i;
int strict = 0;
packet_trace_identity("upload-pack");
git_extract_argv0_path(argv[0]);
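/* serve the real objects, ignoring any replace refs */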
read_replace_refs = 0;
for (i = 1; i < argc; i++) {
char *arg = argv[i];
if (arg[0] != '-')
break;
if (!strcmp(arg, "--advertise-refs")) {
advertise_refs = 1;
continue;
}
if (!strcmp(arg, "--stateless-rpc")) {
stateless_rpc = 1;
continue;
}
if (!strcmp(arg, "--strict")) {
strict = 1;
continue;
}
if (!prefixcmp(arg, "--timeout=")) {
timeout = atoi(arg+10);
daemon_mode = 1;
continue;
}
if (!strcmp(arg, "--")) {
i++;
break;
}
}
if (i != argc-1)
usage(upload_pack_usage);
setup_path();
dir = argv[i];
if (!enter_repo(dir, strict))
die("'%s' does not appear to be a git repository", dir);
if (is_repository_shallow())
die("attempt to fetch/clone from a shallow repository");
if (getenv("GIT_DEBUG_SEND_PACK"))
debug_fd = atoi(getenv("GIT_DEBUG_SEND_PACK"));
upload_pack();
return 0;
}