#include "fetch.h"
#include "cache.h"
#include "commit.h"
#include "tree.h"
#include "tree-walk.h"
#include "tag.h"
#include "blob.h"
#include "refs.h"
#include "strbuf.h"

int get_tree = 0;
int get_history = 0;
int get_all = 0;
int get_verbosely = 0;
int get_recover = 0;
static unsigned char current_commit_sha1[20];

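/* In verbose mode (get_verbosely), print a progress line such as "walk <sha1>". */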
void pull_say(const char *fmt, const char *hex)
{
        if (get_verbosely)
                fprintf(stderr, fmt, hex);
}

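/*
 * Report an object that could not be fetched, along with the commit
 * that was being walked when the object turned out to be missing.
 */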
static void report_missing(const char *what, const unsigned char *missing)
{
        char missing_hex[41];

        strcpy(missing_hex, sha1_to_hex(missing));
        fprintf(stderr,
                "Cannot obtain needed %s %s\nwhile processing commit %s.\n",
                what, missing_hex, sha1_to_hex(current_commit_sha1));
}

static int process(struct object *obj);

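/*
 * Scan a tree object and queue everything it refers to, subtrees and
 * blobs alike.  The tree buffer is freed once it has been walked.
 */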
static int process_tree(struct tree *tree)
{
        struct tree_desc desc;
        struct name_entry entry;

        if (parse_tree(tree))
                return -1;

        desc.buf = tree->buffer;
        desc.size = tree->size;
        while (tree_entry(&desc, &entry)) {
                struct object *obj = NULL;

                if (S_ISDIR(entry.mode)) {
                        struct tree *tree = lookup_tree(entry.sha1);
                        if (tree)
                                obj = &tree->object;
                }
                else {
                        struct blob *blob = lookup_blob(entry.sha1);
                        if (blob)
                                obj = &blob->object;
                }
                if (!obj || process(obj))
                        return -1;
        }
        free(tree->buffer);
        tree->buffer = NULL;
        tree->size = 0;
        return 0;
}

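/*
 * Flags used on objects during the walk: COMPLETE marks commits whose
 * history is already available locally, SEEN marks objects that have
 * already been queued, and TO_SCAN marks objects that are present
 * locally and only need their references scanned.
 */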
#define COMPLETE (1U << 0)
#define SEEN (1U << 1)
#define TO_SCAN (1U << 2)

static struct commit_list *complete = NULL;

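/*
 * Walk a commit: queue its tree when get_tree is set, and its parents
 * when get_history is set.  Commits already marked COMPLETE are not
 * walked again.
 */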
static int process_commit(struct commit *commit)
{
        if (parse_commit(commit))
                return -1;

        while (complete && complete->item->date >= commit->date) {
                pop_most_recent_commit(&complete, COMPLETE);
        }

        if (commit->object.flags & COMPLETE)
                return 0;

        hashcpy(current_commit_sha1, commit->object.sha1);

        pull_say("walk %s\n", sha1_to_hex(commit->object.sha1));

        if (get_tree) {
                if (process(&commit->tree->object))
                        return -1;
                if (!get_all)
                        get_tree = 0;
        }
        if (get_history) {
                struct commit_list *parents = commit->parents;
                for (; parents; parents = parents->next) {
                        if (process(&parents->item->object))
                                return -1;
                }
        }
        return 0;
}

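/* A tag is walked by following it to whatever object it points at. */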
static int process_tag(struct tag *tag)
{
        if (parse_tag(tag))
                return -1;
        return process(tag->tagged);
}

static struct object_list *process_queue = NULL;
static struct object_list **process_queue_end = &process_queue;

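/* Dispatch a queued object to the per-type handler; blobs need no further walking. */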
static int process_object(struct object *obj)
{
        if (obj->type == OBJ_COMMIT) {
                if (process_commit((struct commit *)obj))
                        return -1;
                return 0;
        }
        if (obj->type == OBJ_TREE) {
                if (process_tree((struct tree *)obj))
                        return -1;
                return 0;
        }
        if (obj->type == OBJ_BLOB) {
                return 0;
        }
        if (obj->type == OBJ_TAG) {
                if (process_tag((struct tag *)obj))
                        return -1;
                return 0;
        }
        return error("Unable to determine requirements "
                     "of type %s for %s",
                     typename(obj->type), sha1_to_hex(obj->sha1));
}

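/*
 * Decide what to do with an object: if it is already in the local
 * object database it only needs to be scanned (TO_SCAN); otherwise it
 * is prefetched.  The SEEN flag ensures it is queued at most once.
 */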
static int process(struct object *obj)
{
        if (obj->flags & SEEN)
                return 0;
        obj->flags |= SEEN;

        if (has_sha1_file(obj->sha1)) {
                /* We already have it, so we should scan it now. */
                obj->flags |= TO_SCAN;
        }
        else {
                if (obj->flags & COMPLETE)
                        return 0;
                prefetch(obj->sha1);
        }

        object_list_insert(obj, process_queue_end);
        process_queue_end = &(*process_queue_end)->next;
        return 0;
}

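/*
 * Drain the process queue: fetch any object that is not yet local,
 * then parse it and queue whatever it references in turn.
 */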
static int loop(void)
{
        struct object_list *elem;

        while (process_queue) {
                struct object *obj = process_queue->item;
                elem = process_queue;
                process_queue = elem->next;
                free(elem);
                if (!process_queue)
                        process_queue_end = &process_queue;

                /* If we are not scanning this object, we placed it in
                 * the queue because we needed to fetch it first.
                 */
                if (! (obj->flags & TO_SCAN)) {
                        if (fetch(obj->sha1)) {
                                report_missing(typename(obj->type), obj->sha1);
                                return -1;
                        }
                }
                if (!obj->type)
                        parse_object(obj->sha1);
                if (process_object(obj))
                        return -1;
        }
        return 0;
}

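/*
 * A target may be given either as a raw hex SHA-1 or as a ref name,
 * in which case the ref is looked up on the remote side via fetch_ref().
 */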
static int interpret_target(char *target, unsigned char *sha1)
{
        if (!get_sha1_hex(target, sha1))
                return 0;
        if (!check_ref_format(target)) {
                if (!fetch_ref(target, sha1)) {
                        return 0;
                }
        }
        return -1;
}

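/*
 * for_each_ref() callback: mark each commit our local refs point at as
 * COMPLETE and keep it on the 'complete' list, so the walk can cut off
 * when it reaches already-known history.
 */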
static int mark_complete(const char *path, const unsigned char *sha1)
{
        struct commit *commit = lookup_commit_reference_gently(sha1, 1);
        if (commit) {
                commit->object.flags |= COMPLETE;
                insert_by_date(commit, &complete);
        }
        return 0;
}

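/*
 * Read targets from stdin, one per line, each optionally followed by a
 * TAB and the local ref to write the result to.  Fills the target and
 * write_ref arrays and returns the number of targets read.
 */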
int pull_targets_stdin(char ***target, const char ***write_ref)
{
        int targets = 0, targets_alloc = 0;
        struct strbuf buf;
        *target = NULL; *write_ref = NULL;
        strbuf_init(&buf);
        while (1) {
                char *rf_one = NULL;
                char *tg_one;

                read_line(&buf, stdin, '\n');
                if (buf.eof)
                        break;
                tg_one = buf.buf;
                rf_one = strchr(tg_one, '\t');
                if (rf_one)
                        *rf_one++ = 0;

                if (targets >= targets_alloc) {
                        targets_alloc = targets_alloc ? targets_alloc * 2 : 64;
                        *target = xrealloc(*target, targets_alloc * sizeof(**target));
                        *write_ref = xrealloc(*write_ref, targets_alloc * sizeof(**write_ref));
                }
                (*target)[targets] = xstrdup(tg_one);
                (*write_ref)[targets] = rf_one ? xstrdup(rf_one) : NULL;
                targets++;
        }
        return targets;
}

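/* Release the per-target strings built by pull_targets_stdin(). */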
void pull_targets_free(int targets, char **target, const char **write_ref)
{
        while (targets--) {
                free(target[targets]);
                if (write_ref && write_ref[targets])
                        free((char *) write_ref[targets]);
        }
}

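/*
 * Top-level driver: lock any refs that will be updated, note what we
 * already have, fetch everything reachable from each target, and
 * finally write the results to the locked refs (with an optional
 * reflog message).
 */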
int pull(int targets, char **target, const char **write_ref,
         const char *write_ref_log_details)
{
        struct ref_lock **lock = xcalloc(targets, sizeof(struct ref_lock *));
        unsigned char *sha1 = xmalloc(targets * 20);
        char *msg;
        int ret;
        int i;

        save_commit_buffer = 0;
        track_object_refs = 0;

        for (i = 0; i < targets; i++) {
                if (!write_ref || !write_ref[i])
                        continue;

                lock[i] = lock_ref_sha1(write_ref[i], NULL, 0);
                if (!lock[i]) {
                        error("Can't lock ref %s", write_ref[i]);
                        goto unlock_and_fail;
                }
        }

        if (!get_recover)
                for_each_ref(mark_complete);

        for (i = 0; i < targets; i++) {
                if (interpret_target(target[i], &sha1[20 * i])) {
                        error("Could not interpret %s as something to pull", target[i]);
                        goto unlock_and_fail;
                }
                if (process(lookup_unknown_object(&sha1[20 * i])))
                        goto unlock_and_fail;
        }

        if (loop())
                goto unlock_and_fail;

        if (write_ref_log_details) {
                msg = xmalloc(strlen(write_ref_log_details) + 12);
                sprintf(msg, "fetch from %s", write_ref_log_details);
        } else {
                msg = NULL;
        }
        for (i = 0; i < targets; i++) {
                if (!write_ref || !write_ref[i])
                        continue;
                ret = write_ref_sha1(lock[i], &sha1[20 * i], msg ? msg : "fetch (unknown)");
                lock[i] = NULL;
                if (ret)
                        goto unlock_and_fail;
        }
        free(msg);

        return 0;

unlock_and_fail:
        for (i = 0; i < targets; i++)
                if (lock[i])
                        unlock_ref(lock[i]);
        return -1;
}