#define NO_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
#include "cache-tree.h"
#include "tree.h"
#include "blob.h"
#include "commit.h"
#include "tag.h"
#include "tree-walk.h"

const char *tree_type = "tree";

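/*
 * Turn a tree entry into a cache entry at the given stage and add it
 * to the index; directories are not added here but reported back to
 * the tree walker so it can recurse into them.
 */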
static int read_one_entry_opt(struct index_state *istate,
			      const unsigned char *sha1,
			      const char *base, int baselen,
			      const char *pathname,
			      unsigned mode, int stage, int opt)
{
	int len;
	unsigned int size;
	struct cache_entry *ce;

	if (S_ISDIR(mode))
		return READ_TREE_RECURSIVE;

	len = strlen(pathname);
	size = cache_entry_size(baselen + len);
	ce = xcalloc(1, size);

	ce->ce_mode = create_ce_mode(mode);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = baselen + len;
	memcpy(ce->name, base, baselen);
	memcpy(ce->name + baselen, pathname, len+1);
	hashcpy(ce->oid.hash, sha1);
	return add_index_entry(istate, ce, opt);
}

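/*
 * Callback for the slow path: each entry is inserted at its sorted
 * position in the index, which is needed when entries may already
 * exist at this stage.
 */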
static int read_one_entry(const unsigned char *sha1, struct strbuf *base,
			  const char *pathname, unsigned mode, int stage,
			  void *context)
{
	struct index_state *istate = context;
	return read_one_entry_opt(istate, sha1, base->buf, base->len, pathname,
				  mode, stage,
				  ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
}

/*
 * This is used when the caller knows there are no existing entries at
 * the stage that will conflict with the entry being added.
 */
static int read_one_entry_quick(const unsigned char *sha1, struct strbuf *base,
				const char *pathname, unsigned mode, int stage,
				void *context)
{
	struct index_state *istate = context;
	return read_one_entry_opt(istate, sha1, base->buf, base->len, pathname,
				  mode, stage,
				  ADD_CACHE_JUST_APPEND);
}

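/*
 * Walk one tree level: filter entries against the pathspec, hand each
 * interesting entry to fn, and recurse into subtrees (and into the
 * trees of submodule commits) when fn asks for it.
 */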
static int read_tree_1(struct tree *tree, struct strbuf *base,
		       int stage, const struct pathspec *pathspec,
		       read_tree_fn_t fn, void *context)
{
	struct tree_desc desc;
	struct name_entry entry;
	struct object_id oid;
	int len, oldlen = base->len;
	enum interesting retval = entry_not_interesting;

	if (parse_tree(tree))
		return -1;

	init_tree_desc(&desc, tree->buffer, tree->size);

	while (tree_entry(&desc, &entry)) {
		if (retval != all_entries_interesting) {
			retval = tree_entry_interesting(&entry, base, 0, pathspec);
			if (retval == all_entries_not_interesting)
				break;
			if (retval == entry_not_interesting)
				continue;
		}

		switch (fn(entry.oid->hash, base,
			   entry.path, entry.mode, stage, context)) {
		case 0:
			continue;
		case READ_TREE_RECURSIVE:
			break;
		default:
			return -1;
		}

		if (S_ISDIR(entry.mode))
			oidcpy(&oid, entry.oid);
		else if (S_ISGITLINK(entry.mode)) {
			struct commit *commit;

			commit = lookup_commit(entry.oid);
			if (!commit)
				die("Commit %s in submodule path %s%s not found",
				    oid_to_hex(entry.oid),
				    base->buf, entry.path);

			if (parse_commit(commit))
				die("Invalid commit %s in submodule path %s%s",
				    oid_to_hex(entry.oid),
				    base->buf, entry.path);

			oidcpy(&oid, &commit->tree->object.oid);
		}
		else
			continue;

		len = tree_entry_len(&entry);
		strbuf_add(base, entry.path, len);
		strbuf_addch(base, '/');
		retval = read_tree_1(lookup_tree(&oid),
				     base, stage, pathspec,
				     fn, context);
		strbuf_setlen(base, oldlen);
		if (retval)
			return -1;
	}
	return 0;
}

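/*
 * Entry point for the recursive walk: fn is called once per entry
 * under "base"; returning READ_TREE_RECURSIVE from fn for a directory
 * makes the walk descend into it.
 */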
int read_tree_recursive(struct tree *tree,
			const char *base, int baselen,
			int stage, const struct pathspec *pathspec,
			read_tree_fn_t fn, void *context)
{
	struct strbuf sb = STRBUF_INIT;
	int ret;

	strbuf_add(&sb, base, baselen);
	ret = read_tree_1(tree, &sb, stage, pathspec, fn, context);
	strbuf_release(&sb);
	return ret;
}

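/* QSORT comparison function: order cache entries by name and stage. */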
static int cmp_cache_name_compare(const void *a_, const void *b_)
{
	const struct cache_entry *ce1, *ce2;

	ce1 = *((const struct cache_entry **)a_);
	ce2 = *((const struct cache_entry **)b_);
	return cache_name_stage_compare(ce1->name, ce1->ce_namelen, ce_stage(ce1),
					ce2->name, ce2->ce_namelen, ce_stage(ce2));
}

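/*
 * Read a whole tree into the index at the given stage, limited to the
 * paths matched by "match".
 */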
int read_tree(struct tree *tree, int stage, struct pathspec *match,
	      struct index_state *istate)
{
	read_tree_fn_t fn = NULL;
	int i, err;

	/*
	 * Currently the only existing callers of this function all
	 * call it with stage=1 and after making sure there is nothing
	 * at that stage; we could always use read_one_entry_quick().
	 *
	 * But when we decide to straighten out git-read-tree not to
	 * use unpack_trees() in some cases, this will probably start
	 * to matter.
	 */

	/*
	 * See if we have a cache entry at the stage.  If so,
	 * do it the original slow way; otherwise, append and then
	 * sort at the end.
	 */
	for (i = 0; !fn && i < istate->cache_nr; i++) {
		const struct cache_entry *ce = istate->cache[i];
		if (ce_stage(ce) == stage)
			fn = read_one_entry;
	}

	if (!fn)
		fn = read_one_entry_quick;
	err = read_tree_recursive(tree, "", 0, stage, match, fn, istate);
	if (fn == read_one_entry || err)
		return err;

	/*
	 * Sort the cache entries -- we need to nuke the cache tree, though.
	 */
	cache_tree_free(&istate->cache_tree);
	QSORT(istate->cache, istate->cache_nr, cmp_cache_name_compare);
	return 0;
}

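/*
 * Return the tree object for oid, allocating a new (unparsed) object
 * if it has not been seen yet.
 */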
struct tree *lookup_tree(const struct object_id *oid)
{
	struct object *obj = lookup_object(oid->hash);
	if (!obj)
		return create_object(oid->hash, alloc_tree_node());
	return object_as_type(obj, OBJ_TREE, 0);
}

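/*
 * Attach an already-read tree buffer to the object and mark it parsed;
 * the tree takes ownership of the buffer.
 */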
int parse_tree_buffer(struct tree *item, void *buffer, unsigned long size)
{
	if (item->object.parsed)
		return 0;
	item->object.parsed = 1;
	item->buffer = buffer;
	item->size = size;

	return 0;
}

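/*
 * Read the tree from the object database and parse it; with
 * quiet_on_missing, a missing object is reported by the return value
 * only, without an error message.
 */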
int parse_tree_gently(struct tree *item, int quiet_on_missing)
{
	enum object_type type;
	void *buffer;
	unsigned long size;

	if (item->object.parsed)
		return 0;
	buffer = read_sha1_file(item->object.oid.hash, &type, &size);
	if (!buffer)
		return quiet_on_missing ? -1 :
			error("Could not read %s",
			      oid_to_hex(&item->object.oid));
	if (type != OBJ_TREE) {
		free(buffer);
		return error("Object %s not a tree",
			     oid_to_hex(&item->object.oid));
	}
	return parse_tree_buffer(item, buffer, size);
}

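/* Drop the tree's buffer and mark it unparsed so it can be re-read. */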
void free_tree_buffer(struct tree *tree)
{
	FREE_AND_NULL(tree->buffer);
	tree->size = 0;
	tree->object.parsed = 0;
}

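/*
 * Parse the object and peel tags and commits until a tree is reached;
 * returns NULL if the object is missing or does not peel to a tree.
 */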
struct tree *parse_tree_indirect(const struct object_id *oid)
{
	struct object *obj = parse_object(oid);
	do {
		if (!obj)
			return NULL;
		if (obj->type == OBJ_TREE)
			return (struct tree *) obj;
		else if (obj->type == OBJ_COMMIT)
			obj = &(((struct commit *) obj)->tree->object);
		else if (obj->type == OBJ_TAG)
			obj = ((struct tag *) obj)->tagged;
		else
			return NULL;
		if (!obj->parsed)
			parse_object(&obj->oid);
	} while (1);
}