2007-09-11 11:03:25 +08:00
|
|
|
/*
|
|
|
|
* "git fetch"
|
|
|
|
*/
|
|
|
|
#include "cache.h"
|
2017-06-15 02:07:36 +08:00
|
|
|
#include "config.h"
|
2017-12-13 03:53:52 +08:00
|
|
|
#include "repository.h"
|
2007-09-11 11:03:25 +08:00
|
|
|
#include "refs.h"
|
2018-05-17 06:57:48 +08:00
|
|
|
#include "refspec.h"
|
2018-05-16 07:42:15 +08:00
|
|
|
#include "object-store.h"
|
2019-09-16 05:18:02 +08:00
|
|
|
#include "oidset.h"
|
2007-09-11 11:03:25 +08:00
|
|
|
#include "commit.h"
|
|
|
|
#include "builtin.h"
|
2008-07-22 02:03:49 +08:00
|
|
|
#include "string-list.h"
|
2007-09-11 11:03:25 +08:00
|
|
|
#include "remote.h"
|
|
|
|
#include "transport.h"
|
2007-11-11 15:29:47 +08:00
|
|
|
#include "run-command.h"
|
2007-12-04 15:25:47 +08:00
|
|
|
#include "parse-options.h"
|
chain kill signals for cleanup functions
If a piece of code wanted to do some cleanup before exiting
(e.g., cleaning up a lockfile or a tempfile), our usual
strategy was to install a signal handler that did something
like this:
do_cleanup(); /* actual work */
signal(signo, SIG_DFL); /* restore previous behavior */
raise(signo); /* deliver signal, killing ourselves */
For a single handler, this works fine. However, if we want
to clean up two _different_ things, we run into a problem.
The most recently installed handler will run, but when it
removes itself as a handler, it doesn't put back the first
handler.
This patch introduces sigchain, a tiny library for handling
a stack of signal handlers. You sigchain_push each handler,
and use sigchain_pop to restore whoever was before you in
the stack.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2009-01-22 14:02:35 +08:00
|
|
|
#include "sigchain.h"
|
2015-08-18 08:22:00 +08:00
|
|
|
#include "submodule-config.h"
|
2010-11-12 20:54:52 +08:00
|
|
|
#include "submodule.h"
|
2011-09-03 07:33:22 +08:00
|
|
|
#include "connected.h"
|
2020-07-29 04:23:39 +08:00
|
|
|
#include "strvec.h"
|
2016-07-02 00:03:30 +08:00
|
|
|
#include "utf8.h"
|
2017-08-19 06:20:21 +08:00
|
|
|
#include "packfile.h"
|
2017-12-08 23:58:44 +08:00
|
|
|
#include "list-objects-filter-options.h"
|
2018-07-21 00:33:04 +08:00
|
|
|
#include "commit-reach.h"
|
2019-08-19 17:11:20 +08:00
|
|
|
#include "branch.h"
|
2019-06-25 21:40:31 +08:00
|
|
|
#include "promisor-remote.h"
|
2019-09-03 10:22:02 +08:00
|
|
|
#include "commit-graph.h"
|
2020-05-01 03:48:50 +08:00
|
|
|
#include "shallow.h"
|
2007-09-11 11:03:25 +08:00
|
|
|
|
2019-06-19 04:25:27 +08:00
|
|
|
/* 10 seconds, in milliseconds: delay threshold for the forced-updates warning. */
#define FORCED_UPDATES_DELAY_WARNING_IN_MS (10 * 1000)
|
|
|
|
|
2007-12-04 15:25:47 +08:00
|
|
|
static const char * const builtin_fetch_usage[] = {
|
2012-08-20 20:32:09 +08:00
|
|
|
N_("git fetch [<options>] [<repository> [<refspec>...]]"),
|
|
|
|
N_("git fetch [<options>] <group>"),
|
|
|
|
N_("git fetch --multiple [<options>] [(<repository> | <group>)...]"),
|
|
|
|
N_("git fetch --all [<options>]"),
|
2007-12-04 15:25:47 +08:00
|
|
|
NULL
|
|
|
|
};
|
2007-09-11 11:03:25 +08:00
|
|
|
|
2007-12-04 15:25:47 +08:00
|
|
|
/*
 * Tag auto-following mode, stored in the "tags" variable below and
 * driven by the -t/--tags and -n options (see builtin_fetch_options).
 */
enum {
	TAGS_UNSET = 0,		/* -n: do not fetch tags */
	TAGS_DEFAULT = 1,	/* neither option given */
	TAGS_SET = 2		/* -t/--tags: fetch all tags */
};
|
|
|
|
|
2013-07-13 17:36:24 +08:00
|
|
|
static int fetch_prune_config = -1; /* unspecified */
|
2019-06-19 04:25:26 +08:00
|
|
|
static int fetch_show_forced_updates = 1;
|
2019-06-19 04:25:27 +08:00
|
|
|
static uint64_t forced_updates_ms = 0;
|
2021-04-16 20:49:57 +08:00
|
|
|
static int prefetch = 0;
|
2013-07-13 17:36:24 +08:00
|
|
|
static int prune = -1; /* unspecified */
|
|
|
|
#define PRUNE_BY_DEFAULT 0 /* do we prune by default? */
|
|
|
|
|
fetch: add a --prune-tags option and fetch.pruneTags config
Add a --prune-tags option to git-fetch, along with fetch.pruneTags
config option and a -P shorthand (-p is --prune). This allows for
doing any of:
git fetch -p -P
git fetch --prune --prune-tags
git fetch -p -P origin
git fetch --prune --prune-tags origin
Or simply:
git config fetch.prune true &&
git config fetch.pruneTags true &&
git fetch
Instead of the much more verbose:
git fetch --prune origin 'refs/tags/*:refs/tags/*' '+refs/heads/*:refs/remotes/origin/*'
Before this feature it was painful to support the use-case of pulling
from a repo which is having both its branches *and* tags deleted
regularly, and have our local references to reflect upstream.
At work we create deployment tags in the repo for each rollout, and
there's *lots* of those, so they're archived within weeks for
performance reasons.
Without this change it's hard to centrally configure such repos in
/etc/gitconfig (on servers that are only used for working with
them). You need to set fetch.prune=true globally, and then for each
repo:
git -C {} config --replace-all remote.origin.fetch "refs/tags/*:refs/tags/*" "^\+*refs/tags/\*:refs/tags/\*$"
Now I can simply set fetch.pruneTags=true in /etc/gitconfig as well,
and users running "git pull" will automatically get the pruning
semantics I want.
Even though "git remote" has corresponding "prune" and "update
--prune" subcommands I'm intentionally not adding a corresponding
prune-tags or "update --prune --prune-tags" mode to that command.
It's advertised (as noted in my recent "git remote doc: correct
dangerous lies about what prune does") as only modifying remote
tracking references, whereas any --prune-tags option is always going
to modify what from the user's perspective is a local copy of the tag,
since there's no such thing as a remote tracking tag.
Ideally add_prune_tags_to_fetch_refspec() would be something that
would use ALLOC_GROW() to grow the 'fetch` member of the 'remote'
struct. Instead I'm realloc-ing remote->fetch and adding the
tag_refspec to the end.
The reason is that parse_{fetch,push}_refspec which allocate the
refspec (ultimately remote->fetch) struct are called many places that
don't have access to a 'remote' struct. It would be hard to change all
their callsites to be amenable to carry around the bookkeeping
variables required for dynamic allocation.
All the other callers of the API first incrementally construct the
string version of the refspec in remote->fetch_refspec via
add_fetch_refspec(), before finally calling parse_fetch_refspec() via
some variation of remote_get().
It's less of a pain to deal with the one special case that needs to
modify already constructed refspecs than to chase down and change all
the other callsites. The API I'm adding is intentionally not
generalized because if we add more of these we'd probably want to
re-visit how this is done.
See my "Re: [BUG] git remote prune removes local tags, depending on
fetch config" (87po6ahx87.fsf@evledraar.gmail.com;
https://public-inbox.org/git/87po6ahx87.fsf@evledraar.gmail.com/) for
more background info.
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-02-10 04:32:15 +08:00
|
|
|
static int fetch_prune_tags_config = -1; /* unspecified */
|
|
|
|
static int prune_tags = -1; /* unspecified */
|
|
|
|
#define PRUNE_TAGS_BY_DEFAULT 0 /* do we prune tags by default? */
|
|
|
|
|
2019-08-19 17:11:20 +08:00
|
|
|
static int all, append, dry_run, force, keep, multiple, update_head_ok;
|
2020-08-18 22:25:22 +08:00
|
|
|
static int write_fetch_head = 1;
|
2019-08-19 17:11:20 +08:00
|
|
|
static int verbosity, deepen_relative, set_upstream;
|
2017-06-24 03:13:01 +08:00
|
|
|
static int progress = -1;
|
2019-06-19 17:46:30 +08:00
|
|
|
static int enable_auto_gc = 1;
|
2016-06-12 18:53:59 +08:00
|
|
|
static int tags = TAGS_DEFAULT, unshallow, update_shallow, deepen;
|
2019-10-06 02:46:40 +08:00
|
|
|
static int max_jobs = -1, submodule_fetch_jobs_config = -1;
|
|
|
|
static int fetch_parallel_config = 1;
|
fetch: implement support for atomic reference updates
When executing a fetch, then git will currently allocate one reference
transaction per reference update and directly commit it. This means that
fetches are non-atomic: even if some of the reference updates fail,
others may still succeed and modify local references.
This is fine in many scenarios, but this strategy has its downsides.
- The view of remote references may be inconsistent and may show a
bastardized state of the remote repository.
- Batching together updates may improve performance in certain
scenarios. While the impact probably isn't as pronounced with loose
references, the upcoming reftable backend may benefit as it needs to
write less files in case the update is batched.
- The reference-update hook is currently being executed twice per
updated reference. While this doesn't matter when there is no such
hook, we have seen severe performance regressions when doing a
git-fetch(1) with reference-transaction hook when the remote
repository has hundreds of thousands of references.
Similar to `git push --atomic`, this commit thus introduces atomic
fetches. Instead of allocating one reference transaction per updated
reference, it causes us to only allocate a single transaction and commit
it as soon as all updates were received. If locking of any reference
fails, then we abort the complete transaction and don't update any
reference, which gives us an all-or-nothing fetch.
Note that this may not completely fix the first of above downsides, as
the consistent view also depends on the server-side. If the server
doesn't have a consistent view of its own references during the
reference negotiation phase, then the client would get the same
inconsistent view the server has. This is a separate problem though and,
if it actually exists, can be fixed at a later point.
This commit also changes the way we write FETCH_HEAD in case `--atomic`
is passed. Instead of writing changes as we go, we need to accumulate
all changes first and only commit them at the end when we know that all
reference updates succeeded. Ideally, we'd just do so via a temporary
file so that we don't need to carry all updates in-memory. This isn't
trivially doable though considering the `--append` mode, where we do not
truncate the file but simply append to it. And given that we support
concurrent processes appending to FETCH_HEAD at the same time without
any loss of data, seeding the temporary file with current contents of
FETCH_HEAD initially and then doing a rename wouldn't work either. So
this commit implements the simple strategy of buffering all changes and
appending them to the file on commit.
Signed-off-by: Patrick Steinhardt <ps@pks.im>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-01-12 20:27:52 +08:00
|
|
|
static int atomic_fetch;
|
2016-02-03 12:09:14 +08:00
|
|
|
static enum transport_family family;
|
2007-11-11 15:29:47 +08:00
|
|
|
static const char *depth;
|
2016-06-12 18:53:59 +08:00
|
|
|
static const char *deepen_since;
|
2007-12-04 15:25:47 +08:00
|
|
|
static const char *upload_pack;
|
2016-06-12 18:54:04 +08:00
|
|
|
static struct string_list deepen_not = STRING_LIST_INIT_NODUP;
|
2007-12-04 15:25:46 +08:00
|
|
|
static struct strbuf default_rla = STRBUF_INIT;
|
2013-08-08 06:38:45 +08:00
|
|
|
static struct transport *gtransport;
|
fetch: work around "transport-take-over" hack
A Git-aware "connect" transport allows the "transport_take_over" to
redirect generic transport requests like fetch(), push_refs() and
get_refs_list() to the native Git transport handling methods. The
take-over process replaces transport->data with a fake data that
these method implementations understand.
While this hack works OK for a single request, it breaks when the
transport needs to make more than one requests. transport->data
that used to hold necessary information for the specific helper to
work correctly is destroyed during the take-over process.
One codepath that this matters is "git fetch" in auto-follow mode;
when it does not get all the tags that ought to point at the history
it got (which can be determined by looking at the peeled tags in the
initial advertisement) from the primary transfer, it internally
makes a second request to complete the fetch. Because "take-over"
hack has already destroyed the data necessary to talk to the
transport helper by the time this happens, the second request cannot
make a request to the helper to make another connection to fetch
these additional tags.
Mark such a transport as "cannot_reuse", and use a separate
transport to perform the backfill fetch in order to work around
this breakage.
Note that this problem does not manifest itself when running t5802,
because our upload-pack gives you all the necessary auto-followed
tags during the primary transfer. You would need to step through
"git fetch" in a debugger, stop immediately after the primary
transfer finishes and writes these auto-followed tags, remove the
tag references and repack/prune the repository to convince the
"find-non-local-tags" procedure that the primary transfer failed to
give us all the necessary tags, and then let it continue, in order
to trigger the bug in the secondary transfer this patch fixes.
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-08-08 06:47:18 +08:00
|
|
|
static struct transport *gsecondary;
|
2010-11-12 20:54:52 +08:00
|
|
|
static const char *submodule_prefix = "";
|
2017-06-24 03:13:01 +08:00
|
|
|
static int recurse_submodules = RECURSE_SUBMODULES_DEFAULT;
|
2017-06-28 05:31:59 +08:00
|
|
|
static int recurse_submodules_default = RECURSE_SUBMODULES_ON_DEMAND;
|
2014-01-03 10:28:51 +08:00
|
|
|
static int shown_url = 0;
|
2018-05-17 06:58:05 +08:00
|
|
|
static struct refspec refmap = REFSPEC_INIT_FETCH;
|
2017-12-08 23:58:44 +08:00
|
|
|
static struct list_objects_filter_options filter_options;
|
2018-04-24 06:46:24 +08:00
|
|
|
static struct string_list server_options = STRING_LIST_INIT_DUP;
|
2018-07-03 06:39:44 +08:00
|
|
|
static struct string_list negotiation_tip = STRING_LIST_INIT_NODUP;
|
2019-11-03 08:21:56 +08:00
|
|
|
static int fetch_write_commit_graph = -1;
|
2020-08-18 12:01:32 +08:00
|
|
|
static int stdin_refspecs = 0;
|
fetch: teach independent negotiation (no packfile)
Currently, the packfile negotiation step within a Git fetch cannot be
done independent of sending the packfile, even though there is at least
one application wherein this is useful. Therefore, make it possible for
this negotiation step to be done independently. A subsequent commit will
use this for one such application - push negotiation.
This feature is for protocol v2 only. (An implementation for protocol v0
would require a separate implementation in the fetch, transport, and
transport helper code.)
In the protocol, the main hindrance towards independent negotiation is
that the server can unilaterally decide to send the packfile. This is
solved by a "wait-for-done" argument: the server will then wait for the
client to say "done". In practice, the client will never say it; instead
it will cease requests once it is satisfied.
In the client, the main change lies in the transport and transport
helper code. fetch_refs_via_pack() performs everything needed - protocol
version and capability checks, and the negotiation itself.
There are 2 code paths that do not go through fetch_refs_via_pack() that
needed to be individually excluded: the bundle transport (excluded
through requiring smart_options, which the bundle transport doesn't
support) and transport helpers that do not support takeover. If or when
we support independent negotiation for protocol v0, we will need to
modify these 2 code paths to support it. But for now, report failure if
independent negotiation is requested in these cases.
Signed-off-by: Jonathan Tan <jonathantanmy@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-05-05 05:16:01 +08:00
|
|
|
static int negotiate_only;
|
2007-09-14 15:31:25 +08:00
|
|
|
|
2013-07-13 17:36:24 +08:00
|
|
|
static int git_fetch_config(const char *k, const char *v, void *cb)
|
|
|
|
{
|
|
|
|
if (!strcmp(k, "fetch.prune")) {
|
|
|
|
fetch_prune_config = git_config_bool(k, v);
|
|
|
|
return 0;
|
|
|
|
}
|
2017-06-01 08:30:50 +08:00
|
|
|
|
fetch: add a --prune-tags option and fetch.pruneTags config
Add a --prune-tags option to git-fetch, along with fetch.pruneTags
config option and a -P shorthand (-p is --prune). This allows for
doing any of:
git fetch -p -P
git fetch --prune --prune-tags
git fetch -p -P origin
git fetch --prune --prune-tags origin
Or simply:
git config fetch.prune true &&
git config fetch.pruneTags true &&
git fetch
Instead of the much more verbose:
git fetch --prune origin 'refs/tags/*:refs/tags/*' '+refs/heads/*:refs/remotes/origin/*'
Before this feature it was painful to support the use-case of pulling
from a repo which is having both its branches *and* tags deleted
regularly, and have our local references to reflect upstream.
At work we create deployment tags in the repo for each rollout, and
there's *lots* of those, so they're archived within weeks for
performance reasons.
Without this change it's hard to centrally configure such repos in
/etc/gitconfig (on servers that are only used for working with
them). You need to set fetch.prune=true globally, and then for each
repo:
git -C {} config --replace-all remote.origin.fetch "refs/tags/*:refs/tags/*" "^\+*refs/tags/\*:refs/tags/\*$"
Now I can simply set fetch.pruneTags=true in /etc/gitconfig as well,
and users running "git pull" will automatically get the pruning
semantics I want.
Even though "git remote" has corresponding "prune" and "update
--prune" subcommands I'm intentionally not adding a corresponding
prune-tags or "update --prune --prune-tags" mode to that command.
It's advertised (as noted in my recent "git remote doc: correct
dangerous lies about what prune does") as only modifying remote
tracking references, whereas any --prune-tags option is always going
to modify what from the user's perspective is a local copy of the tag,
since there's no such thing as a remote tracking tag.
Ideally add_prune_tags_to_fetch_refspec() would be something that
would use ALLOC_GROW() to grow the 'fetch` member of the 'remote'
struct. Instead I'm realloc-ing remote->fetch and adding the
tag_refspec to the end.
The reason is that parse_{fetch,push}_refspec which allocate the
refspec (ultimately remote->fetch) struct are called many places that
don't have access to a 'remote' struct. It would be hard to change all
their callsites to be amenable to carry around the bookkeeping
variables required for dynamic allocation.
All the other callers of the API first incrementally construct the
string version of the refspec in remote->fetch_refspec via
add_fetch_refspec(), before finally calling parse_fetch_refspec() via
some variation of remote_get().
It's less of a pain to deal with the one special case that needs to
modify already constructed refspecs than to chase down and change all
the other callsites. The API I'm adding is intentionally not
generalized because if we add more of these we'd probably want to
re-visit how this is done.
See my "Re: [BUG] git remote prune removes local tags, depending on
fetch config" (87po6ahx87.fsf@evledraar.gmail.com;
https://public-inbox.org/git/87po6ahx87.fsf@evledraar.gmail.com/) for
more background info.
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-02-10 04:32:15 +08:00
|
|
|
if (!strcmp(k, "fetch.prunetags")) {
|
|
|
|
fetch_prune_tags_config = git_config_bool(k, v);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-06-19 04:25:26 +08:00
|
|
|
if (!strcmp(k, "fetch.showforcedupdates")) {
|
|
|
|
fetch_show_forced_updates = git_config_bool(k, v);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-06-01 08:30:50 +08:00
|
|
|
if (!strcmp(k, "submodule.recurse")) {
|
|
|
|
int r = git_config_bool(k, v) ?
|
|
|
|
RECURSE_SUBMODULES_ON : RECURSE_SUBMODULES_OFF;
|
|
|
|
recurse_submodules = r;
|
|
|
|
}
|
|
|
|
|
2017-08-03 03:49:18 +08:00
|
|
|
if (!strcmp(k, "submodule.fetchjobs")) {
|
2019-10-06 02:46:40 +08:00
|
|
|
submodule_fetch_jobs_config = parse_submodule_fetchjobs(k, v);
|
2017-08-03 03:49:18 +08:00
|
|
|
return 0;
|
2017-08-03 03:49:19 +08:00
|
|
|
} else if (!strcmp(k, "fetch.recursesubmodules")) {
|
|
|
|
recurse_submodules = parse_fetch_recurse_submodules_arg(k, v);
|
|
|
|
return 0;
|
2017-08-03 03:49:18 +08:00
|
|
|
}
|
|
|
|
|
2019-10-06 02:46:40 +08:00
|
|
|
if (!strcmp(k, "fetch.parallel")) {
|
|
|
|
fetch_parallel_config = git_config_int(k, v);
|
|
|
|
if (fetch_parallel_config < 0)
|
|
|
|
die(_("fetch.parallel cannot be negative"));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
fetch: load all default config at startup
When we start the git-fetch program, we call git_config to
load all config, but our callback only processes the
fetch.prune option; we do not chain to git_default_config at
all.
This means that we may not load some core configuration
which will have an effect. For instance, we do not load
core.logAllRefUpdates, which impacts whether or not we
create reflogs in a bare repository.
Note that I said "may" above. It gets even more exciting. If
we have to transfer actual objects as part of the fetch,
then we call fetch_pack as part of the same process. That
function loads its own config, which does chain to
git_default_config, impacting global variables which are
used by the rest of fetch. But if the fetch is a pure ref
update (e.g., a new ref which is a copy of an old one), we
skip fetch_pack entirely. So we get inconsistent results
depending on whether or not we have actual objects to
transfer or not!
Let's just load the core config at the start of fetch, so we
know we have it (we may also load it again as part of
fetch_pack, but that's OK; it's designed to be idempotent).
Our tests check both cases (with and without a pack). We
also check similar behavior for push for good measure, but
it already works as expected.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2014-11-04 21:11:19 +08:00
|
|
|
return git_default_config(k, v, cb);
|
2013-07-13 17:36:24 +08:00
|
|
|
}
|
|
|
|
|
2014-05-30 06:21:31 +08:00
|
|
|
static int parse_refmap_arg(const struct option *opt, const char *arg, int unset)
|
|
|
|
{
|
assert NOARG/NONEG behavior of parse-options callbacks
When we define a parse-options callback, the flags we put in the option
struct must match what the callback expects. For example, a callback
which does not handle the "unset" parameter should only be used with
PARSE_OPT_NONEG. But since the callback and the option struct are not
defined next to each other, it's easy to get this wrong (as earlier
patches in this series show).
Fortunately, the compiler can help us here: compiling with
-Wunused-parameters can show us which callbacks ignore their "unset"
parameters (and likewise, ones that ignore "arg" expect to be triggered
with PARSE_OPT_NOARG).
But after we've inspected a callback and determined that all of its
callers use the right flags, what do we do next? We'd like to silence
the compiler warning, but do so in a way that will catch any wrong calls
in the future.
We can do that by actually checking those variables and asserting that
they match our expectations. Because this is such a common pattern,
we'll introduce some helper macros. The resulting messages aren't
as descriptive as we could make them, but the file/line information from
BUG() is enough to identify the problem (and anyway, the point is that
these should never be seen).
Each of the annotated callbacks in this patch triggers
-Wunused-parameters, and was manually inspected to make sure all callers
use the correct options (so none of these BUGs should be triggerable).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-11-05 14:45:42 +08:00
|
|
|
BUG_ON_OPT_NEG(unset);
|
|
|
|
|
2014-05-30 06:21:31 +08:00
|
|
|
/*
|
|
|
|
* "git fetch --refmap='' origin foo"
|
|
|
|
* can be used to tell the command not to store anywhere
|
|
|
|
*/
|
2018-05-17 06:58:05 +08:00
|
|
|
refspec_append(&refmap, arg);
|
|
|
|
|
2014-05-30 06:21:31 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2007-12-04 15:25:47 +08:00
|
|
|
static struct option builtin_fetch_options[] = {
|
2008-11-15 08:14:24 +08:00
|
|
|
OPT__VERBOSITY(&verbosity),
|
2013-08-03 19:51:19 +08:00
|
|
|
OPT_BOOL(0, "all", &all,
|
|
|
|
N_("fetch from all remotes")),
|
2019-08-19 17:11:20 +08:00
|
|
|
OPT_BOOL(0, "set-upstream", &set_upstream,
|
|
|
|
N_("set upstream for git pull/fetch")),
|
2013-08-03 19:51:19 +08:00
|
|
|
OPT_BOOL('a', "append", &append,
|
|
|
|
N_("append to .git/FETCH_HEAD instead of overwriting")),
|
fetch: implement support for atomic reference updates
When executing a fetch, then git will currently allocate one reference
transaction per reference update and directly commit it. This means that
fetches are non-atomic: even if some of the reference updates fail,
others may still succeed and modify local references.
This is fine in many scenarios, but this strategy has its downsides.
- The view of remote references may be inconsistent and may show a
bastardized state of the remote repository.
- Batching together updates may improve performance in certain
scenarios. While the impact probably isn't as pronounced with loose
references, the upcoming reftable backend may benefit as it needs to
write less files in case the update is batched.
- The reference-update hook is currently being executed twice per
updated reference. While this doesn't matter when there is no such
hook, we have seen severe performance regressions when doing a
git-fetch(1) with reference-transaction hook when the remote
repository has hundreds of thousands of references.
Similar to `git push --atomic`, this commit thus introduces atomic
fetches. Instead of allocating one reference transaction per updated
reference, it causes us to only allocate a single transaction and commit
it as soon as all updates were received. If locking of any reference
fails, then we abort the complete transaction and don't update any
reference, which gives us an all-or-nothing fetch.
Note that this may not completely fix the first of above downsides, as
the consistent view also depends on the server-side. If the server
doesn't have a consistent view of its own references during the
reference negotiation phase, then the client would get the same
inconsistent view the server has. This is a separate problem though and,
if it actually exists, can be fixed at a later point.
This commit also changes the way we write FETCH_HEAD in case `--atomic`
is passed. Instead of writing changes as we go, we need to accumulate
all changes first and only commit them at the end when we know that all
reference updates succeeded. Ideally, we'd just do so via a temporary
file so that we don't need to carry all updates in-memory. This isn't
trivially doable though considering the `--append` mode, where we do not
truncate the file but simply append to it. And given that we support
concurrent processes appending to FETCH_HEAD at the same time without
any loss of data, seeding the temporary file with current contents of
FETCH_HEAD initially and then doing a rename wouldn't work either. So
this commit implements the simple strategy of buffering all changes and
appending them to the file on commit.
Signed-off-by: Patrick Steinhardt <ps@pks.im>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-01-12 20:27:52 +08:00
|
|
|
OPT_BOOL(0, "atomic", &atomic_fetch,
|
|
|
|
N_("use atomic transaction to update references")),
|
2012-08-20 20:32:09 +08:00
|
|
|
OPT_STRING(0, "upload-pack", &upload_pack, N_("path"),
|
|
|
|
N_("path to upload pack on remote end")),
|
2018-09-01 04:09:56 +08:00
|
|
|
OPT__FORCE(&force, N_("force overwrite of local reference"), 0),
|
2013-08-03 19:51:19 +08:00
|
|
|
OPT_BOOL('m', "multiple", &multiple,
|
|
|
|
N_("fetch from multiple remotes")),
|
2007-12-04 15:25:47 +08:00
|
|
|
OPT_SET_INT('t', "tags", &tags,
|
2012-08-20 20:32:09 +08:00
|
|
|
N_("fetch all tags and associated objects"), TAGS_SET),
|
2008-03-13 15:13:15 +08:00
|
|
|
OPT_SET_INT('n', NULL, &tags,
|
2012-08-20 20:32:09 +08:00
|
|
|
N_("do not fetch all tags (--no-tags)"), TAGS_UNSET),
|
2019-10-06 02:46:40 +08:00
|
|
|
OPT_INTEGER('j', "jobs", &max_jobs,
|
2015-12-16 08:04:12 +08:00
|
|
|
N_("number of submodules fetched in parallel")),
|
2021-04-16 20:49:57 +08:00
|
|
|
OPT_BOOL(0, "prefetch", &prefetch,
|
|
|
|
N_("modify the refspec to place all refs within refs/prefetch/")),
|
2013-08-03 19:51:19 +08:00
|
|
|
OPT_BOOL('p', "prune", &prune,
|
|
|
|
N_("prune remote-tracking branches no longer on remote")),
|
fetch: add a --prune-tags option and fetch.pruneTags config
Add a --prune-tags option to git-fetch, along with fetch.pruneTags
config option and a -P shorthand (-p is --prune). This allows for
doing any of:
git fetch -p -P
git fetch --prune --prune-tags
git fetch -p -P origin
git fetch --prune --prune-tags origin
Or simply:
git config fetch.prune true &&
git config fetch.pruneTags true &&
git fetch
Instead of the much more verbose:
git fetch --prune origin 'refs/tags/*:refs/tags/*' '+refs/heads/*:refs/remotes/origin/*'
Before this feature it was painful to support the use-case of pulling
from a repo which is having both its branches *and* tags deleted
regularly, and have our local references to reflect upstream.
At work we create deployment tags in the repo for each rollout, and
there's *lots* of those, so they're archived within weeks for
performance reasons.
Without this change it's hard to centrally configure such repos in
/etc/gitconfig (on servers that are only used for working with
them). You need to set fetch.prune=true globally, and then for each
repo:
git -C {} config --replace-all remote.origin.fetch "refs/tags/*:refs/tags/*" "^\+*refs/tags/\*:refs/tags/\*$"
Now I can simply set fetch.pruneTags=true in /etc/gitconfig as well,
and users running "git pull" will automatically get the pruning
semantics I want.
Even though "git remote" has corresponding "prune" and "update
--prune" subcommands I'm intentionally not adding a corresponding
prune-tags or "update --prune --prune-tags" mode to that command.
It's advertised (as noted in my recent "git remote doc: correct
dangerous lies about what prune does") as only modifying remote
tracking references, whereas any --prune-tags option is always going
to modify what from the user's perspective is a local copy of the tag,
since there's no such thing as a remote tracking tag.
Ideally add_prune_tags_to_fetch_refspec() would be something that
would use ALLOC_GROW() to grow the 'fetch' member of the 'remote'
struct. Instead I'm realloc-ing remote->fetch and adding the
tag_refspec to the end.
The reason is that parse_{fetch,push}_refspec which allocate the
refspec (ultimately remote->fetch) struct are called many places that
don't have access to a 'remote' struct. It would be hard to change all
their callsites to be amenable to carry around the bookkeeping
variables required for dynamic allocation.
All the other callers of the API first incrementally construct the
string version of the refspec in remote->fetch_refspec via
add_fetch_refspec(), before finally calling parse_fetch_refspec() via
some variation of remote_get().
It's less of a pain to deal with the one special case that needs to
modify already constructed refspecs than to chase down and change all
the other callsites. The API I'm adding is intentionally not
generalized because if we add more of these we'd probably want to
re-visit how this is done.
See my "Re: [BUG] git remote prune removes local tags, depending on
fetch config" (87po6ahx87.fsf@evledraar.gmail.com;
https://public-inbox.org/git/87po6ahx87.fsf@evledraar.gmail.com/) for
more background info.
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-02-10 04:32:15 +08:00
|
|
|
OPT_BOOL('P', "prune-tags", &prune_tags,
|
|
|
|
N_("prune local tags no longer on remote and clobber changed tags")),
|
Use OPT_CALLBACK and OPT_CALLBACK_F
In the codebase, there are many options which use OPTION_CALLBACK in a
plain ol' struct definition. However, we have the OPT_CALLBACK and
OPT_CALLBACK_F macros which are meant to abstract these plain struct
definitions away. These macros are useful as they semantically signal to
developers that these are just normal callback option with nothing fancy
happening.
Replace plain struct definitions of OPTION_CALLBACK with OPT_CALLBACK or
OPT_CALLBACK_F where applicable. The heavy lifting was done using the
following (disgusting) shell script:
#!/bin/sh
do_replacement () {
tr '\n' '\r' |
sed -e 's/{\s*OPTION_CALLBACK,\s*\([^,]*\),\([^,]*\),\([^,]*\),\([^,]*\),\([^,]*\),\s*0,\(\s*[^[:space:]}]*\)\s*}/OPT_CALLBACK(\1,\2,\3,\4,\5,\6)/g' |
sed -e 's/{\s*OPTION_CALLBACK,\s*\([^,]*\),\([^,]*\),\([^,]*\),\([^,]*\),\([^,]*\),\([^,]*\),\(\s*[^[:space:]}]*\)\s*}/OPT_CALLBACK_F(\1,\2,\3,\4,\5,\6,\7)/g' |
tr '\r' '\n'
}
for f in $(git ls-files \*.c)
do
do_replacement <"$f" >"$f.tmp"
mv "$f.tmp" "$f"
done
The result was manually inspected and then reformatted to match the
style of the surrounding code. Finally, using
`git grep OPTION_CALLBACK \*.c`, leftover results which were not handled
by the script were manually transformed.
Signed-off-by: Denton Liu <liu.denton@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-04-28 16:36:28 +08:00
|
|
|
OPT_CALLBACK_F(0, "recurse-submodules", &recurse_submodules, N_("on-demand"),
|
2012-08-20 20:32:09 +08:00
|
|
|
N_("control recursive fetching of submodules"),
|
Use OPT_CALLBACK and OPT_CALLBACK_F
In the codebase, there are many options which use OPTION_CALLBACK in a
plain ol' struct definition. However, we have the OPT_CALLBACK and
OPT_CALLBACK_F macros which are meant to abstract these plain struct
definitions away. These macros are useful as they semantically signal to
developers that these are just normal callback option with nothing fancy
happening.
Replace plain struct definitions of OPTION_CALLBACK with OPT_CALLBACK or
OPT_CALLBACK_F where applicable. The heavy lifting was done using the
following (disgusting) shell script:
#!/bin/sh
do_replacement () {
tr '\n' '\r' |
sed -e 's/{\s*OPTION_CALLBACK,\s*\([^,]*\),\([^,]*\),\([^,]*\),\([^,]*\),\([^,]*\),\s*0,\(\s*[^[:space:]}]*\)\s*}/OPT_CALLBACK(\1,\2,\3,\4,\5,\6)/g' |
sed -e 's/{\s*OPTION_CALLBACK,\s*\([^,]*\),\([^,]*\),\([^,]*\),\([^,]*\),\([^,]*\),\([^,]*\),\(\s*[^[:space:]}]*\)\s*}/OPT_CALLBACK_F(\1,\2,\3,\4,\5,\6,\7)/g' |
tr '\r' '\n'
}
for f in $(git ls-files \*.c)
do
do_replacement <"$f" >"$f.tmp"
mv "$f.tmp" "$f"
done
The result was manually inspected and then reformatted to match the
style of the surrounding code. Finally, using
`git grep OPTION_CALLBACK \*.c`, leftover results which were not handled
by the script were manually transformed.
Signed-off-by: Denton Liu <liu.denton@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-04-28 16:36:28 +08:00
|
|
|
PARSE_OPT_OPTARG, option_fetch_parse_recurse_submodules),
|
2013-08-03 19:51:19 +08:00
|
|
|
OPT_BOOL(0, "dry-run", &dry_run,
|
|
|
|
N_("dry run")),
|
2020-08-18 22:25:22 +08:00
|
|
|
OPT_BOOL(0, "write-fetch-head", &write_fetch_head,
|
|
|
|
N_("write fetched references to the FETCH_HEAD file")),
|
2013-08-03 19:51:19 +08:00
|
|
|
OPT_BOOL('k', "keep", &keep, N_("keep downloaded pack")),
|
|
|
|
OPT_BOOL('u', "update-head-ok", &update_head_ok,
|
2012-08-20 20:32:09 +08:00
|
|
|
N_("allow updating of HEAD ref")),
|
|
|
|
OPT_BOOL(0, "progress", &progress, N_("force progress reporting")),
|
|
|
|
OPT_STRING(0, "depth", &depth, N_("depth"),
|
|
|
|
N_("deepen history of shallow clone")),
|
2016-06-12 18:53:59 +08:00
|
|
|
OPT_STRING(0, "shallow-since", &deepen_since, N_("time"),
|
|
|
|
N_("deepen history of shallow repository based on time")),
|
2016-06-12 18:54:04 +08:00
|
|
|
OPT_STRING_LIST(0, "shallow-exclude", &deepen_not, N_("revision"),
|
2016-12-05 06:03:59 +08:00
|
|
|
N_("deepen history of shallow clone, excluding rev")),
|
fetch, upload-pack: --deepen=N extends shallow boundary by N commits
In git-fetch, --depth argument is always relative with the latest
remote refs. This makes it a bit difficult to cover this use case,
where the user wants to make the shallow history, say 3 levels
deeper. It would work if remote refs have not moved yet, but nobody
can guarantee that, especially when that use case is performed a
couple months after the last clone or "git fetch --depth". Also,
modifying shallow boundary using --depth does not work well with
clones created by --since or --not.
This patch fixes that. A new argument --deepen=<N> will add <N> more (*)
parent commits to the current history regardless of where remote refs
are.
Have/Want negotiation is still respected. So if remote refs move, the
server will send two chunks: one between "have" and "want" and another
to extend shallow history. In theory, the client could send no "want"s
in order to get the second chunk only. But the protocol does not allow
that. Either you send no want lines, which means ls-remote; or you
have to send at least one want line that carries deep-relative to the
server.
The main work was done by Dongcan Jiang. I fixed it up here and there.
And of course all the bugs belong to me.
(*) We could even support --deepen=<N> where <N> is negative. In that
case we can cut some history from the shallow clone. This operation
(and --depth=<shorter depth>) does not require interaction with remote
side (and more complicated to implement as a result).
Helped-by: Duy Nguyen <pclouds@gmail.com>
Helped-by: Eric Sunshine <sunshine@sunshineco.com>
Helped-by: Junio C Hamano <gitster@pobox.com>
Signed-off-by: Dongcan Jiang <dongcan.jiang@gmail.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2016-06-12 18:54:09 +08:00
|
|
|
OPT_INTEGER(0, "deepen", &deepen_relative,
|
|
|
|
N_("deepen history of shallow clone")),
|
2018-05-20 23:42:58 +08:00
|
|
|
OPT_SET_INT_F(0, "unshallow", &unshallow,
|
|
|
|
N_("convert to a complete repository"),
|
|
|
|
1, PARSE_OPT_NONEG),
|
2012-08-20 20:32:09 +08:00
|
|
|
{ OPTION_STRING, 0, "submodule-prefix", &submodule_prefix, N_("dir"),
|
|
|
|
N_("prepend this to submodule path output"), PARSE_OPT_HIDDEN },
|
Use OPT_CALLBACK and OPT_CALLBACK_F
In the codebase, there are many options which use OPTION_CALLBACK in a
plain ol' struct definition. However, we have the OPT_CALLBACK and
OPT_CALLBACK_F macros which are meant to abstract these plain struct
definitions away. These macros are useful as they semantically signal to
developers that these are just normal callback option with nothing fancy
happening.
Replace plain struct definitions of OPTION_CALLBACK with OPT_CALLBACK or
OPT_CALLBACK_F where applicable. The heavy lifting was done using the
following (disgusting) shell script:
#!/bin/sh
do_replacement () {
tr '\n' '\r' |
sed -e 's/{\s*OPTION_CALLBACK,\s*\([^,]*\),\([^,]*\),\([^,]*\),\([^,]*\),\([^,]*\),\s*0,\(\s*[^[:space:]}]*\)\s*}/OPT_CALLBACK(\1,\2,\3,\4,\5,\6)/g' |
sed -e 's/{\s*OPTION_CALLBACK,\s*\([^,]*\),\([^,]*\),\([^,]*\),\([^,]*\),\([^,]*\),\([^,]*\),\(\s*[^[:space:]}]*\)\s*}/OPT_CALLBACK_F(\1,\2,\3,\4,\5,\6,\7)/g' |
tr '\r' '\n'
}
for f in $(git ls-files \*.c)
do
do_replacement <"$f" >"$f.tmp"
mv "$f.tmp" "$f"
done
The result was manually inspected and then reformatted to match the
style of the surrounding code. Finally, using
`git grep OPTION_CALLBACK \*.c`, leftover results which were not handled
by the script were manually transformed.
Signed-off-by: Denton Liu <liu.denton@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-04-28 16:36:28 +08:00
|
|
|
OPT_CALLBACK_F(0, "recurse-submodules-default",
|
2017-06-24 03:13:01 +08:00
|
|
|
&recurse_submodules_default, N_("on-demand"),
|
|
|
|
N_("default for recursive fetching of submodules "
|
|
|
|
"(lower priority than config files)"),
|
Use OPT_CALLBACK and OPT_CALLBACK_F
In the codebase, there are many options which use OPTION_CALLBACK in a
plain ol' struct definition. However, we have the OPT_CALLBACK and
OPT_CALLBACK_F macros which are meant to abstract these plain struct
definitions away. These macros are useful as they semantically signal to
developers that these are just normal callback option with nothing fancy
happening.
Replace plain struct definitions of OPTION_CALLBACK with OPT_CALLBACK or
OPT_CALLBACK_F where applicable. The heavy lifting was done using the
following (disgusting) shell script:
#!/bin/sh
do_replacement () {
tr '\n' '\r' |
sed -e 's/{\s*OPTION_CALLBACK,\s*\([^,]*\),\([^,]*\),\([^,]*\),\([^,]*\),\([^,]*\),\s*0,\(\s*[^[:space:]}]*\)\s*}/OPT_CALLBACK(\1,\2,\3,\4,\5,\6)/g' |
sed -e 's/{\s*OPTION_CALLBACK,\s*\([^,]*\),\([^,]*\),\([^,]*\),\([^,]*\),\([^,]*\),\([^,]*\),\(\s*[^[:space:]}]*\)\s*}/OPT_CALLBACK_F(\1,\2,\3,\4,\5,\6,\7)/g' |
tr '\r' '\n'
}
for f in $(git ls-files \*.c)
do
do_replacement <"$f" >"$f.tmp"
mv "$f.tmp" "$f"
done
The result was manually inspected and then reformatted to match the
style of the surrounding code. Finally, using
`git grep OPTION_CALLBACK \*.c`, leftover results which were not handled
by the script were manually transformed.
Signed-off-by: Denton Liu <liu.denton@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-04-28 16:36:28 +08:00
|
|
|
PARSE_OPT_HIDDEN, option_fetch_parse_recurse_submodules),
|
2013-12-05 21:02:42 +08:00
|
|
|
OPT_BOOL(0, "update-shallow", &update_shallow,
|
|
|
|
N_("accept refs that update .git/shallow")),
|
Use OPT_CALLBACK and OPT_CALLBACK_F
In the codebase, there are many options which use OPTION_CALLBACK in a
plain ol' struct definition. However, we have the OPT_CALLBACK and
OPT_CALLBACK_F macros which are meant to abstract these plain struct
definitions away. These macros are useful as they semantically signal to
developers that these are just normal callback option with nothing fancy
happening.
Replace plain struct definitions of OPTION_CALLBACK with OPT_CALLBACK or
OPT_CALLBACK_F where applicable. The heavy lifting was done using the
following (disgusting) shell script:
#!/bin/sh
do_replacement () {
tr '\n' '\r' |
sed -e 's/{\s*OPTION_CALLBACK,\s*\([^,]*\),\([^,]*\),\([^,]*\),\([^,]*\),\([^,]*\),\s*0,\(\s*[^[:space:]}]*\)\s*}/OPT_CALLBACK(\1,\2,\3,\4,\5,\6)/g' |
sed -e 's/{\s*OPTION_CALLBACK,\s*\([^,]*\),\([^,]*\),\([^,]*\),\([^,]*\),\([^,]*\),\([^,]*\),\(\s*[^[:space:]}]*\)\s*}/OPT_CALLBACK_F(\1,\2,\3,\4,\5,\6,\7)/g' |
tr '\r' '\n'
}
for f in $(git ls-files \*.c)
do
do_replacement <"$f" >"$f.tmp"
mv "$f.tmp" "$f"
done
The result was manually inspected and then reformatted to match the
style of the surrounding code. Finally, using
`git grep OPTION_CALLBACK \*.c`, leftover results which were not handled
by the script were manually transformed.
Signed-off-by: Denton Liu <liu.denton@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-04-28 16:36:28 +08:00
|
|
|
OPT_CALLBACK_F(0, "refmap", NULL, N_("refmap"),
|
|
|
|
N_("specify fetch refmap"), PARSE_OPT_NONEG, parse_refmap_arg),
|
2018-04-24 06:46:24 +08:00
|
|
|
OPT_STRING_LIST('o', "server-option", &server_options, N_("server-specific"), N_("option to transmit")),
|
2016-02-03 12:09:14 +08:00
|
|
|
OPT_SET_INT('4', "ipv4", &family, N_("use IPv4 addresses only"),
|
|
|
|
TRANSPORT_FAMILY_IPV4),
|
|
|
|
OPT_SET_INT('6', "ipv6", &family, N_("use IPv6 addresses only"),
|
|
|
|
TRANSPORT_FAMILY_IPV6),
|
2018-07-03 06:39:44 +08:00
|
|
|
OPT_STRING_LIST(0, "negotiation-tip", &negotiation_tip, N_("revision"),
|
|
|
|
N_("report that we have only objects reachable from this object")),
|
fetch: teach independent negotiation (no packfile)
Currently, the packfile negotiation step within a Git fetch cannot be
done independent of sending the packfile, even though there is at least
one application wherein this is useful. Therefore, make it possible for
this negotiation step to be done independently. A subsequent commit will
use this for one such application - push negotiation.
This feature is for protocol v2 only. (An implementation for protocol v0
would require a separate implementation in the fetch, transport, and
transport helper code.)
In the protocol, the main hindrance towards independent negotiation is
that the server can unilaterally decide to send the packfile. This is
solved by a "wait-for-done" argument: the server will then wait for the
client to say "done". In practice, the client will never say it; instead
it will cease requests once it is satisfied.
In the client, the main change lies in the transport and transport
helper code. fetch_refs_via_pack() performs everything needed - protocol
version and capability checks, and the negotiation itself.
There are 2 code paths that do not go through fetch_refs_via_pack() that
needed to be individually excluded: the bundle transport (excluded
through requiring smart_options, which the bundle transport doesn't
support) and transport helpers that do not support takeover. If or when
we support independent negotiation for protocol v0, we will need to
modify these 2 code paths to support it. But for now, report failure if
independent negotiation is requested in these cases.
Signed-off-by: Jonathan Tan <jonathantanmy@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-05-05 05:16:01 +08:00
|
|
|
OPT_BOOL(0, "negotiate-only", &negotiate_only,
|
|
|
|
N_("do not fetch a packfile; instead, print ancestors of negotiation tips")),
|
2017-12-08 23:58:44 +08:00
|
|
|
OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
|
2020-09-18 02:11:44 +08:00
|
|
|
OPT_BOOL(0, "auto-maintenance", &enable_auto_gc,
|
|
|
|
N_("run 'maintenance --auto' after fetching")),
|
2019-06-19 17:46:30 +08:00
|
|
|
OPT_BOOL(0, "auto-gc", &enable_auto_gc,
|
2020-09-18 02:11:44 +08:00
|
|
|
N_("run 'maintenance --auto' after fetching")),
|
2019-06-19 04:25:26 +08:00
|
|
|
OPT_BOOL(0, "show-forced-updates", &fetch_show_forced_updates,
|
|
|
|
N_("check for forced-updates on all updated branches")),
|
2019-11-03 08:21:56 +08:00
|
|
|
OPT_BOOL(0, "write-commit-graph", &fetch_write_commit_graph,
|
|
|
|
N_("write the commit-graph after fetching")),
|
2020-08-18 12:01:32 +08:00
|
|
|
OPT_BOOL(0, "stdin", &stdin_refspecs,
|
|
|
|
N_("accept refspecs from stdin")),
|
2007-12-04 15:25:47 +08:00
|
|
|
OPT_END()
|
|
|
|
};
|
|
|
|
|
2007-09-14 15:31:25 +08:00
|
|
|
/*
 * Release any pack lockfiles held by the fetch transports.
 *
 * Both the primary transport (gtransport) and the secondary one
 * (gsecondary, created to work around the "transport-take-over" hack
 * when a backfill fetch is needed) are unlocked if they exist; either
 * global may be NULL, in which case it is skipped.
 */
static void unlock_pack(void)
{
	if (gtransport)
		transport_unlock_pack(gtransport);
	if (gsecondary)
		transport_unlock_pack(gsecondary);
}
|
|
|
|
|
|
|
|
/*
 * Signal handler: clean up pack lockfiles, then re-deliver the signal.
 *
 * sigchain_pop() restores whatever handler was installed before ours
 * (see the sigchain library), so the subsequent raise() lets any other
 * registered cleanup run and ultimately kills the process with the
 * original signal's default disposition.
 */
static void unlock_pack_on_signal(int signo)
{
	unlock_pack();
	sigchain_pop(signo);
	raise(signo);
}
|
2007-09-11 11:03:25 +08:00
|
|
|
|
2007-09-18 16:54:53 +08:00
|
|
|
/*
 * Mark the refs configured in branch.<name>.merge for merging.
 *
 * For each merge source configured on "branch", look for a matching
 * ref already scheduled for fetching in *head and flag it
 * FETCH_HEAD_MERGE.  A merge source that is not being fetched at all
 * gets a fetch mapping appended via get_fetch_map() (onto **tail), and
 * every newly appended ref is flagged likewise, so that 'git pull' can
 * find an entry marked for merging in FETCH_HEAD.
 */
static void add_merge_config(struct ref **head,
			     const struct ref *remote_refs,
			     struct branch *branch,
			     struct ref ***tail)
{
	int i;

	for (i = 0; i < branch->merge_nr; i++) {
		struct ref *rm, **old_tail = *tail;
		struct refspec_item refspec;

		/* Is this merge source already in the list to be fetched? */
		for (rm = *head; rm; rm = rm->next) {
			if (branch_merge_matches(branch, i, rm->name)) {
				rm->fetch_head_status = FETCH_HEAD_MERGE;
				break;
			}
		}
		if (rm)
			continue;

		/*
		 * Not fetched to a remote-tracking branch? We need to fetch
		 * it anyway to allow this branch's "branch.$name.merge"
		 * to be honored by 'git pull', but we do not have to
		 * fail if branch.$name.merge is misconfigured to point
		 * at a nonexisting branch. If we were indeed called by
		 * 'git pull', it will notice the misconfiguration because
		 * there is no entry in the resulting FETCH_HEAD marked
		 * for merging.
		 */
		memset(&refspec, 0, sizeof(refspec));
		refspec.src = branch->merge[i]->src;
		get_fetch_map(remote_refs, &refspec, tail, 1);
		/* Flag everything get_fetch_map() just appended. */
		for (rm = *old_tail; rm; rm = rm->next)
			rm->fetch_head_status = FETCH_HEAD_MERGE;
	}
}
|
|
|
|
|
2019-09-16 05:18:02 +08:00
|
|
|
static void create_fetch_oidset(struct ref **head, struct oidset *out)
|
2013-10-30 13:32:55 +08:00
|
|
|
{
|
|
|
|
struct ref *rm = *head;
|
|
|
|
while (rm) {
|
2019-09-16 05:18:02 +08:00
|
|
|
oidset_insert(out, &rm->old_oid);
|
2013-10-30 13:32:55 +08:00
|
|
|
rm = rm->next;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-09-26 04:25:04 +08:00
|
|
|
/*
 * Hashmap payload mapping a refname to the object id it pointed at
 * when the map was populated.
 */
struct refname_hash_entry {
	struct hashmap_entry ent;	/* embedded hashmap linkage */
	struct object_id oid;		/* object the ref pointed at */
	/* when set, later passes skip this entry (see clear_item()) */
	int ignore;
	char refname[FLEX_ARRAY];	/* NUL-terminated ref name */
};
|
|
|
|
|
|
|
|
/*
 * Hashmap comparison callback: entries are keyed by refname.
 *
 * When "keydata" is non-NULL it is a bare refname string used for
 * lookup and is compared against e1's refname; otherwise the two
 * entries' refnames are compared directly.  Returns 0 on equality,
 * per the hashmap_cmp_fn contract.
 */
static int refname_hash_entry_cmp(const void *hashmap_cmp_fn_data,
				  const struct hashmap_entry *eptr,
				  const struct hashmap_entry *entry_or_key,
				  const void *keydata)
{
	const struct refname_hash_entry *e1, *e2;

	e1 = container_of(eptr, const struct refname_hash_entry, ent);
	e2 = container_of(entry_or_key, const struct refname_hash_entry, ent);
	return strcmp(e1->refname, keydata ? keydata : e2->refname);
}
|
|
|
|
|
|
|
|
/*
 * Allocate a refname_hash_entry for (refname, oid) and add it to
 * "map".  Returns the new entry; its storage is owned by the map.
 *
 * FLEX_ALLOC_MEM allocates the struct together with the trailing
 * flexible refname array and copies the name in one step.
 */
static struct refname_hash_entry *refname_hash_add(struct hashmap *map,
						   const char *refname,
						   const struct object_id *oid)
{
	struct refname_hash_entry *ent;
	size_t len = strlen(refname);

	FLEX_ALLOC_MEM(ent, refname, refname, len);
	hashmap_entry_init(&ent->ent, strhash(refname));
	oidcpy(&ent->oid, oid);
	hashmap_add(map, &ent->ent);
	return ent;
}
|
|
|
|
|
|
|
|
/*
 * for_each_ref() callback: record the refname and its object id in the
 * hashmap passed through "cbdata".  Always returns 0 so the ref
 * iteration continues.
 */
static int add_one_refname(const char *refname,
			   const struct object_id *oid,
			   int flag, void *cbdata)
{
	struct hashmap *map = cbdata;

	refname_hash_add(map, refname, oid);
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Initialize "map" as a refname-keyed hashmap using
 * refname_hash_entry_cmp() for comparisons.
 */
static void refname_hash_init(struct hashmap *map)
{
	hashmap_init(map, refname_hash_entry_cmp, NULL, 0);
}
|
|
|
|
|
|
|
|
/*
 * Return 1 if an entry named "refname" is present in "map",
 * 0 otherwise.
 */
static int refname_hash_exists(struct hashmap *map, const char *refname)
{
	return !!hashmap_get_from_hash(map, strhash(refname), refname);
}
|
|
|
|
|
2019-06-04 10:13:28 +08:00
|
|
|
/*
 * Flag the entry as ignored instead of removing it from the map;
 * consumers check the "ignore" field before using an entry.
 */
static void clear_item(struct refname_hash_entry *item)
{
	item->ignore = 1;
}
|
|
|
|
|
2018-06-28 06:30:21 +08:00
|
|
|
static void find_non_local_tags(const struct ref *refs,
|
|
|
|
struct ref **head,
|
|
|
|
struct ref ***tail)
|
2013-10-30 13:32:55 +08:00
|
|
|
{
|
2018-09-26 04:25:04 +08:00
|
|
|
struct hashmap existing_refs;
|
|
|
|
struct hashmap remote_refs;
|
2019-09-16 05:18:02 +08:00
|
|
|
struct oidset fetch_oids = OIDSET_INIT;
|
2018-09-26 04:25:04 +08:00
|
|
|
struct string_list remote_refs_list = STRING_LIST_INIT_NODUP;
|
|
|
|
struct string_list_item *remote_ref_item;
|
2013-10-30 13:32:55 +08:00
|
|
|
const struct ref *ref;
|
2018-09-26 04:25:04 +08:00
|
|
|
struct refname_hash_entry *item = NULL;
|
2020-02-22 05:47:28 +08:00
|
|
|
const int quick_flags = OBJECT_INFO_QUICK | OBJECT_INFO_SKIP_FETCH_OBJECT;
|
2018-09-26 04:25:04 +08:00
|
|
|
|
|
|
|
refname_hash_init(&existing_refs);
|
|
|
|
refname_hash_init(&remote_refs);
|
2019-09-16 05:18:02 +08:00
|
|
|
create_fetch_oidset(head, &fetch_oids);
|
2013-10-30 13:32:55 +08:00
|
|
|
|
2018-09-26 04:25:04 +08:00
|
|
|
for_each_ref(add_one_refname, &existing_refs);
|
2018-06-28 06:30:21 +08:00
|
|
|
for (ref = refs; ref; ref = ref->next) {
|
2013-12-18 03:47:35 +08:00
|
|
|
if (!starts_with(ref->name, "refs/tags/"))
|
2013-10-30 13:32:55 +08:00
|
|
|
continue;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The peeled ref always follows the matching base
|
|
|
|
* ref, so if we see a peeled ref that we don't want
|
|
|
|
* to fetch then we can mark the ref entry in the list
|
|
|
|
* as one to ignore by setting util to NULL.
|
|
|
|
*/
|
2013-12-18 03:47:35 +08:00
|
|
|
if (ends_with(ref->name, "^{}")) {
|
fetch: use "quick" has_sha1_file for tag following
When we auto-follow tags in a fetch, we look at all of the
tags advertised by the remote and fetch ones where we don't
already have the tag, but we do have the object it peels to.
This involves a lot of calls to has_sha1_file(), some of
which we can reasonably expect to fail. Since 45e8a74
(has_sha1_file: re-check pack directory before giving up,
2013-08-30), this may cause many calls to
reprepare_packed_git(), which is potentially expensive.
This has gone unnoticed for several years because it
requires a fairly unique setup to matter:
1. You need to have a lot of packs on the client side to
make reprepare_packed_git() expensive (the most
expensive part is finding duplicates in an unsorted
list, which is currently quadratic).
2. You need a large number of tag refs on the server side
that are candidates for auto-following (i.e., that the
client doesn't have). Each one triggers a re-read of
the pack directory.
3. Under normal circumstances, the client would
auto-follow those tags and after one large fetch, (2)
would no longer be true. But if those tags point to
history which is disconnected from what the client
otherwise fetches, then it will never auto-follow, and
those candidates will impact it on every fetch.
So when all three are true, each fetch pays an extra
O(nr_tags * nr_packs^2) cost, mostly in string comparisons
on the pack names. This was exacerbated by 47bf4b0
(prepare_packed_git_one: refactor duplicate-pack check,
2014-06-30) which uses a slightly more expensive string
check, under the assumption that the duplicate check doesn't
happen very often (and it shouldn't; the real problem here
is how often we are calling reprepare_packed_git()).
This patch teaches fetch to use HAS_SHA1_QUICK to sacrifice
accuracy for speed, in cases where we might be racy with a
simultaneous repack. This is similar to the fix in 0eeb077
(index-pack: avoid excessive re-reading of pack directory,
2015-06-09). As with that case, it's OK for has_sha1_file()
occasionally say "no I don't have it" when we do, because
the worst case is not a corruption, but simply that we may
fail to auto-follow a tag that points to it.
Here are results from the included perf script, which sets
up a situation similar to the one described above:
Test HEAD^ HEAD
----------------------------------------------------------
5550.4: fetch 11.21(10.42+0.78) 0.08(0.04+0.02) -99.3%
Reported-by: Vegard Nossum <vegard.nossum@oracle.com>
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2016-10-14 00:53:44 +08:00
|
|
|
if (item &&
|
2020-02-22 05:47:28 +08:00
|
|
|
!has_object_file_with_flags(&ref->old_oid, quick_flags) &&
|
2019-09-16 05:18:02 +08:00
|
|
|
!oidset_contains(&fetch_oids, &ref->old_oid) &&
|
2020-02-22 05:47:28 +08:00
|
|
|
!has_object_file_with_flags(&item->oid, quick_flags) &&
|
2019-09-16 05:18:02 +08:00
|
|
|
!oidset_contains(&fetch_oids, &item->oid))
|
2019-06-04 10:13:28 +08:00
|
|
|
clear_item(item);
|
2013-10-30 13:32:55 +08:00
|
|
|
item = NULL;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If item is non-NULL here, then we previously saw a
|
|
|
|
* ref not followed by a peeled reference, so we need
|
|
|
|
* to check if it is a lightweight tag that we want to
|
|
|
|
* fetch.
|
|
|
|
*/
|
fetch: use "quick" has_sha1_file for tag following
When we auto-follow tags in a fetch, we look at all of the
tags advertised by the remote and fetch ones where we don't
already have the tag, but we do have the object it peels to.
This involves a lot of calls to has_sha1_file(), some of
which we can reasonably expect to fail. Since 45e8a74
(has_sha1_file: re-check pack directory before giving up,
2013-08-30), this may cause many calls to
reprepare_packed_git(), which is potentially expensive.
This has gone unnoticed for several years because it
requires a fairly unique setup to matter:
1. You need to have a lot of packs on the client side to
make reprepare_packed_git() expensive (the most
expensive part is finding duplicates in an unsorted
list, which is currently quadratic).
2. You need a large number of tag refs on the server side
that are candidates for auto-following (i.e., that the
client doesn't have). Each one triggers a re-read of
the pack directory.
3. Under normal circumstances, the client would
auto-follow those tags and after one large fetch, (2)
would no longer be true. But if those tags point to
history which is disconnected from what the client
otherwise fetches, then it will never auto-follow, and
those candidates will impact it on every fetch.
So when all three are true, each fetch pays an extra
O(nr_tags * nr_packs^2) cost, mostly in string comparisons
on the pack names. This was exacerbated by 47bf4b0
(prepare_packed_git_one: refactor duplicate-pack check,
2014-06-30) which uses a slightly more expensive string
check, under the assumption that the duplicate check doesn't
happen very often (and it shouldn't; the real problem here
is how often we are calling reprepare_packed_git()).
This patch teaches fetch to use HAS_SHA1_QUICK to sacrifice
accuracy for speed, in cases where we might be racy with a
simultaneous repack. This is similar to the fix in 0eeb077
(index-pack: avoid excessive re-reading of pack directory,
2015-06-09). As with that case, it's OK for has_sha1_file()
occasionally say "no I don't have it" when we do, because
the worst case is not a corruption, but simply that we may
fail to auto-follow a tag that points to it.
Here are results from the included perf script, which sets
up a situation similar to the one described above:
Test HEAD^ HEAD
----------------------------------------------------------
5550.4: fetch 11.21(10.42+0.78) 0.08(0.04+0.02) -99.3%
Reported-by: Vegard Nossum <vegard.nossum@oracle.com>
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2016-10-14 00:53:44 +08:00
|
|
|
if (item &&
|
2020-02-22 05:47:28 +08:00
|
|
|
!has_object_file_with_flags(&item->oid, quick_flags) &&
|
2019-09-16 05:18:02 +08:00
|
|
|
!oidset_contains(&fetch_oids, &item->oid))
|
2019-06-04 10:13:28 +08:00
|
|
|
clear_item(item);
|
2013-10-30 13:32:55 +08:00
|
|
|
|
|
|
|
item = NULL;
|
|
|
|
|
|
|
|
/* skip duplicates and refs that we already have */
|
2018-09-26 04:25:04 +08:00
|
|
|
if (refname_hash_exists(&remote_refs, ref->name) ||
|
|
|
|
refname_hash_exists(&existing_refs, ref->name))
|
2013-10-30 13:32:55 +08:00
|
|
|
continue;
|
|
|
|
|
2018-09-26 04:25:04 +08:00
|
|
|
item = refname_hash_add(&remote_refs, ref->name, &ref->old_oid);
|
|
|
|
string_list_insert(&remote_refs_list, ref->name);
|
2013-10-30 13:32:55 +08:00
|
|
|
}
|
2020-11-03 02:55:05 +08:00
|
|
|
hashmap_clear_and_free(&existing_refs, struct refname_hash_entry, ent);
|
2013-10-30 13:32:55 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We may have a final lightweight tag that needs to be
|
|
|
|
* checked to see if it needs fetching.
|
|
|
|
*/
|
fetch: use "quick" has_sha1_file for tag following
When we auto-follow tags in a fetch, we look at all of the
tags advertised by the remote and fetch ones where we don't
already have the tag, but we do have the object it peels to.
This involves a lot of calls to has_sha1_file(), some of
which we can reasonably expect to fail. Since 45e8a74
(has_sha1_file: re-check pack directory before giving up,
2013-08-30), this may cause many calls to
reprepare_packed_git(), which is potentially expensive.
This has gone unnoticed for several years because it
requires a fairly unique setup to matter:
1. You need to have a lot of packs on the client side to
make reprepare_packed_git() expensive (the most
expensive part is finding duplicates in an unsorted
list, which is currently quadratic).
2. You need a large number of tag refs on the server side
that are candidates for auto-following (i.e., that the
client doesn't have). Each one triggers a re-read of
the pack directory.
3. Under normal circumstances, the client would
auto-follow those tags and after one large fetch, (2)
would no longer be true. But if those tags point to
history which is disconnected from what the client
otherwise fetches, then it will never auto-follow, and
those candidates will impact it on every fetch.
So when all three are true, each fetch pays an extra
O(nr_tags * nr_packs^2) cost, mostly in string comparisons
on the pack names. This was exacerbated by 47bf4b0
(prepare_packed_git_one: refactor duplicate-pack check,
2014-06-30) which uses a slightly more expensive string
check, under the assumption that the duplicate check doesn't
happen very often (and it shouldn't; the real problem here
is how often we are calling reprepare_packed_git()).
This patch teaches fetch to use HAS_SHA1_QUICK to sacrifice
accuracy for speed, in cases where we might be racy with a
simultaneous repack. This is similar to the fix in 0eeb077
(index-pack: avoid excessive re-reading of pack directory,
2015-06-09). As with that case, it's OK for has_sha1_file()
occasionally say "no I don't have it" when we do, because
the worst case is not a corruption, but simply that we may
fail to auto-follow a tag that points to it.
Here are results from the included perf script, which sets
up a situation similar to the one described above:
Test HEAD^ HEAD
----------------------------------------------------------
5550.4: fetch 11.21(10.42+0.78) 0.08(0.04+0.02) -99.3%
Reported-by: Vegard Nossum <vegard.nossum@oracle.com>
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2016-10-14 00:53:44 +08:00
|
|
|
if (item &&
|
2020-02-22 05:47:28 +08:00
|
|
|
!has_object_file_with_flags(&item->oid, quick_flags) &&
|
2019-09-16 05:18:02 +08:00
|
|
|
!oidset_contains(&fetch_oids, &item->oid))
|
2019-06-04 10:13:28 +08:00
|
|
|
clear_item(item);
|
2013-10-30 13:32:55 +08:00
|
|
|
|
|
|
|
/*
|
2018-09-26 04:25:04 +08:00
|
|
|
* For all the tags in the remote_refs_list,
|
2013-10-30 13:32:55 +08:00
|
|
|
* add them to the list of refs to be fetched
|
|
|
|
*/
|
2018-09-26 04:25:04 +08:00
|
|
|
for_each_string_list_item(remote_ref_item, &remote_refs_list) {
|
|
|
|
const char *refname = remote_ref_item->string;
|
2019-06-04 10:13:29 +08:00
|
|
|
struct ref *rm;
|
2019-10-07 07:30:36 +08:00
|
|
|
unsigned int hash = strhash(refname);
|
2018-09-26 04:25:04 +08:00
|
|
|
|
2019-10-07 07:30:36 +08:00
|
|
|
item = hashmap_get_entry_from_hash(&remote_refs, hash, refname,
|
|
|
|
struct refname_hash_entry, ent);
|
2018-09-26 04:25:04 +08:00
|
|
|
if (!item)
|
|
|
|
BUG("unseen remote ref?");
|
|
|
|
|
2013-10-30 13:32:55 +08:00
|
|
|
/* Unless we have already decided to ignore this item... */
|
2019-06-04 10:13:30 +08:00
|
|
|
if (item->ignore)
|
2019-06-04 10:13:29 +08:00
|
|
|
continue;
|
|
|
|
|
|
|
|
rm = alloc_ref(item->refname);
|
|
|
|
rm->peer_ref = alloc_ref(item->refname);
|
|
|
|
oidcpy(&rm->old_oid, &item->oid);
|
|
|
|
**tail = rm;
|
|
|
|
*tail = &rm->next;
|
2013-10-30 13:32:55 +08:00
|
|
|
}
|
2020-11-03 02:55:05 +08:00
|
|
|
hashmap_clear_and_free(&remote_refs, struct refname_hash_entry, ent);
|
2018-09-26 04:25:04 +08:00
|
|
|
string_list_clear(&remote_refs_list, 0);
|
2019-09-16 05:18:02 +08:00
|
|
|
oidset_clear(&fetch_oids);
|
2013-10-30 13:32:55 +08:00
|
|
|
}
|
2008-03-03 10:35:25 +08:00
|
|
|
|
2021-04-16 20:49:57 +08:00
|
|
|
static void filter_prefetch_refspec(struct refspec *rs)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (!prefetch)
|
|
|
|
return;
|
|
|
|
|
|
|
|
for (i = 0; i < rs->nr; i++) {
|
|
|
|
struct strbuf new_dst = STRBUF_INIT;
|
|
|
|
char *old_dst;
|
|
|
|
const char *sub = NULL;
|
|
|
|
|
|
|
|
if (rs->items[i].negative)
|
|
|
|
continue;
|
|
|
|
if (!rs->items[i].dst ||
|
|
|
|
(rs->items[i].src &&
|
|
|
|
!strncmp(rs->items[i].src, "refs/tags/", 10))) {
|
|
|
|
int j;
|
|
|
|
|
|
|
|
free(rs->items[i].src);
|
|
|
|
free(rs->items[i].dst);
|
|
|
|
|
|
|
|
for (j = i + 1; j < rs->nr; j++) {
|
|
|
|
rs->items[j - 1] = rs->items[j];
|
|
|
|
rs->raw[j - 1] = rs->raw[j];
|
|
|
|
}
|
|
|
|
rs->nr--;
|
|
|
|
i--;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
old_dst = rs->items[i].dst;
|
|
|
|
strbuf_addstr(&new_dst, "refs/prefetch/");
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If old_dst starts with "refs/", then place
|
|
|
|
* sub after that prefix. Otherwise, start at
|
|
|
|
* the beginning of the string.
|
|
|
|
*/
|
|
|
|
if (!skip_prefix(old_dst, "refs/", &sub))
|
|
|
|
sub = old_dst;
|
|
|
|
strbuf_addstr(&new_dst, sub);
|
|
|
|
|
|
|
|
rs->items[i].dst = strbuf_detach(&new_dst, NULL);
|
|
|
|
rs->items[i].force = 1;
|
|
|
|
|
|
|
|
free(old_dst);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-06-28 06:30:21 +08:00
|
|
|
/*
 * Build the list of refs to fetch ("ref map") for this fetch invocation.
 *
 * Sources, in priority order: command-line refspecs in 'rs', the --refmap
 * override ('refmap' global), the remote's configured fetch refspecs, or
 * the remote's HEAD as a last resort.  Also folds in --tags / tag
 * auto-following and opportunistic remote-tracking updates.
 *
 * 'tags' is one of TAGS_SET/TAGS_DEFAULT/TAGS_UNSET; '*autotags' is set to
 * 1 when any refspec stores into a destination ref, enabling tag
 * auto-following.  Returns a newly built linked list of struct ref.
 */
static struct ref *get_ref_map(struct remote *remote,
			       const struct ref *remote_refs,
			       struct refspec *rs,
			       int tags, int *autotags)
{
	int i;
	struct ref *rm;
	struct ref *ref_map = NULL;
	struct ref **tail = &ref_map;

	/* opportunistically-updated references: */
	struct ref *orefs = NULL, **oref_tail = &orefs;

	/* Lazily-populated map of the local refs (see loop near the end). */
	struct hashmap existing_refs;
	int existing_refs_populated = 0;

	/* Under --prefetch, strip tags and redirect into refs/prefetch/. */
	filter_prefetch_refspec(rs);
	if (remote)
		filter_prefetch_refspec(&remote->fetch);

	if (rs->nr) {
		struct refspec *fetch_refspec;

		/* Expand each command-line refspec against the remote's refs. */
		for (i = 0; i < rs->nr; i++) {
			get_fetch_map(remote_refs, &rs->items[i], &tail, 0);
			if (rs->items[i].dst && rs->items[i].dst[0])
				*autotags = 1;
		}
		/* Merge everything on the command line (but not --tags) */
		for (rm = ref_map; rm; rm = rm->next)
			rm->fetch_head_status = FETCH_HEAD_MERGE;

		/*
		 * For any refs that we happen to be fetching via
		 * command-line arguments, the destination ref might have
		 * been missing or have been different than the
		 * remote-tracking ref that would be derived from the
		 * configured refspec.  In these cases, we want to take the
		 * opportunity to update their configured remote-tracking
		 * reference.  However, we do not want to mention these
		 * entries in FETCH_HEAD at all, as they would simply be
		 * duplicates of existing entries, so we set them
		 * FETCH_HEAD_IGNORE below.
		 *
		 * We compute these entries now, based only on the refspecs
		 * specified on the command line.  But we add them to the
		 * list following the refspecs resulting from the tags
		 * option so that one of the latter, which has
		 * FETCH_HEAD_NOT_FOR_MERGE, is not removed by
		 * ref_remove_duplicates() in favor of one of these
		 * opportunistic entries with FETCH_HEAD_IGNORE.
		 */
		if (refmap.nr)
			fetch_refspec = &refmap;
		else
			fetch_refspec = &remote->fetch;

		for (i = 0; i < fetch_refspec->nr; i++)
			get_fetch_map(ref_map, &fetch_refspec->items[i], &oref_tail, 1);
	} else if (refmap.nr) {
		die("--refmap option is only meaningful with command-line refspec(s).");
	} else {
		/* Use the defaults */
		struct branch *branch = branch_get(NULL);
		int has_merge = branch_has_merge_config(branch);
		if (remote &&
		    (remote->fetch.nr ||
		     /* Note: has_merge implies non-NULL branch->remote_name */
		     (has_merge && !strcmp(branch->remote_name, remote->name)))) {
			for (i = 0; i < remote->fetch.nr; i++) {
				get_fetch_map(remote_refs, &remote->fetch.items[i], &tail, 0);
				if (remote->fetch.items[i].dst &&
				    remote->fetch.items[i].dst[0])
					*autotags = 1;
				/*
				 * With no branch.<name>.merge config, the
				 * first non-pattern refspec's ref is what
				 * gets merged.
				 */
				if (!i && !has_merge && ref_map &&
				    !remote->fetch.items[0].pattern)
					ref_map->fetch_head_status = FETCH_HEAD_MERGE;
			}
			/*
			 * if the remote we're fetching from is the same
			 * as given in branch.<name>.remote, we add the
			 * ref given in branch.<name>.merge, too.
			 *
			 * Note: has_merge implies non-NULL branch->remote_name
			 */
			if (has_merge &&
			    !strcmp(branch->remote_name, remote->name))
				add_merge_config(&ref_map, remote_refs, branch, &tail);
		} else if (!prefetch) {
			/* Last resort: fetch whatever the remote's HEAD points at. */
			ref_map = get_remote_ref(remote_refs, "HEAD");
			if (!ref_map)
				die(_("Couldn't find remote ref HEAD"));
			ref_map->fetch_head_status = FETCH_HEAD_MERGE;
			tail = &ref_map->next;
		}
	}

	if (tags == TAGS_SET)
		/* also fetch all tags */
		get_fetch_map(remote_refs, tag_refspec, &tail, 0);
	else if (tags == TAGS_DEFAULT && *autotags)
		find_non_local_tags(remote_refs, &ref_map, &tail);

	/* Now append any refs to be updated opportunistically: */
	*tail = orefs;
	for (rm = orefs; rm; rm = rm->next) {
		rm->fetch_head_status = FETCH_HEAD_IGNORE;
		tail = &rm->next;
	}

	/*
	 * apply negative refspecs first, before we remove duplicates. This is
	 * necessary as negative refspecs might remove an otherwise conflicting
	 * duplicate.
	 */
	if (rs->nr)
		ref_map = apply_negative_refspecs(ref_map, rs);
	else
		ref_map = apply_negative_refspecs(ref_map, &remote->fetch);

	ref_map = ref_remove_duplicates(ref_map);

	/*
	 * Record each peer ref's current local value in old_oid, loading
	 * all local refs into a hashmap the first time one is needed.
	 */
	for (rm = ref_map; rm; rm = rm->next) {
		if (rm->peer_ref) {
			const char *refname = rm->peer_ref->name;
			struct refname_hash_entry *peer_item;
			unsigned int hash = strhash(refname);

			if (!existing_refs_populated) {
				refname_hash_init(&existing_refs);
				for_each_ref(add_one_refname, &existing_refs);
				existing_refs_populated = 1;
			}

			peer_item = hashmap_get_entry_from_hash(&existing_refs,
						hash, refname,
						struct refname_hash_entry, ent);
			if (peer_item) {
				struct object_id *old_oid = &peer_item->oid;
				oidcpy(&rm->peer_ref->old_oid, old_oid);
			}
		}
	}
	if (existing_refs_populated)
		hashmap_clear_and_free(&existing_refs, struct refname_hash_entry, ent);

	return ref_map;
}
|
|
|
|
|
2009-05-25 18:40:54 +08:00
|
|
|
/* Non-zero return codes of s_update_ref(): generic failure vs. a ref-name
 * (directory/file) conflict reported by the ref transaction commit. */
#define STORE_REF_ERROR_OTHER 1
#define STORE_REF_ERROR_DF_CONFLICT 2
2007-09-11 11:03:25 +08:00
|
|
|
static int s_update_ref(const char *action,
|
|
|
|
struct ref *ref,
|
2021-01-12 20:27:48 +08:00
|
|
|
struct ref_transaction *transaction,
|
2007-09-11 11:03:25 +08:00
|
|
|
int check_old)
|
|
|
|
{
|
2017-03-29 03:46:26 +08:00
|
|
|
char *msg;
|
2007-09-11 11:03:25 +08:00
|
|
|
char *rla = getenv("GIT_REFLOG_ACTION");
|
2021-01-12 20:27:48 +08:00
|
|
|
struct ref_transaction *our_transaction = NULL;
|
2014-04-29 04:49:07 +08:00
|
|
|
struct strbuf err = STRBUF_INIT;
|
2021-01-12 20:27:43 +08:00
|
|
|
int ret;
|
2007-09-11 11:03:25 +08:00
|
|
|
|
2009-11-10 16:19:43 +08:00
|
|
|
if (dry_run)
|
|
|
|
return 0;
|
2007-09-11 11:03:25 +08:00
|
|
|
if (!rla)
|
2007-12-04 15:25:46 +08:00
|
|
|
rla = default_rla.buf;
|
2017-03-29 03:46:26 +08:00
|
|
|
msg = xstrfmt("%s: %s", rla, action);
|
2014-04-29 04:49:07 +08:00
|
|
|
|
2021-01-12 20:27:48 +08:00
|
|
|
/*
|
|
|
|
* If no transaction was passed to us, we manage the transaction
|
|
|
|
* ourselves. Otherwise, we trust the caller to handle the transaction
|
|
|
|
* lifecycle.
|
|
|
|
*/
|
2021-01-12 20:27:43 +08:00
|
|
|
if (!transaction) {
|
2021-01-12 20:27:48 +08:00
|
|
|
transaction = our_transaction = ref_transaction_begin(&err);
|
|
|
|
if (!transaction) {
|
|
|
|
ret = STORE_REF_ERROR_OTHER;
|
|
|
|
goto out;
|
|
|
|
}
|
2021-01-12 20:27:43 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
ret = ref_transaction_update(transaction, ref->name, &ref->new_oid,
|
|
|
|
check_old ? &ref->old_oid : NULL,
|
|
|
|
0, msg, &err);
|
2014-04-29 04:49:07 +08:00
|
|
|
if (ret) {
|
2021-01-12 20:27:43 +08:00
|
|
|
ret = STORE_REF_ERROR_OTHER;
|
|
|
|
goto out;
|
2014-04-29 04:49:07 +08:00
|
|
|
}
|
|
|
|
|
2021-01-12 20:27:48 +08:00
|
|
|
if (our_transaction) {
|
|
|
|
switch (ref_transaction_commit(our_transaction, &err)) {
|
|
|
|
case 0:
|
|
|
|
break;
|
|
|
|
case TRANSACTION_NAME_CONFLICT:
|
|
|
|
ret = STORE_REF_ERROR_DF_CONFLICT;
|
|
|
|
goto out;
|
|
|
|
default:
|
|
|
|
ret = STORE_REF_ERROR_OTHER;
|
|
|
|
goto out;
|
|
|
|
}
|
2021-01-12 20:27:43 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
2021-01-12 20:27:48 +08:00
|
|
|
ref_transaction_free(our_transaction);
|
2021-01-12 20:27:43 +08:00
|
|
|
if (ret)
|
|
|
|
error("%s", err.buf);
|
2014-04-29 04:49:07 +08:00
|
|
|
strbuf_release(&err);
|
2017-03-29 03:46:26 +08:00
|
|
|
free(msg);
|
2021-01-12 20:27:43 +08:00
|
|
|
return ret;
|
2007-09-11 11:03:25 +08:00
|
|
|
}
|
|
|
|
|
2016-07-02 00:03:30 +08:00
|
|
|
/* Column width used for the remote-ref side of "remote -> local" output. */
static int refcol_width = 10;
/* Set to 1 when fetch.output=compact is configured (see prepare_format_display). */
static int compact_format;
|
2016-07-02 00:03:30 +08:00
|
|
|
|
|
|
|
static void adjust_refcol_width(const struct ref *ref)
|
|
|
|
{
|
|
|
|
int max, rlen, llen, len;
|
|
|
|
|
|
|
|
/* uptodate lines are only shown on high verbosity level */
|
convert "oidcmp() == 0" to oideq()
Using the more restrictive oideq() should, in the long run,
give the compiler more opportunities to optimize these
callsites. For now, this conversion should be a complete
noop with respect to the generated code.
The result is also perhaps a little more readable, as it
avoids the "zero is equal" idiom. Since it's so prevalent in
C, I think seasoned programmers tend not to even notice it
anymore, but it can sometimes make for awkward double
negations (e.g., we can drop a few !!oidcmp() instances
here).
This patch was generated almost entirely by the included
coccinelle patch. This mechanical conversion should be
completely safe, because we check explicitly for cases where
oidcmp() is compared to 0, which is what oideq() is doing
under the hood. Note that we don't have to catch "!oidcmp()"
separately; coccinelle's standard isomorphisms make sure the
two are treated equivalently.
I say "almost" because I did hand-edit the coccinelle output
to fix up a few style violations (it mostly keeps the
original formatting, but sometimes unwraps long lines).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-08-29 05:22:40 +08:00
|
|
|
if (!verbosity && oideq(&ref->peer_ref->old_oid, &ref->old_oid))
|
2016-07-02 00:03:30 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
max = term_columns();
|
|
|
|
rlen = utf8_strwidth(prettify_refname(ref->name));
|
2016-07-02 00:03:31 +08:00
|
|
|
|
2016-07-02 00:03:30 +08:00
|
|
|
llen = utf8_strwidth(prettify_refname(ref->peer_ref->name));
|
|
|
|
|
|
|
|
/*
|
|
|
|
* rough estimation to see if the output line is too long and
|
|
|
|
* should not be counted (we can't do precise calculation
|
|
|
|
* anyway because we don't know if the error explanation part
|
|
|
|
* will be printed in update_local_ref)
|
|
|
|
*/
|
2016-07-02 00:03:31 +08:00
|
|
|
if (compact_format) {
|
|
|
|
llen = 0;
|
|
|
|
max = max * 2 / 3;
|
|
|
|
}
|
2016-07-02 00:03:30 +08:00
|
|
|
len = 21 /* flag and summary */ + rlen + 4 /* -> */ + llen;
|
|
|
|
if (len >= max)
|
|
|
|
return;
|
|
|
|
|
2016-07-02 00:03:31 +08:00
|
|
|
/*
|
|
|
|
* Not precise calculation for compact mode because '*' can
|
|
|
|
* appear on the left hand side of '->' and shrink the column
|
|
|
|
* back.
|
|
|
|
*/
|
2016-07-02 00:03:30 +08:00
|
|
|
if (refcol_width < rlen)
|
|
|
|
refcol_width = rlen;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void prepare_format_display(struct ref *ref_map)
|
|
|
|
{
|
|
|
|
struct ref *rm;
|
2016-07-02 00:03:31 +08:00
|
|
|
const char *format = "full";
|
|
|
|
|
2020-08-15 00:17:36 +08:00
|
|
|
git_config_get_string_tmp("fetch.output", &format);
|
2016-07-02 00:03:31 +08:00
|
|
|
if (!strcasecmp(format, "full"))
|
|
|
|
compact_format = 0;
|
|
|
|
else if (!strcasecmp(format, "compact"))
|
|
|
|
compact_format = 1;
|
|
|
|
else
|
|
|
|
die(_("configuration fetch.output contains invalid value %s"),
|
|
|
|
format);
|
2016-07-02 00:03:30 +08:00
|
|
|
|
|
|
|
for (rm = ref_map; rm; rm = rm->next) {
|
|
|
|
if (rm->status == REF_STATUS_REJECT_SHALLOW ||
|
|
|
|
!rm->peer_ref ||
|
|
|
|
!strcmp(rm->name, "HEAD"))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
adjust_refcol_width(rm);
|
|
|
|
}
|
|
|
|
}
|
2007-11-03 13:32:48 +08:00
|
|
|
|
2016-07-02 00:03:31 +08:00
|
|
|
static void print_remote_to_local(struct strbuf *display,
|
|
|
|
const char *remote, const char *local)
|
|
|
|
{
|
|
|
|
strbuf_addf(display, "%-*s -> %s", refcol_width, remote, local);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int find_and_replace(struct strbuf *haystack,
|
|
|
|
const char *needle,
|
|
|
|
const char *placeholder)
|
|
|
|
{
|
2019-01-25 17:51:22 +08:00
|
|
|
const char *p = NULL;
|
2016-07-02 00:03:31 +08:00
|
|
|
int plen, nlen;
|
|
|
|
|
2019-01-25 17:51:22 +08:00
|
|
|
nlen = strlen(needle);
|
|
|
|
if (ends_with(haystack->buf, needle))
|
|
|
|
p = haystack->buf + haystack->len - nlen;
|
|
|
|
else
|
|
|
|
p = strstr(haystack->buf, needle);
|
2016-07-02 00:03:31 +08:00
|
|
|
if (!p)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (p > haystack->buf && p[-1] != '/')
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
plen = strlen(p);
|
|
|
|
if (plen > nlen && p[nlen] != '/')
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
strbuf_splice(haystack, p - haystack->buf, nlen,
|
|
|
|
placeholder, strlen(placeholder));
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void print_compact(struct strbuf *display,
|
|
|
|
const char *remote, const char *local)
|
|
|
|
{
|
|
|
|
struct strbuf r = STRBUF_INIT;
|
|
|
|
struct strbuf l = STRBUF_INIT;
|
|
|
|
|
|
|
|
if (!strcmp(remote, local)) {
|
|
|
|
strbuf_addf(display, "%-*s -> *", refcol_width, remote);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
strbuf_addstr(&r, remote);
|
|
|
|
strbuf_addstr(&l, local);
|
|
|
|
|
|
|
|
if (!find_and_replace(&r, local, "*"))
|
|
|
|
find_and_replace(&l, remote, "*");
|
|
|
|
print_remote_to_local(display, r.buf, l.buf);
|
|
|
|
|
|
|
|
strbuf_release(&r);
|
|
|
|
strbuf_release(&l);
|
|
|
|
}
|
|
|
|
|
2016-06-26 13:58:07 +08:00
|
|
|
static void format_display(struct strbuf *display, char code,
|
|
|
|
const char *summary, const char *error,
|
2016-10-22 06:22:55 +08:00
|
|
|
const char *remote, const char *local,
|
|
|
|
int summary_width)
|
2016-06-26 13:58:07 +08:00
|
|
|
{
|
2016-10-22 06:22:55 +08:00
|
|
|
int width = (summary_width + strlen(summary) - gettext_width(summary));
|
|
|
|
|
|
|
|
strbuf_addf(display, "%c %-*s ", code, width, summary);
|
2016-07-02 00:03:31 +08:00
|
|
|
if (!compact_format)
|
|
|
|
print_remote_to_local(display, remote, local);
|
|
|
|
else
|
|
|
|
print_compact(display, remote, local);
|
2016-06-26 13:58:07 +08:00
|
|
|
if (error)
|
|
|
|
strbuf_addf(display, " (%s)", error);
|
|
|
|
}
|
2007-11-03 13:32:48 +08:00
|
|
|
|
2007-09-11 11:03:25 +08:00
|
|
|
static int update_local_ref(struct ref *ref,
|
2021-01-12 20:27:48 +08:00
|
|
|
struct ref_transaction *transaction,
|
2007-11-03 13:32:48 +08:00
|
|
|
const char *remote,
|
2012-04-17 06:08:49 +08:00
|
|
|
const struct ref *remote_ref,
|
2016-10-22 06:22:55 +08:00
|
|
|
struct strbuf *display,
|
|
|
|
int summary_width)
|
2007-09-11 11:03:25 +08:00
|
|
|
{
|
|
|
|
struct commit *current = NULL, *updated;
|
|
|
|
enum object_type type;
|
|
|
|
struct branch *current_branch = branch_get(NULL);
|
2009-05-14 05:22:04 +08:00
|
|
|
const char *pretty_ref = prettify_refname(ref->name);
|
2019-06-19 04:25:27 +08:00
|
|
|
int fast_forward = 0;
|
2007-09-11 11:03:25 +08:00
|
|
|
|
2018-04-26 02:20:59 +08:00
|
|
|
type = oid_object_info(the_repository, &ref->new_oid, NULL);
|
2007-09-11 11:03:25 +08:00
|
|
|
if (type < 0)
|
2015-11-10 10:22:20 +08:00
|
|
|
die(_("object %s not found"), oid_to_hex(&ref->new_oid));
|
2007-09-11 11:03:25 +08:00
|
|
|
|
convert "oidcmp() == 0" to oideq()
Using the more restrictive oideq() should, in the long run,
give the compiler more opportunities to optimize these
callsites. For now, this conversion should be a complete
noop with respect to the generated code.
The result is also perhaps a little more readable, as it
avoids the "zero is equal" idiom. Since it's so prevalent in
C, I think seasoned programmers tend not to even notice it
anymore, but it can sometimes make for awkward double
negations (e.g., we can drop a few !!oidcmp() instances
here).
This patch was generated almost entirely by the included
coccinelle patch. This mechanical conversion should be
completely safe, because we check explicitly for cases where
oidcmp() is compared to 0, which is what oideq() is doing
under the hood. Note that we don't have to catch "!oidcmp()"
separately; coccinelle's standard isomorphisms make sure the
two are treated equivalently.
I say "almost" because I did hand-edit the coccinelle output
to fix up a few style violations (it mostly keeps the
original formatting, but sometimes unwraps long lines).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-08-29 05:22:40 +08:00
|
|
|
if (oideq(&ref->old_oid, &ref->new_oid)) {
|
2008-11-15 08:14:24 +08:00
|
|
|
if (verbosity > 0)
|
2016-06-26 13:58:07 +08:00
|
|
|
format_display(display, '=', _("[up to date]"), NULL,
|
2016-10-22 06:22:55 +08:00
|
|
|
remote, pretty_ref, summary_width);
|
2007-09-11 11:03:25 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2007-09-16 14:31:26 +08:00
|
|
|
if (current_branch &&
|
|
|
|
!strcmp(ref->name, current_branch->name) &&
|
2007-09-11 11:03:25 +08:00
|
|
|
!(update_head_ok || is_bare_repository()) &&
|
2015-11-10 10:22:20 +08:00
|
|
|
!is_null_oid(&ref->old_oid)) {
|
2007-09-11 11:03:25 +08:00
|
|
|
/*
|
|
|
|
* If this is the head, and it's not okay to update
|
|
|
|
* the head, and the old value of the head isn't empty...
|
|
|
|
*/
|
2016-06-26 13:58:07 +08:00
|
|
|
format_display(display, '!', _("[rejected]"),
|
|
|
|
_("can't fetch in current branch"),
|
2016-10-22 06:22:55 +08:00
|
|
|
remote, pretty_ref, summary_width);
|
2007-09-11 11:03:25 +08:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2015-11-10 10:22:20 +08:00
|
|
|
if (!is_null_oid(&ref->old_oid) &&
|
2013-12-01 04:55:40 +08:00
|
|
|
starts_with(ref->name, "refs/tags/")) {
|
fetch: stop clobbering existing tags without --force
Change "fetch" to treat "+" in refspecs (aka --force) to mean we
should clobber a local tag of the same name.
This changes the long-standing behavior of "fetch" added in
853a3697dc ("[PATCH] Multi-head fetch.", 2005-08-20). Before this
change, all tag fetches effectively had --force enabled. See the
git-fetch-script code in fast_forward_local() with the comment:
> Tags need not be pointing at commits so there is no way to
> guarantee "fast-forward" anyway.
That commit and the rest of the history of "fetch" shows that the
"+" (--force) part of refpecs was only conceived for branch updates,
while tags have accepted any changes from upstream unconditionally and
clobbered the local tag object. Changing this behavior has been
discussed as early as 2011[1].
The current behavior doesn't make sense to me, it easily results in
local tags accidentally being clobbered. We could namespace our tags
per-remote and not locally populate refs/tags/*, but as with my
97716d217c ("fetch: add a --prune-tags option and fetch.pruneTags
config", 2018-02-09) it's easier to work around the current
implementation than to fix the root cause.
So this change implements suggestion #1 from Jeff's 2011 E-Mail[1],
"fetch" now only clobbers the tag if either "+" is provided as part of
the refspec, or if "--force" is provided on the command-line.
This also makes it nicely symmetrical with how "tag" itself works when
creating tags. I.e. we refuse to clobber any existing tags unless
"--force" is supplied. Now we can refuse all such clobbering, whether
it would happen by clobbering a local tag with "tag", or by fetching
it from the remote with "fetch".
Ref updates outside refs/{tags,heads/* are still still not symmetrical
with how "git push" works, as discussed in the recently changed
pull-fetch-param.txt documentation. This change brings the two
divergent behaviors more into line with one another. I don't think
there's any reason "fetch" couldn't fully converge with the behavior
used by "push", but that's a topic for another change.
One of the tests added in 31b808a032 ("clone --single: limit the fetch
refspec to fetched branch", 2012-09-20) is being changed to use
--force where a clone would clobber a tag. This changes nothing about
the existing behavior of the test.
1. https://public-inbox.org/git/20111123221658.GA22313@sigill.intra.peff.net/
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-09-01 04:10:04 +08:00
|
|
|
if (force || ref->force) {
|
|
|
|
int r;
|
2021-01-12 20:27:48 +08:00
|
|
|
r = s_update_ref("updating tag", ref, transaction, 0);
|
fetch: stop clobbering existing tags without --force
Change "fetch" to treat "+" in refspecs (aka --force) to mean we
should clobber a local tag of the same name.
This changes the long-standing behavior of "fetch" added in
853a3697dc ("[PATCH] Multi-head fetch.", 2005-08-20). Before this
change, all tag fetches effectively had --force enabled. See the
git-fetch-script code in fast_forward_local() with the comment:
> Tags need not be pointing at commits so there is no way to
> guarantee "fast-forward" anyway.
That commit and the rest of the history of "fetch" shows that the
"+" (--force) part of refspecs was only conceived for branch updates,
while tags have accepted any changes from upstream unconditionally and
clobbered the local tag object. Changing this behavior has been
discussed as early as 2011[1].
The current behavior doesn't make sense to me, it easily results in
local tags accidentally being clobbered. We could namespace our tags
per-remote and not locally populate refs/tags/*, but as with my
97716d217c ("fetch: add a --prune-tags option and fetch.pruneTags
config", 2018-02-09) it's easier to work around the current
implementation than to fix the root cause.
So this change implements suggestion #1 from Jeff's 2011 E-Mail[1],
"fetch" now only clobbers the tag if either "+" is provided as part of
the refspec, or if "--force" is provided on the command-line.
This also makes it nicely symmetrical with how "tag" itself works when
creating tags. I.e. we refuse to clobber any existing tags unless
"--force" is supplied. Now we can refuse all such clobbering, whether
it would happen by clobbering a local tag with "tag", or by fetching
it from the remote with "fetch".
Ref updates outside refs/{tags,heads}/* are still not symmetrical
with how "git push" works, as discussed in the recently changed
pull-fetch-param.txt documentation. This change brings the two
divergent behaviors more into line with one another. I don't think
there's any reason "fetch" couldn't fully converge with the behavior
used by "push", but that's a topic for another change.
One of the tests added in 31b808a032 ("clone --single: limit the fetch
refspec to fetched branch", 2012-09-20) is being changed to use
--force where a clone would clobber a tag. This changes nothing about
the existing behavior of the test.
1. https://public-inbox.org/git/20111123221658.GA22313@sigill.intra.peff.net/
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-09-01 04:10:04 +08:00
|
|
|
format_display(display, r ? '!' : 't', _("[tag update]"),
|
|
|
|
r ? _("unable to update local ref") : NULL,
|
|
|
|
remote, pretty_ref, summary_width);
|
|
|
|
return r;
|
|
|
|
} else {
|
|
|
|
format_display(display, '!', _("[rejected]"), _("would clobber existing tag"),
|
|
|
|
remote, pretty_ref, summary_width);
|
|
|
|
return 1;
|
|
|
|
}
|
2007-09-11 11:03:25 +08:00
|
|
|
}
|
|
|
|
|
2018-06-29 09:21:57 +08:00
|
|
|
current = lookup_commit_reference_gently(the_repository,
|
|
|
|
&ref->old_oid, 1);
|
|
|
|
updated = lookup_commit_reference_gently(the_repository,
|
|
|
|
&ref->new_oid, 1);
|
2007-09-11 11:03:25 +08:00
|
|
|
if (!current || !updated) {
|
2007-11-03 13:32:48 +08:00
|
|
|
const char *msg;
|
|
|
|
const char *what;
|
2008-06-27 11:59:50 +08:00
|
|
|
int r;
|
2012-04-17 06:08:50 +08:00
|
|
|
/*
|
|
|
|
* Nicely describe the new ref we're fetching.
|
|
|
|
* Base this on the remote's ref name, as it's
|
|
|
|
* more likely to follow a standard layout.
|
|
|
|
*/
|
|
|
|
const char *name = remote_ref ? remote_ref->name : "";
|
2013-12-01 04:55:40 +08:00
|
|
|
if (starts_with(name, "refs/tags/")) {
|
2007-09-11 11:03:25 +08:00
|
|
|
msg = "storing tag";
|
2011-02-23 07:41:53 +08:00
|
|
|
what = _("[new tag]");
|
2013-12-01 04:55:40 +08:00
|
|
|
} else if (starts_with(name, "refs/heads/")) {
|
2007-09-11 11:03:25 +08:00
|
|
|
msg = "storing head";
|
2011-02-23 07:41:53 +08:00
|
|
|
what = _("[new branch]");
|
2012-04-17 06:08:50 +08:00
|
|
|
} else {
|
|
|
|
msg = "storing ref";
|
|
|
|
what = _("[new ref]");
|
2007-11-03 13:32:48 +08:00
|
|
|
}
|
|
|
|
|
2021-01-12 20:27:48 +08:00
|
|
|
r = s_update_ref(msg, ref, transaction, 0);
|
2016-06-26 13:58:07 +08:00
|
|
|
format_display(display, r ? '!' : '*', what,
|
|
|
|
r ? _("unable to update local ref") : NULL,
|
2016-10-22 06:22:55 +08:00
|
|
|
remote, pretty_ref, summary_width);
|
2008-06-27 11:59:50 +08:00
|
|
|
return r;
|
2007-09-11 11:03:25 +08:00
|
|
|
}
|
|
|
|
|
2019-06-19 04:25:27 +08:00
|
|
|
if (fetch_show_forced_updates) {
|
|
|
|
uint64_t t_before = getnanotime();
|
|
|
|
fast_forward = in_merge_bases(current, updated);
|
|
|
|
forced_updates_ms += (getnanotime() - t_before) / 1000000;
|
|
|
|
} else {
|
|
|
|
fast_forward = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (fast_forward) {
|
2015-09-25 05:07:40 +08:00
|
|
|
struct strbuf quickref = STRBUF_INIT;
|
2008-06-27 11:59:50 +08:00
|
|
|
int r;
|
2019-06-19 04:25:26 +08:00
|
|
|
|
strbuf: convert strbuf_add_unique_abbrev to use struct object_id
Convert the declaration and definition of strbuf_add_unique_abbrev to
make it take a pointer to struct object_id. Predeclare the struct in
strbuf.h, as cache.h includes strbuf.h before it declares the struct,
and otherwise the struct declaration would have the wrong scope.
Apply the following semantic patch, along with the standard object_id
transforms, to adjust the callers:
@@
expression E1, E2, E3;
@@
- strbuf_add_unique_abbrev(E1, E2.hash, E3);
+ strbuf_add_unique_abbrev(E1, &E2, E3);
@@
expression E1, E2, E3;
@@
- strbuf_add_unique_abbrev(E1, E2->hash, E3);
+ strbuf_add_unique_abbrev(E1, E2, E3);
Signed-off-by: brian m. carlson <sandals@crustytoothpaste.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-03-12 10:27:28 +08:00
|
|
|
strbuf_add_unique_abbrev(&quickref, ¤t->object.oid, DEFAULT_ABBREV);
|
2015-09-25 05:07:40 +08:00
|
|
|
strbuf_addstr(&quickref, "..");
|
strbuf: convert strbuf_add_unique_abbrev to use struct object_id
Convert the declaration and definition of strbuf_add_unique_abbrev to
make it take a pointer to struct object_id. Predeclare the struct in
strbuf.h, as cache.h includes strbuf.h before it declares the struct,
and otherwise the struct declaration would have the wrong scope.
Apply the following semantic patch, along with the standard object_id
transforms, to adjust the callers:
@@
expression E1, E2, E3;
@@
- strbuf_add_unique_abbrev(E1, E2.hash, E3);
+ strbuf_add_unique_abbrev(E1, &E2, E3);
@@
expression E1, E2, E3;
@@
- strbuf_add_unique_abbrev(E1, E2->hash, E3);
+ strbuf_add_unique_abbrev(E1, E2, E3);
Signed-off-by: brian m. carlson <sandals@crustytoothpaste.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-03-12 10:27:28 +08:00
|
|
|
strbuf_add_unique_abbrev(&quickref, &ref->new_oid, DEFAULT_ABBREV);
|
2021-01-12 20:27:48 +08:00
|
|
|
r = s_update_ref("fast-forward", ref, transaction, 1);
|
2016-06-26 13:58:07 +08:00
|
|
|
format_display(display, r ? '!' : ' ', quickref.buf,
|
|
|
|
r ? _("unable to update local ref") : NULL,
|
2016-10-22 06:22:55 +08:00
|
|
|
remote, pretty_ref, summary_width);
|
2015-09-25 05:07:40 +08:00
|
|
|
strbuf_release(&quickref);
|
2008-06-27 11:59:50 +08:00
|
|
|
return r;
|
2007-11-03 13:32:48 +08:00
|
|
|
} else if (force || ref->force) {
|
2015-09-25 05:07:40 +08:00
|
|
|
struct strbuf quickref = STRBUF_INIT;
|
2008-06-27 11:59:50 +08:00
|
|
|
int r;
|
strbuf: convert strbuf_add_unique_abbrev to use struct object_id
Convert the declaration and definition of strbuf_add_unique_abbrev to
make it take a pointer to struct object_id. Predeclare the struct in
strbuf.h, as cache.h includes strbuf.h before it declares the struct,
and otherwise the struct declaration would have the wrong scope.
Apply the following semantic patch, along with the standard object_id
transforms, to adjust the callers:
@@
expression E1, E2, E3;
@@
- strbuf_add_unique_abbrev(E1, E2.hash, E3);
+ strbuf_add_unique_abbrev(E1, &E2, E3);
@@
expression E1, E2, E3;
@@
- strbuf_add_unique_abbrev(E1, E2->hash, E3);
+ strbuf_add_unique_abbrev(E1, E2, E3);
Signed-off-by: brian m. carlson <sandals@crustytoothpaste.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-03-12 10:27:28 +08:00
|
|
|
strbuf_add_unique_abbrev(&quickref, ¤t->object.oid, DEFAULT_ABBREV);
|
2015-09-25 05:07:40 +08:00
|
|
|
strbuf_addstr(&quickref, "...");
|
strbuf: convert strbuf_add_unique_abbrev to use struct object_id
Convert the declaration and definition of strbuf_add_unique_abbrev to
make it take a pointer to struct object_id. Predeclare the struct in
strbuf.h, as cache.h includes strbuf.h before it declares the struct,
and otherwise the struct declaration would have the wrong scope.
Apply the following semantic patch, along with the standard object_id
transforms, to adjust the callers:
@@
expression E1, E2, E3;
@@
- strbuf_add_unique_abbrev(E1, E2.hash, E3);
+ strbuf_add_unique_abbrev(E1, &E2, E3);
@@
expression E1, E2, E3;
@@
- strbuf_add_unique_abbrev(E1, E2->hash, E3);
+ strbuf_add_unique_abbrev(E1, E2, E3);
Signed-off-by: brian m. carlson <sandals@crustytoothpaste.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-03-12 10:27:28 +08:00
|
|
|
strbuf_add_unique_abbrev(&quickref, &ref->new_oid, DEFAULT_ABBREV);
|
2021-01-12 20:27:48 +08:00
|
|
|
r = s_update_ref("forced-update", ref, transaction, 1);
|
2016-06-26 13:58:07 +08:00
|
|
|
format_display(display, r ? '!' : '+', quickref.buf,
|
|
|
|
r ? _("unable to update local ref") : _("forced update"),
|
2016-10-22 06:22:55 +08:00
|
|
|
remote, pretty_ref, summary_width);
|
2015-09-25 05:07:40 +08:00
|
|
|
strbuf_release(&quickref);
|
2008-06-27 11:59:50 +08:00
|
|
|
return r;
|
2007-11-03 13:32:48 +08:00
|
|
|
} else {
|
2016-06-26 13:58:07 +08:00
|
|
|
format_display(display, '!', _("[rejected]"), _("non-fast-forward"),
|
2016-10-22 06:22:55 +08:00
|
|
|
remote, pretty_ref, summary_width);
|
2007-09-11 11:03:25 +08:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-16 06:06:54 +08:00
|
|
|
static int iterate_ref_map(void *cb_data, struct object_id *oid)
|
2011-09-02 06:43:35 +08:00
|
|
|
{
|
2011-09-03 07:22:47 +08:00
|
|
|
struct ref **rm = cb_data;
|
|
|
|
struct ref *ref = *rm;
|
2011-09-02 06:43:35 +08:00
|
|
|
|
2013-12-05 21:02:40 +08:00
|
|
|
while (ref && ref->status == REF_STATUS_REJECT_SHALLOW)
|
|
|
|
ref = ref->next;
|
2011-09-03 07:22:47 +08:00
|
|
|
if (!ref)
|
|
|
|
return -1; /* end of the list */
|
|
|
|
*rm = ref->next;
|
2017-10-16 06:06:54 +08:00
|
|
|
oidcpy(oid, &ref->old_oid);
|
2011-09-03 07:22:47 +08:00
|
|
|
return 0;
|
2011-09-02 06:43:35 +08:00
|
|
|
}
|
|
|
|
|
2021-01-12 20:27:35 +08:00
|
|
|
/*
 * State for writing FETCH_HEAD: the open file handle and a buffer
 * that accumulates formatted lines before they reach the file.
 */
struct fetch_head {
	FILE *fp;		/* NULL when FETCH_HEAD is not being written */
	struct strbuf buf;	/* pending lines; initialized only when fp is set */
};
|
|
|
|
|
|
|
|
static int open_fetch_head(struct fetch_head *fetch_head)
|
|
|
|
{
|
|
|
|
const char *filename = git_path_fetch_head(the_repository);
|
|
|
|
|
|
|
|
if (write_fetch_head) {
|
|
|
|
fetch_head->fp = fopen(filename, "a");
|
|
|
|
if (!fetch_head->fp)
|
|
|
|
return error_errno(_("cannot open %s"), filename);
|
2021-01-12 20:27:39 +08:00
|
|
|
strbuf_init(&fetch_head->buf, 0);
|
2021-01-12 20:27:35 +08:00
|
|
|
} else {
|
|
|
|
fetch_head->fp = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void append_fetch_head(struct fetch_head *fetch_head,
|
|
|
|
const struct object_id *old_oid,
|
|
|
|
enum fetch_head_status fetch_head_status,
|
|
|
|
const char *note,
|
|
|
|
const char *url, size_t url_len)
|
|
|
|
{
|
|
|
|
char old_oid_hex[GIT_MAX_HEXSZ + 1];
|
|
|
|
const char *merge_status_marker;
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
if (!fetch_head->fp)
|
|
|
|
return;
|
|
|
|
|
|
|
|
switch (fetch_head_status) {
|
|
|
|
case FETCH_HEAD_NOT_FOR_MERGE:
|
|
|
|
merge_status_marker = "not-for-merge";
|
|
|
|
break;
|
|
|
|
case FETCH_HEAD_MERGE:
|
|
|
|
merge_status_marker = "";
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
/* do not write anything to FETCH_HEAD */
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2021-01-12 20:27:39 +08:00
|
|
|
strbuf_addf(&fetch_head->buf, "%s\t%s\t%s",
|
|
|
|
oid_to_hex_r(old_oid_hex, old_oid), merge_status_marker, note);
|
2021-01-12 20:27:35 +08:00
|
|
|
for (i = 0; i < url_len; ++i)
|
|
|
|
if ('\n' == url[i])
|
2021-01-12 20:27:39 +08:00
|
|
|
strbuf_addstr(&fetch_head->buf, "\\n");
|
2021-01-12 20:27:35 +08:00
|
|
|
else
|
2021-01-12 20:27:39 +08:00
|
|
|
strbuf_addch(&fetch_head->buf, url[i]);
|
|
|
|
strbuf_addch(&fetch_head->buf, '\n');
|
|
|
|
|
fetch: implement support for atomic reference updates
When executing a fetch, then git will currently allocate one reference
transaction per reference update and directly commit it. This means that
fetches are non-atomic: even if some of the reference updates fail,
others may still succeed and modify local references.
This is fine in many scenarios, but this strategy has its downsides.
- The view of remote references may be inconsistent and may show a
bastardized state of the remote repository.
- Batching together updates may improve performance in certain
scenarios. While the impact probably isn't as pronounced with loose
references, the upcoming reftable backend may benefit as it needs to
write less files in case the update is batched.
- The reference-update hook is currently being executed twice per
updated reference. While this doesn't matter when there is no such
hook, we have seen severe performance regressions when doing a
git-fetch(1) with reference-transaction hook when the remote
repository has hundreds of thousands of references.
Similar to `git push --atomic`, this commit thus introduces atomic
fetches. Instead of allocating one reference transaction per updated
reference, it causes us to only allocate a single transaction and commit
it as soon as all updates were received. If locking of any reference
fails, then we abort the complete transaction and don't update any
reference, which gives us an all-or-nothing fetch.
Note that this may not completely fix the first of above downsides, as
the consistent view also depends on the server-side. If the server
doesn't have a consistent view of its own references during the
reference negotiation phase, then the client would get the same
inconsistent view the server has. This is a separate problem though and,
if it actually exists, can be fixed at a later point.
This commit also changes the way we write FETCH_HEAD in case `--atomic`
is passed. Instead of writing changes as we go, we need to accumulate
all changes first and only commit them at the end when we know that all
reference updates succeeded. Ideally, we'd just do so via a temporary
file so that we don't need to carry all updates in-memory. This isn't
trivially doable though considering the `--append` mode, where we do not
truncate the file but simply append to it. And given that we support
concurrent processes appending to FETCH_HEAD at the same time without
any loss of data, seeding the temporary file with current contents of
FETCH_HEAD initially and then doing a rename wouldn't work either. So
this commit implements the simple strategy of buffering all changes and
appending them to the file on commit.
Signed-off-by: Patrick Steinhardt <ps@pks.im>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-01-12 20:27:52 +08:00
|
|
|
/*
|
|
|
|
* When using an atomic fetch, we do not want to update FETCH_HEAD if
|
|
|
|
* any of the reference updates fails. We thus have to write all
|
|
|
|
* updates to a buffer first and only commit it as soon as all
|
|
|
|
* references have been successfully updated.
|
|
|
|
*/
|
|
|
|
if (!atomic_fetch) {
|
|
|
|
strbuf_write(&fetch_head->buf, fetch_head->fp);
|
|
|
|
strbuf_reset(&fetch_head->buf);
|
|
|
|
}
|
2021-01-12 20:27:35 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void commit_fetch_head(struct fetch_head *fetch_head)
|
|
|
|
{
|
fetch: implement support for atomic reference updates
When executing a fetch, then git will currently allocate one reference
transaction per reference update and directly commit it. This means that
fetches are non-atomic: even if some of the reference updates fail,
others may still succeed and modify local references.
This is fine in many scenarios, but this strategy has its downsides.
- The view of remote references may be inconsistent and may show a
bastardized state of the remote repository.
- Batching together updates may improve performance in certain
scenarios. While the impact probably isn't as pronounced with loose
references, the upcoming reftable backend may benefit as it needs to
write less files in case the update is batched.
- The reference-update hook is currently being executed twice per
updated reference. While this doesn't matter when there is no such
hook, we have seen severe performance regressions when doing a
git-fetch(1) with reference-transaction hook when the remote
repository has hundreds of thousands of references.
Similar to `git push --atomic`, this commit thus introduces atomic
fetches. Instead of allocating one reference transaction per updated
reference, it causes us to only allocate a single transaction and commit
it as soon as all updates were received. If locking of any reference
fails, then we abort the complete transaction and don't update any
reference, which gives us an all-or-nothing fetch.
Note that this may not completely fix the first of above downsides, as
the consistent view also depends on the server-side. If the server
doesn't have a consistent view of its own references during the
reference negotiation phase, then the client would get the same
inconsistent view the server has. This is a separate problem though and,
if it actually exists, can be fixed at a later point.
This commit also changes the way we write FETCH_HEAD in case `--atomic`
is passed. Instead of writing changes as we go, we need to accumulate
all changes first and only commit them at the end when we know that all
reference updates succeeded. Ideally, we'd just do so via a temporary
file so that we don't need to carry all updates in-memory. This isn't
trivially doable though considering the `--append` mode, where we do not
truncate the file but simply append to it. And given that we support
concurrent processes appending to FETCH_HEAD at the same time without
any loss of data, seeding the temporary file with current contents of
FETCH_HEAD initially and then doing a rename wouldn't work either. So
this commit implements the simple strategy of buffering all changes and
appending them to the file on commit.
Signed-off-by: Patrick Steinhardt <ps@pks.im>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-01-12 20:27:52 +08:00
|
|
|
if (!fetch_head->fp || !atomic_fetch)
|
|
|
|
return;
|
|
|
|
strbuf_write(&fetch_head->buf, fetch_head->fp);
|
2021-01-12 20:27:35 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void close_fetch_head(struct fetch_head *fetch_head)
|
|
|
|
{
|
|
|
|
if (!fetch_head->fp)
|
|
|
|
return;
|
|
|
|
|
|
|
|
fclose(fetch_head->fp);
|
2021-01-12 20:27:39 +08:00
|
|
|
strbuf_release(&fetch_head->buf);
|
2021-01-12 20:27:35 +08:00
|
|
|
}
|
|
|
|
|
2019-08-07 01:19:52 +08:00
|
|
|
/* Warning printed when the forced-update check has been disabled. */
static const char warn_show_forced_updates[] =
N_("Fetch normally indicates which branches had a forced update,\n"
"but that check has been disabled. To re-enable, use '--show-forced-updates'\n"
"flag or run 'git config fetch.showForcedUpdates true'.");
/* Warning printed when the forced-update check took long; %.2f is seconds. */
static const char warn_time_show_forced_updates[] =
N_("It took %.2f seconds to check forced updates. You can use\n"
"'--no-show-forced-updates' or run 'git config fetch.showForcedUpdates false'\n"
" to avoid this check.\n");
|
|
|
|
|
2009-04-17 16:20:11 +08:00
|
|
|
static int store_updated_refs(const char *raw_url, const char *remote_name,
|
fetch-pack: write shallow, then check connectivity
When fetching, connectivity is checked after the shallow file is
updated. There are 2 issues with this: (1) the connectivity check is
only performed up to ancestors of existing refs (which is not thorough
enough if we were deepening an existing ref in the first place), and (2)
there is no rollback of the shallow file if the connectivity check
fails.
To solve (1), update the connectivity check to check the ancestry chain
completely in the case of a deepening fetch by refraining from passing
"--not --all" when invoking rev-list in connected.c.
To solve (2), have fetch_pack() perform its own connectivity check
before updating the shallow file. To support existing use cases in which
"git fetch-pack" is used to download objects without much regard as to
the connectivity of the resulting objects with respect to the existing
repository, the connectivity check is only done if necessary (that is,
the fetch is not a clone, and the fetch involves shallow/deepen
functionality). "git fetch" still performs its own connectivity check,
preserving correctness but sometimes performing redundant work. This
redundancy is mitigated by the fact that fetch_pack() reports if it has
performed a connectivity check itself, and if the transport supports
connect or stateless-connect, it will bubble up that report so that "git
fetch" knows not to perform the connectivity check in such a case.
This was noticed when a user tried to deepen an existing repository by
fetching with --no-shallow from a server that did not send all necessary
objects - the connectivity check as run by "git fetch" succeeded, but a
subsequent "git fsck" failed.
Signed-off-by: Jonathan Tan <jonathantanmy@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-07-03 06:08:43 +08:00
|
|
|
int connectivity_checked, struct ref *ref_map)
|
2007-09-11 11:03:25 +08:00
|
|
|
{
|
2021-01-12 20:27:35 +08:00
|
|
|
struct fetch_head fetch_head;
|
2007-09-11 11:03:25 +08:00
|
|
|
struct commit *commit;
|
2014-01-03 10:28:51 +08:00
|
|
|
int url_len, i, rc = 0;
|
fetch: implement support for atomic reference updates
When executing a fetch, then git will currently allocate one reference
transaction per reference update and directly commit it. This means that
fetches are non-atomic: even if some of the reference updates fail,
others may still succeed and modify local references.
This is fine in many scenarios, but this strategy has its downsides.
- The view of remote references may be inconsistent and may show a
bastardized state of the remote repository.
- Batching together updates may improve performance in certain
scenarios. While the impact probably isn't as pronounced with loose
references, the upcoming reftable backend may benefit as it needs to
write less files in case the update is batched.
- The reference-update hook is currently being executed twice per
updated reference. While this doesn't matter when there is no such
hook, we have seen severe performance regressions when doing a
git-fetch(1) with reference-transaction hook when the remote
repository has hundreds of thousands of references.
Similar to `git push --atomic`, this commit thus introduces atomic
fetches. Instead of allocating one reference transaction per updated
reference, it causes us to only allocate a single transaction and commit
it as soon as all updates were received. If locking of any reference
fails, then we abort the complete transaction and don't update any
reference, which gives us an all-or-nothing fetch.
Note that this may not completely fix the first of above downsides, as
the consistent view also depends on the server-side. If the server
doesn't have a consistent view of its own references during the
reference negotiation phase, then the client would get the same
inconsistent view the server has. This is a separate problem though and,
if it actually exists, can be fixed at a later point.
This commit also changes the way we write FETCH_HEAD in case `--atomic`
is passed. Instead of writing changes as we go, we need to accumulate
all changes first and only commit them at the end when we know that all
reference updates succeeded. Ideally, we'd just do so via a temporary
file so that we don't need to carry all updates in-memory. This isn't
trivially doable though considering the `--append` mode, where we do not
truncate the file but simply append to it. And given that we support
concurrent processes appending to FETCH_HEAD at the same time without
any loss of data, seeding the temporary file with current contents of
FETCH_HEAD initially and then doing a rename wouldn't work either. So
this commit implements the simple strategy of buffering all changes and
appending them to the file on commit.
Signed-off-by: Patrick Steinhardt <ps@pks.im>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-01-12 20:27:52 +08:00
|
|
|
struct strbuf note = STRBUF_INIT, err = STRBUF_INIT;
|
|
|
|
struct ref_transaction *transaction = NULL;
|
2007-09-11 11:03:25 +08:00
|
|
|
const char *what, *kind;
|
|
|
|
struct ref *rm;
|
2014-11-30 16:24:27 +08:00
|
|
|
char *url;
|
2013-05-12 00:15:59 +08:00
|
|
|
int want_status;
|
2016-10-22 06:28:07 +08:00
|
|
|
int summary_width = transport_summary_width(ref_map);
|
2007-09-11 11:03:25 +08:00
|
|
|
|
2021-01-12 20:27:35 +08:00
|
|
|
rc = open_fetch_head(&fetch_head);
|
|
|
|
if (rc)
|
|
|
|
return -1;
|
2009-04-17 16:20:11 +08:00
|
|
|
|
2009-11-18 09:42:22 +08:00
|
|
|
if (raw_url)
|
|
|
|
url = transport_anonymize_url(raw_url);
|
|
|
|
else
|
|
|
|
url = xstrdup("foreign");
|
2011-09-02 06:43:35 +08:00
|
|
|
|
fetch-pack: write shallow, then check connectivity
When fetching, connectivity is checked after the shallow file is
updated. There are 2 issues with this: (1) the connectivity check is
only performed up to ancestors of existing refs (which is not thorough
enough if we were deepening an existing ref in the first place), and (2)
there is no rollback of the shallow file if the connectivity check
fails.
To solve (1), update the connectivity check to check the ancestry chain
completely in the case of a deepening fetch by refraining from passing
"--not --all" when invoking rev-list in connected.c.
To solve (2), have fetch_pack() perform its own connectivity check
before updating the shallow file. To support existing use cases in which
"git fetch-pack" is used to download objects without much regard as to
the connectivity of the resulting objects with respect to the existing
repository, the connectivity check is only done if necessary (that is,
the fetch is not a clone, and the fetch involves shallow/deepen
functionality). "git fetch" still performs its own connectivity check,
preserving correctness but sometimes performing redundant work. This
redundancy is mitigated by the fact that fetch_pack() reports if it has
performed a connectivity check itself, and if the transport supports
connect or stateless-connect, it will bubble up that report so that "git
fetch" knows not to perform the connectivity check in such a case.
This was noticed when a user tried to deepen an existing repository by
fetching with --no-shallow from a server that did not send all necessary
objects - the connectivity check as run by "git fetch" succeeded, but a
subsequent "git fsck" failed.
Signed-off-by: Jonathan Tan <jonathantanmy@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-07-03 06:08:43 +08:00
|
|
|
if (!connectivity_checked) {
|
2020-01-12 12:15:25 +08:00
|
|
|
struct check_connected_options opt = CHECK_CONNECTED_INIT;
|
|
|
|
|
fetch-pack: write shallow, then check connectivity
When fetching, connectivity is checked after the shallow file is
updated. There are 2 issues with this: (1) the connectivity check is
only performed up to ancestors of existing refs (which is not thorough
enough if we were deepening an existing ref in the first place), and (2)
there is no rollback of the shallow file if the connectivity check
fails.
To solve (1), update the connectivity check to check the ancestry chain
completely in the case of a deepening fetch by refraining from passing
"--not --all" when invoking rev-list in connected.c.
To solve (2), have fetch_pack() perform its own connectivity check
before updating the shallow file. To support existing use cases in which
"git fetch-pack" is used to download objects without much regard as to
the connectivity of the resulting objects with respect to the existing
repository, the connectivity check is only done if necessary (that is,
the fetch is not a clone, and the fetch involves shallow/deepen
functionality). "git fetch" still performs its own connectivity check,
preserving correctness but sometimes performing redundant work. This
redundancy is mitigated by the fact that fetch_pack() reports if it has
performed a connectivity check itself, and if the transport supports
connect or stateless-connect, it will bubble up that report so that "git
fetch" knows not to perform the connectivity check in such a case.
This was noticed when a user tried to deepen an existing repository by
fetching with --no-shallow from a server that did not send all necessary
objects - the connectivity check as run by "git fetch" succeeded, but a
subsequent "git fsck" failed.
Signed-off-by: Jonathan Tan <jonathantanmy@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-07-03 06:08:43 +08:00
|
|
|
rm = ref_map;
|
2020-01-12 12:15:25 +08:00
|
|
|
if (check_connected(iterate_ref_map, &rm, &opt)) {
|
fetch-pack: write shallow, then check connectivity
When fetching, connectivity is checked after the shallow file is
updated. There are 2 issues with this: (1) the connectivity check is
only performed up to ancestors of existing refs (which is not thorough
enough if we were deepening an existing ref in the first place), and (2)
there is no rollback of the shallow file if the connectivity check
fails.
To solve (1), update the connectivity check to check the ancestry chain
completely in the case of a deepening fetch by refraining from passing
"--not --all" when invoking rev-list in connected.c.
To solve (2), have fetch_pack() perform its own connectivity check
before updating the shallow file. To support existing use cases in which
"git fetch-pack" is used to download objects without much regard as to
the connectivity of the resulting objects with respect to the existing
repository, the connectivity check is only done if necessary (that is,
the fetch is not a clone, and the fetch involves shallow/deepen
functionality). "git fetch" still performs its own connectivity check,
preserving correctness but sometimes performing redundant work. This
redundancy is mitigated by the fact that fetch_pack() reports if it has
performed a connectivity check itself, and if the transport supports
connect or stateless-connect, it will bubble up that report so that "git
fetch" knows not to perform the connectivity check in such a case.
This was noticed when a user tried to deepen an existing repository by
fetching with --no-shallow from a server that did not send all necessary
objects - the connectivity check as run by "git fetch" succeeded, but a
subsequent "git fsck" failed.
Signed-off-by: Jonathan Tan <jonathantanmy@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-07-03 06:08:43 +08:00
|
|
|
rc = error(_("%s did not send all necessary objects\n"), url);
|
|
|
|
goto abort;
|
|
|
|
}
|
2011-10-07 15:40:22 +08:00
|
|
|
}
|
2011-09-02 06:43:35 +08:00
|
|
|
|
fetch: implement support for atomic reference updates
When executing a fetch, then git will currently allocate one reference
transaction per reference update and directly commit it. This means that
fetches are non-atomic: even if some of the reference updates fail,
others may still succeed and modify local references.
This is fine in many scenarios, but this strategy has its downsides.
- The view of remote references may be inconsistent and may show a
bastardized state of the remote repository.
- Batching together updates may improve performance in certain
scenarios. While the impact probably isn't as pronounced with loose
references, the upcoming reftable backend may benefit as it needs to
write less files in case the update is batched.
- The reference-update hook is currently being executed twice per
updated reference. While this doesn't matter when there is no such
hook, we have seen severe performance regressions when doing a
git-fetch(1) with reference-transaction hook when the remote
repository has hundreds of thousands of references.
Similar to `git push --atomic`, this commit thus introduces atomic
fetches. Instead of allocating one reference transaction per updated
reference, it causes us to only allocate a single transaction and commit
it as soon as all updates were received. If locking of any reference
fails, then we abort the complete transaction and don't update any
reference, which gives us an all-or-nothing fetch.
Note that this may not completely fix the first of above downsides, as
the consistent view also depends on the server-side. If the server
doesn't have a consistent view of its own references during the
reference negotiation phase, then the client would get the same
inconsistent view the server has. This is a separate problem though and,
if it actually exists, can be fixed at a later point.
This commit also changes the way we write FETCH_HEAD in case `--atomic`
is passed. Instead of writing changes as we go, we need to accumulate
all changes first and only commit them at the end when we know that all
reference updates succeeded. Ideally, we'd just do so via a temporary
file so that we don't need to carry all updates in-memory. This isn't
trivially doable though considering the `--append` mode, where we do not
truncate the file but simply append to it. And given that we support
concurrent processes appending to FETCH_HEAD at the same time without
any loss of data, seeding the temporary file with current contents of
FETCH_HEAD initially and then doing a rename wouldn't work either. So
this commit implements the simple strategy of buffering all changes and
appending them to the file on commit.
Signed-off-by: Patrick Steinhardt <ps@pks.im>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-01-12 20:27:52 +08:00
|
|
|
if (atomic_fetch) {
|
|
|
|
transaction = ref_transaction_begin(&err);
|
|
|
|
if (!transaction) {
|
|
|
|
error("%s", err.buf);
|
|
|
|
goto abort;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-02 00:03:30 +08:00
|
|
|
prepare_format_display(ref_map);
|
|
|
|
|
2011-12-27 00:16:56 +08:00
|
|
|
/*
|
2013-05-12 00:15:59 +08:00
|
|
|
* We do a pass for each fetch_head_status type in their enum order, so
|
|
|
|
* merged entries are written before not-for-merge. That lets readers
|
|
|
|
* use FETCH_HEAD as a refname to refer to the ref to be merged.
|
2011-12-27 00:16:56 +08:00
|
|
|
*/
|
2013-05-12 00:15:59 +08:00
|
|
|
for (want_status = FETCH_HEAD_MERGE;
|
|
|
|
want_status <= FETCH_HEAD_IGNORE;
|
|
|
|
want_status++) {
|
2011-12-27 00:16:56 +08:00
|
|
|
for (rm = ref_map; rm; rm = rm->next) {
|
|
|
|
struct ref *ref = NULL;
|
|
|
|
|
2013-12-05 21:02:40 +08:00
|
|
|
if (rm->status == REF_STATUS_REJECT_SHALLOW) {
|
|
|
|
if (want_status == FETCH_HEAD_MERGE)
|
2021-05-18 14:18:55 +08:00
|
|
|
warning(_("rejected %s because shallow roots are not allowed to be updated"),
|
2013-12-05 21:02:40 +08:00
|
|
|
rm->peer_ref ? rm->peer_ref->name : rm->name);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2018-06-29 09:21:57 +08:00
|
|
|
commit = lookup_commit_reference_gently(the_repository,
|
|
|
|
&rm->old_oid,
|
Convert lookup_commit* to struct object_id
Convert lookup_commit, lookup_commit_or_die,
lookup_commit_reference, and lookup_commit_reference_gently to take
struct object_id arguments.
Introduce a temporary in parse_object buffer in order to convert this
function. This is required since in order to convert parse_object and
parse_object_buffer, lookup_commit_reference_gently and
lookup_commit_or_die would need to be converted. Not introducing a
temporary would therefore require that lookup_commit_or_die take a
struct object_id *, but lookup_commit would take unsigned char *,
leaving a confusing and hard-to-use interface.
parse_object_buffer will lose this temporary in a later patch.
This commit was created with manual changes to commit.c, commit.h, and
object.c, plus the following semantic patch:
@@
expression E1, E2;
@@
- lookup_commit_reference_gently(E1.hash, E2)
+ lookup_commit_reference_gently(&E1, E2)
@@
expression E1, E2;
@@
- lookup_commit_reference_gently(E1->hash, E2)
+ lookup_commit_reference_gently(E1, E2)
@@
expression E1;
@@
- lookup_commit_reference(E1.hash)
+ lookup_commit_reference(&E1)
@@
expression E1;
@@
- lookup_commit_reference(E1->hash)
+ lookup_commit_reference(E1)
@@
expression E1;
@@
- lookup_commit(E1.hash)
+ lookup_commit(&E1)
@@
expression E1;
@@
- lookup_commit(E1->hash)
+ lookup_commit(E1)
@@
expression E1, E2;
@@
- lookup_commit_or_die(E1.hash, E2)
+ lookup_commit_or_die(&E1, E2)
@@
expression E1, E2;
@@
- lookup_commit_or_die(E1->hash, E2)
+ lookup_commit_or_die(E1, E2)
Signed-off-by: brian m. carlson <sandals@crustytoothpaste.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-05-07 06:10:10 +08:00
|
|
|
1);
|
2011-12-27 00:16:56 +08:00
|
|
|
if (!commit)
|
2013-05-12 00:15:59 +08:00
|
|
|
rm->fetch_head_status = FETCH_HEAD_NOT_FOR_MERGE;
|
2011-12-27 00:16:56 +08:00
|
|
|
|
2013-05-12 00:15:59 +08:00
|
|
|
if (rm->fetch_head_status != want_status)
|
2011-12-27 00:16:56 +08:00
|
|
|
continue;
|
|
|
|
|
|
|
|
if (rm->peer_ref) {
|
2015-09-25 05:08:09 +08:00
|
|
|
ref = alloc_ref(rm->peer_ref->name);
|
2015-11-10 10:22:20 +08:00
|
|
|
oidcpy(&ref->old_oid, &rm->peer_ref->old_oid);
|
|
|
|
oidcpy(&ref->new_oid, &rm->old_oid);
|
2011-12-27 00:16:56 +08:00
|
|
|
ref->force = rm->peer_ref->force;
|
|
|
|
}
|
2007-09-11 11:03:25 +08:00
|
|
|
|
fetch: do not look for submodule changes in unchanged refs
When fetching recursively with submodules, for each ref in the
superproject, we call check_for_new_submodule_commits() which collects all
the objects that have to be checked for submodule changes on
calculate_changed_submodule_paths(). On the first call, it also collects all
the existing refs for excluding them from the scan.
calculate_changed_submodule_paths() creates an argument array with all the
collected new objects, followed by --not and all the old objects. This argv
is passed to setup_revisions, which parses each argument, converts it back
to an oid and resolves the object. The parsing itself also does redundant
work, because it is treated like user input, while in fact it is a full
oid. So it needlessly attempts to look it up as ref (checks if it has ^, ~
etc.), checks if it is a file name etc.
For a repository with many refs, all of this is expensive. But if the fetch
in the superproject did not update the ref (i.e. the objects that are
required to exist in the submodule did not change), there is no need to
include it in the list.
Before commit be76c212 (fetch: ensure submodule objects fetched,
2018-12-06), submodule reference changes were only detected for refs that
were changed, but not for new refs. This commit covered also this case, but
what it did was to just include every ref.
This change should reduce the number of scanned refs by about half (except
the case of a no-op fetch, which will not scan any ref), because all the
existing refs will still be listed after --not.
The regression was reported here:
https://public-inbox.org/git/CAGHpTBKSUJzFSWc=uznSu2zB33qCSmKXM-
iAjxRCpqNK5bnhRg@mail.gmail.com/
Signed-off-by: Orgad Shaneh <orgads@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-09-04 21:50:49 +08:00
|
|
|
if (recurse_submodules != RECURSE_SUBMODULES_OFF &&
|
|
|
|
(!rm->peer_ref || !oideq(&ref->old_oid, &ref->new_oid))) {
|
2018-12-07 05:26:55 +08:00
|
|
|
check_for_new_submodule_commits(&rm->old_oid);
|
fetch: do not look for submodule changes in unchanged refs
When fetching recursively with submodules, for each ref in the
superproject, we call check_for_new_submodule_commits() which collects all
the objects that have to be checked for submodule changes on
calculate_changed_submodule_paths(). On the first call, it also collects all
the existing refs for excluding them from the scan.
calculate_changed_submodule_paths() creates an argument array with all the
collected new objects, followed by --not and all the old objects. This argv
is passed to setup_revisions, which parses each argument, converts it back
to an oid and resolves the object. The parsing itself also does redundant
work, because it is treated like user input, while in fact it is a full
oid. So it needlessly attempts to look it up as ref (checks if it has ^, ~
etc.), checks if it is a file name etc.
For a repository with many refs, all of this is expensive. But if the fetch
in the superproject did not update the ref (i.e. the objects that are
required to exist in the submodule did not change), there is no need to
include it in the list.
Before commit be76c212 (fetch: ensure submodule objects fetched,
2018-12-06), submodule reference changes were only detected for refs that
were changed, but not for new refs. This commit covered also this case, but
what it did was to just include every ref.
This change should reduce the number of scanned refs by about half (except
the case of a no-op fetch, which will not scan any ref), because all the
existing refs will still be listed after --not.
The regression was reported here:
https://public-inbox.org/git/CAGHpTBKSUJzFSWc=uznSu2zB33qCSmKXM-
iAjxRCpqNK5bnhRg@mail.gmail.com/
Signed-off-by: Orgad Shaneh <orgads@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-09-04 21:50:49 +08:00
|
|
|
}
|
2007-09-11 11:03:25 +08:00
|
|
|
|
2011-12-27 00:16:56 +08:00
|
|
|
if (!strcmp(rm->name, "HEAD")) {
|
|
|
|
kind = "";
|
|
|
|
what = "";
|
|
|
|
}
|
2019-11-26 19:18:26 +08:00
|
|
|
else if (skip_prefix(rm->name, "refs/heads/", &what))
|
2011-12-27 00:16:56 +08:00
|
|
|
kind = "branch";
|
2019-11-26 19:18:26 +08:00
|
|
|
else if (skip_prefix(rm->name, "refs/tags/", &what))
|
2011-12-27 00:16:56 +08:00
|
|
|
kind = "tag";
|
2019-11-26 19:18:26 +08:00
|
|
|
else if (skip_prefix(rm->name, "refs/remotes/", &what))
|
2011-12-27 00:16:56 +08:00
|
|
|
kind = "remote-tracking branch";
|
|
|
|
else {
|
|
|
|
kind = "";
|
|
|
|
what = rm->name;
|
|
|
|
}
|
2007-09-11 11:03:25 +08:00
|
|
|
|
2011-12-27 00:16:56 +08:00
|
|
|
url_len = strlen(url);
|
|
|
|
for (i = url_len - 1; url[i] == '/' && 0 <= i; i--)
|
|
|
|
;
|
|
|
|
url_len = i + 1;
|
|
|
|
if (4 < i && !strncmp(".git", url + i - 3, 4))
|
|
|
|
url_len = i - 3;
|
|
|
|
|
|
|
|
strbuf_reset(¬e);
|
|
|
|
if (*what) {
|
|
|
|
if (*kind)
|
|
|
|
strbuf_addf(¬e, "%s ", kind);
|
|
|
|
strbuf_addf(¬e, "'%s' of ", what);
|
|
|
|
}
|
2021-01-12 20:27:35 +08:00
|
|
|
|
|
|
|
append_fetch_head(&fetch_head, &rm->old_oid,
|
|
|
|
rm->fetch_head_status,
|
|
|
|
note.buf, url, url_len);
|
2011-12-27 00:16:56 +08:00
|
|
|
|
|
|
|
strbuf_reset(¬e);
|
|
|
|
if (ref) {
|
fetch: implement support for atomic reference updates
When executing a fetch, then git will currently allocate one reference
transaction per reference update and directly commit it. This means that
fetches are non-atomic: even if some of the reference updates fail,
others may still succeed and modify local references.
This is fine in many scenarios, but this strategy has its downsides.
- The view of remote references may be inconsistent and may show a
bastardized state of the remote repository.
- Batching together updates may improve performance in certain
scenarios. While the impact probably isn't as pronounced with loose
references, the upcoming reftable backend may benefit as it needs to
write less files in case the update is batched.
- The reference-update hook is currently being executed twice per
updated reference. While this doesn't matter when there is no such
hook, we have seen severe performance regressions when doing a
git-fetch(1) with reference-transaction hook when the remote
repository has hundreds of thousands of references.
Similar to `git push --atomic`, this commit thus introduces atomic
fetches. Instead of allocating one reference transaction per updated
reference, it causes us to only allocate a single transaction and commit
it as soon as all updates were received. If locking of any reference
fails, then we abort the complete transaction and don't update any
reference, which gives us an all-or-nothing fetch.
Note that this may not completely fix the first of above downsides, as
the consistent view also depends on the server-side. If the server
doesn't have a consistent view of its own references during the
reference negotiation phase, then the client would get the same
inconsistent view the server has. This is a separate problem though and,
if it actually exists, can be fixed at a later point.
This commit also changes the way we write FETCH_HEAD in case `--atomic`
is passed. Instead of writing changes as we go, we need to accumulate
all changes first and only commit them at the end when we know that all
reference updates succeeded. Ideally, we'd just do so via a temporary
file so that we don't need to carry all updates in-memory. This isn't
trivially doable though considering the `--append` mode, where we do not
truncate the file but simply append to it. And given that we support
concurrent processes appending to FETCH_HEAD at the same time without
any loss of data, seeding the temporary file with current contents of
FETCH_HEAD initially and then doing a rename wouldn't work either. So
this commit implements the simple strategy of buffering all changes and
appending them to the file on commit.
Signed-off-by: Patrick Steinhardt <ps@pks.im>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-01-12 20:27:52 +08:00
|
|
|
rc |= update_local_ref(ref, transaction, what,
|
2021-01-12 20:27:48 +08:00
|
|
|
rm, ¬e, summary_width);
|
2011-12-27 00:16:56 +08:00
|
|
|
free(ref);
|
2020-09-03 05:05:39 +08:00
|
|
|
} else if (write_fetch_head || dry_run) {
|
|
|
|
/*
|
|
|
|
* Display fetches written to FETCH_HEAD (or
|
|
|
|
* would be written to FETCH_HEAD, if --dry-run
|
|
|
|
* is set).
|
|
|
|
*/
|
2016-06-26 13:58:07 +08:00
|
|
|
format_display(¬e, '*',
|
|
|
|
*kind ? kind : "branch", NULL,
|
|
|
|
*what ? what : "HEAD",
|
2016-10-22 06:22:55 +08:00
|
|
|
"FETCH_HEAD", summary_width);
|
2020-09-03 05:05:39 +08:00
|
|
|
}
|
2011-12-27 00:16:56 +08:00
|
|
|
if (note.len) {
|
|
|
|
if (verbosity >= 0 && !shown_url) {
|
|
|
|
fprintf(stderr, _("From %.*s\n"),
|
|
|
|
url_len, url);
|
|
|
|
shown_url = 1;
|
|
|
|
}
|
|
|
|
if (verbosity >= 0)
|
|
|
|
fprintf(stderr, " %s\n", note.buf);
|
2007-11-03 13:32:48 +08:00
|
|
|
}
|
|
|
|
}
|
2007-09-11 11:03:25 +08:00
|
|
|
}
|
2011-10-07 15:40:22 +08:00
|
|
|
|
fetch: implement support for atomic reference updates
When executing a fetch, then git will currently allocate one reference
transaction per reference update and directly commit it. This means that
fetches are non-atomic: even if some of the reference updates fail,
others may still succeed and modify local references.
This is fine in many scenarios, but this strategy has its downsides.
- The view of remote references may be inconsistent and may show a
bastardized state of the remote repository.
- Batching together updates may improve performance in certain
scenarios. While the impact probably isn't as pronounced with loose
references, the upcoming reftable backend may benefit as it needs to
write less files in case the update is batched.
- The reference-update hook is currently being executed twice per
updated reference. While this doesn't matter when there is no such
hook, we have seen severe performance regressions when doing a
git-fetch(1) with reference-transaction hook when the remote
repository has hundreds of thousands of references.
Similar to `git push --atomic`, this commit thus introduces atomic
fetches. Instead of allocating one reference transaction per updated
reference, it causes us to only allocate a single transaction and commit
it as soon as all updates were received. If locking of any reference
fails, then we abort the complete transaction and don't update any
reference, which gives us an all-or-nothing fetch.
Note that this may not completely fix the first of above downsides, as
the consistent view also depends on the server-side. If the server
doesn't have a consistent view of its own references during the
reference negotiation phase, then the client would get the same
inconsistent view the server has. This is a separate problem though and,
if it actually exists, can be fixed at a later point.
This commit also changes the way we write FETCH_HEAD in case `--atomic`
is passed. Instead of writing changes as we go, we need to accumulate
all changes first and only commit them at the end when we know that all
reference updates succeeded. Ideally, we'd just do so via a temporary
file so that we don't need to carry all updates in-memory. This isn't
trivially doable though considering the `--append` mode, where we do not
truncate the file but simply append to it. And given that we support
concurrent processes appending to FETCH_HEAD at the same time without
any loss of data, seeding the temporary file with current contents of
FETCH_HEAD initially and then doing a rename wouldn't work either. So
this commit implements the simple strategy of buffering all changes and
appending them to the file on commit.
Signed-off-by: Patrick Steinhardt <ps@pks.im>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-01-12 20:27:52 +08:00
|
|
|
if (!rc && transaction) {
|
|
|
|
rc = ref_transaction_commit(transaction, &err);
|
|
|
|
if (rc) {
|
|
|
|
error("%s", err.buf);
|
|
|
|
goto abort;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-12 20:27:35 +08:00
|
|
|
if (!rc)
|
|
|
|
commit_fetch_head(&fetch_head);
|
|
|
|
|
2009-05-25 18:40:54 +08:00
|
|
|
if (rc & STORE_REF_ERROR_DF_CONFLICT)
|
2011-02-23 07:41:51 +08:00
|
|
|
error(_("some local refs could not be updated; try running\n"
|
2008-06-27 12:01:41 +08:00
|
|
|
" 'git remote prune %s' to remove any old, conflicting "
|
2011-02-23 07:41:51 +08:00
|
|
|
"branches"), remote_name);
|
2011-10-07 15:40:22 +08:00
|
|
|
|
2019-06-19 04:25:27 +08:00
|
|
|
if (advice_fetch_show_forced_updates) {
|
|
|
|
if (!fetch_show_forced_updates) {
|
2019-08-07 01:19:52 +08:00
|
|
|
warning(_(warn_show_forced_updates));
|
2019-06-19 04:25:27 +08:00
|
|
|
} else if (forced_updates_ms > FORCED_UPDATES_DELAY_WARNING_IN_MS) {
|
2019-08-07 01:19:52 +08:00
|
|
|
warning(_(warn_time_show_forced_updates),
|
2019-06-19 04:25:27 +08:00
|
|
|
forced_updates_ms / 1000.0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-10-07 15:40:22 +08:00
|
|
|
abort:
|
2011-12-08 16:43:19 +08:00
|
|
|
strbuf_release(¬e);
|
fetch: implement support for atomic reference updates
When executing a fetch, then git will currently allocate one reference
transaction per reference update and directly commit it. This means that
fetches are non-atomic: even if some of the reference updates fail,
others may still succeed and modify local references.
This is fine in many scenarios, but this strategy has its downsides.
- The view of remote references may be inconsistent and may show a
bastardized state of the remote repository.
- Batching together updates may improve performance in certain
scenarios. While the impact probably isn't as pronounced with loose
references, the upcoming reftable backend may benefit as it needs to
write less files in case the update is batched.
- The reference-update hook is currently being executed twice per
updated reference. While this doesn't matter when there is no such
hook, we have seen severe performance regressions when doing a
git-fetch(1) with reference-transaction hook when the remote
repository has hundreds of thousands of references.
Similar to `git push --atomic`, this commit thus introduces atomic
fetches. Instead of allocating one reference transaction per updated
reference, it causes us to only allocate a single transaction and commit
it as soon as all updates were received. If locking of any reference
fails, then we abort the complete transaction and don't update any
reference, which gives us an all-or-nothing fetch.
Note that this may not completely fix the first of above downsides, as
the consistent view also depends on the server-side. If the server
doesn't have a consistent view of its own references during the
reference negotiation phase, then the client would get the same
inconsistent view the server has. This is a separate problem though and,
if it actually exists, can be fixed at a later point.
This commit also changes the way we write FETCH_HEAD in case `--atomic`
is passed. Instead of writing changes as we go, we need to accumulate
all changes first and only commit them at the end when we know that all
reference updates succeeded. Ideally, we'd just do so via a temporary
file so that we don't need to carry all updates in-memory. This isn't
trivially doable though considering the `--append` mode, where we do not
truncate the file but simply append to it. And given that we support
concurrent processes appending to FETCH_HEAD at the same time without
any loss of data, seeding the temporary file with current contents of
FETCH_HEAD initially and then doing a rename wouldn't work either. So
this commit implements the simple strategy of buffering all changes and
appending them to the file on commit.
Signed-off-by: Patrick Steinhardt <ps@pks.im>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-01-12 20:27:52 +08:00
|
|
|
strbuf_release(&err);
|
|
|
|
ref_transaction_free(transaction);
|
2011-10-07 15:40:22 +08:00
|
|
|
free(url);
|
2021-01-12 20:27:35 +08:00
|
|
|
close_fetch_head(&fetch_head);
|
2008-05-28 23:29:36 +08:00
|
|
|
return rc;
|
2007-09-11 11:03:25 +08:00
|
|
|
}
|
|
|
|
|
2007-11-11 15:29:47 +08:00
|
|
|
/*
|
|
|
|
* We would want to bypass the object transfer altogether if
|
quickfetch(): Prevent overflow of the rev-list command line
quickfetch() calls rev-list to check whether the objects we are about to
fetch are already present in the repo (if so, we can skip the object fetch).
However, when there are many (~1000) refs to be fetched, the rev-list
command line grows larger than the maximum command line size on some systems
(32K in Windows). This causes rev-list to fail, making quickfetch() return
non-zero, which unnecessarily triggers the transport machinery. This somehow
causes fetch to fail with an exit code.
By using the --stdin option to rev-list (and feeding the object list to its
standard input), we prevent the overflow of the rev-list command line,
which causes quickfetch(), and subsequently the overall fetch, to succeed.
However, using rev-list --stdin is not entirely straightforward: rev-list
terminates immediately when encountering an unknown object, which can
trigger SIGPIPE if we are still writing object's to its standard input.
We therefore temporarily ignore SIGPIPE so that the fetch process is not
terminated.
The patch also contains a testcase to verify the fix (note that before
the patch, the testcase would only fail on msysGit).
Signed-off-by: Johan Herland <johan@herland.net>
Improved-by: Johannes Sixt <j6t@kdbg.org>
Improved-by: Alex Riesen <raa.lkml@gmail.com>
Tested-by: Peter Krefting <peter@softwolves.pp.se>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2009-07-10 07:52:30 +08:00
|
|
|
* everything we are going to fetch already exists and is connected
|
2007-11-11 15:29:47 +08:00
|
|
|
* locally.
|
|
|
|
*/
|
2018-09-22 02:22:38 +08:00
|
|
|
static int check_exist_and_connected(struct ref *ref_map)
|
2007-11-11 15:29:47 +08:00
|
|
|
{
|
2011-09-03 07:22:47 +08:00
|
|
|
struct ref *rm = ref_map;
|
check_everything_connected: use a struct with named options
The number of variants of check_everything_connected has
grown over the years, so that the "real" function takes
several possibly-zero, possibly-NULL arguments. We hid the
complexity behind some wrapper functions, but this doesn't
scale well when we want to add new options.
If we add more wrapper variants to handle the new options,
then we can get a combinatorial explosion when those options
might be used together (right now nobody wants to use both
"shallow" and "transport" together, so we get by with just a
few wrappers).
If instead we add new parameters to each function, each of
which can have a default value, then callers who want the
defaults end up with confusing invocations like:
check_everything_connected(fn, 0, data, -1, 0, NULL);
where it is unclear which parameter is which (and every
caller needs updated when we add new options).
Instead, let's add a struct to hold all of the optional
parameters. This is a little more verbose for the callers
(who have to declare the struct and fill it in), but it
makes their code much easier to follow, because every option
is named as it is set (and unused options do not have to be
mentioned at all).
Note that we could also stick the iteration function and its
callback data into the option struct, too. But since those
are required for each call, by avoiding doing so, we can let
very simple callers just pass "NULL" for the options and not
worry about the struct at all.
While we're touching each site, let's also rename the
function to check_connected(). The existing name was quite
long, and not all of the wrappers even used the full name.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2016-07-15 18:30:40 +08:00
|
|
|
struct check_connected_options opt = CHECK_CONNECTED_INIT;
|
2018-09-22 02:22:38 +08:00
|
|
|
struct ref *r;
|
2011-09-03 07:22:47 +08:00
|
|
|
|
2007-11-11 15:29:47 +08:00
|
|
|
/*
|
|
|
|
* If we are deepening a shallow clone we already have these
|
|
|
|
* objects reachable. Running rev-list here will return with
|
|
|
|
* a good (0) exit status and we'll bypass the fetch that we
|
|
|
|
* really need to perform. Claiming failure now will ensure
|
|
|
|
* we perform the network exchange to deepen our history.
|
|
|
|
*/
|
2016-06-12 18:53:59 +08:00
|
|
|
if (deepen)
|
2007-11-11 15:29:47 +08:00
|
|
|
return -1;
|
2018-09-22 02:22:38 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* check_connected() allows objects to merely be promised, but
|
|
|
|
* we need all direct targets to exist.
|
|
|
|
*/
|
|
|
|
for (r = rm; r; r = r->next) {
|
2019-11-06 02:56:19 +08:00
|
|
|
if (!has_object_file_with_flags(&r->old_oid,
|
|
|
|
OBJECT_INFO_SKIP_FETCH_OBJECT))
|
2018-09-22 02:22:38 +08:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
check_everything_connected: use a struct with named options
The number of variants of check_everything_connected has
grown over the years, so that the "real" function takes
several possibly-zero, possibly-NULL arguments. We hid the
complexity behind some wrapper functions, but this doesn't
scale well when we want to add new options.
If we add more wrapper variants to handle the new options,
then we can get a combinatorial explosion when those options
might be used together (right now nobody wants to use both
"shallow" and "transport" together, so we get by with just a
few wrappers).
If instead we add new parameters to each function, each of
which can have a default value, then callers who want the
defaults end up with confusing invocations like:
check_everything_connected(fn, 0, data, -1, 0, NULL);
where it is unclear which parameter is which (and every
caller needs updated when we add new options).
Instead, let's add a struct to hold all of the optional
parameters. This is a little more verbose for the callers
(who have to declare the struct and fill it in), but it
makes their code much easier to follow, because every option
is named as it is set (and unused options do not have to be
mentioned at all).
Note that we could also stick the iteration function and its
callback data into the option struct, too. But since those
are required for each call, by avoiding doing so, we can let
very simple callers just pass "NULL" for the options and not
worry about the struct at all.
While we're touching each site, let's also rename the
function to check_connected(). The existing name was quite
long, and not all of the wrappers even used the full name.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2016-07-15 18:30:40 +08:00
|
|
|
opt.quiet = 1;
|
|
|
|
return check_connected(iterate_ref_map, &rm, &opt);
|
2007-11-11 15:29:47 +08:00
|
|
|
}
|
|
|
|
|
fetch-pack: unify ref in and out param
When a user fetches:
- at least one up-to-date ref and at least one non-up-to-date ref,
- using HTTP with protocol v0 (or something else that uses the fetch
command of a remote helper)
some refs might not be updated after the fetch.
This bug was introduced in commit 989b8c4452 ("fetch-pack: put shallow
info in output parameter", 2018-06-28) which allowed transports to
report the refs that they have fetched in a new out-parameter
"fetched_refs". If they do so, transport_fetch_refs() makes this
information available to its caller.
Users of "fetched_refs" rely on the following 3 properties:
(1) it is the complete list of refs that was passed to
transport_fetch_refs(),
(2) it has shallow information (REF_STATUS_REJECT_SHALLOW set if
relevant), and
(3) it has updated OIDs if ref-in-want was used (introduced after
989b8c4452).
In an effort to satisfy (1), whenever transport_fetch_refs()
filters the refs sent to the transport, it re-adds the filtered refs to
whatever the transport supplies before returning it to the user.
However, the implementation in 989b8c4452 unconditionally re-adds the
filtered refs without checking if the transport refrained from reporting
anything in "fetched_refs" (which it is allowed to do), resulting in an
incomplete list, no longer satisfying (1).
An earlier effort to resolve this [1] solved the issue by readding the
filtered refs only if the transport did not refrain from reporting in
"fetched_refs", but after further discussion, it seems that the better
solution is to revert the API change that introduced "fetched_refs".
This API change was first suggested as part of a ref-in-want
implementation that allowed for ref patterns and, thus, there could be
drastic differences between the input refs and the refs actually fetched
[2]; we eventually decided to only allow exact ref names, but this API
change remained even though its necessity was decreased.
Therefore, revert this API change by reverting commit 989b8c4452, and
make receive_wanted_refs() update the OIDs in the sought array (like how
update_shallow() updates shallow information in the sought array)
instead. A test is also included to show that the user-visible bug
discussed at the beginning of this commit message no longer exists.
[1] https://public-inbox.org/git/20180801171806.GA122458@google.com/
[2] https://public-inbox.org/git/86a128c5fb710a41791e7183207c4d64889f9307.1485381677.git.jonathantanmy@google.com/
Signed-off-by: Jonathan Tan <jonathantanmy@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-08-02 04:13:20 +08:00
|
|
|
static int fetch_refs(struct transport *transport, struct ref *ref_map)
|
2007-09-11 11:03:25 +08:00
|
|
|
{
|
2018-09-22 02:22:38 +08:00
|
|
|
int ret = check_exist_and_connected(ref_map);
|
2019-10-03 07:49:28 +08:00
|
|
|
if (ret) {
|
|
|
|
trace2_region_enter("fetch", "fetch_refs", the_repository);
|
fetch-pack: unify ref in and out param
When a user fetches:
- at least one up-to-date ref and at least one non-up-to-date ref,
- using HTTP with protocol v0 (or something else that uses the fetch
command of a remote helper)
some refs might not be updated after the fetch.
This bug was introduced in commit 989b8c4452 ("fetch-pack: put shallow
info in output parameter", 2018-06-28) which allowed transports to
report the refs that they have fetched in a new out-parameter
"fetched_refs". If they do so, transport_fetch_refs() makes this
information available to its caller.
Users of "fetched_refs" rely on the following 3 properties:
(1) it is the complete list of refs that was passed to
transport_fetch_refs(),
(2) it has shallow information (REF_STATUS_REJECT_SHALLOW set if
relevant), and
(3) it has updated OIDs if ref-in-want was used (introduced after
989b8c4452).
In an effort to satisfy (1), whenever transport_fetch_refs()
filters the refs sent to the transport, it re-adds the filtered refs to
whatever the transport supplies before returning it to the user.
However, the implementation in 989b8c4452 unconditionally re-adds the
filtered refs without checking if the transport refrained from reporting
anything in "fetched_refs" (which it is allowed to do), resulting in an
incomplete list, no longer satisfying (1).
An earlier effort to resolve this [1] solved the issue by readding the
filtered refs only if the transport did not refrain from reporting in
"fetched_refs", but after further discussion, it seems that the better
solution is to revert the API change that introduced "fetched_refs".
This API change was first suggested as part of a ref-in-want
implementation that allowed for ref patterns and, thus, there could be
drastic differences between the input refs and the refs actually fetched
[2]; we eventually decided to only allow exact ref names, but this API
change remained even though its necessity was decreased.
Therefore, revert this API change by reverting commit 989b8c4452, and
make receive_wanted_refs() update the OIDs in the sought array (like how
update_shallow() updates shallow information in the sought array)
instead. A test is also included to show that the user-visible bug
discussed at the beginning of this commit message no longer exists.
[1] https://public-inbox.org/git/20180801171806.GA122458@google.com/
[2] https://public-inbox.org/git/86a128c5fb710a41791e7183207c4d64889f9307.1485381677.git.jonathantanmy@google.com/
Signed-off-by: Jonathan Tan <jonathantanmy@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-08-02 04:13:20 +08:00
|
|
|
ret = transport_fetch_refs(transport, ref_map);
|
2019-10-03 07:49:28 +08:00
|
|
|
trace2_region_leave("fetch", "fetch_refs", the_repository);
|
|
|
|
}
|
2007-09-11 11:03:25 +08:00
|
|
|
if (!ret)
|
2018-06-28 06:30:20 +08:00
|
|
|
/*
|
|
|
|
* Keep the new pack's ".keep" file around to allow the caller
|
|
|
|
* time to update refs to reference the new objects.
|
|
|
|
*/
|
|
|
|
return 0;
|
|
|
|
transport_unlock_pack(transport);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Update local refs based on the ref values fetched from a remote */
|
|
|
|
static int consume_refs(struct transport *transport, struct ref *ref_map)
|
|
|
|
{
|
fetch-pack: write shallow, then check connectivity
When fetching, connectivity is checked after the shallow file is
updated. There are 2 issues with this: (1) the connectivity check is
only performed up to ancestors of existing refs (which is not thorough
enough if we were deepening an existing ref in the first place), and (2)
there is no rollback of the shallow file if the connectivity check
fails.
To solve (1), update the connectivity check to check the ancestry chain
completely in the case of a deepening fetch by refraining from passing
"--not --all" when invoking rev-list in connected.c.
To solve (2), have fetch_pack() perform its own connectivity check
before updating the shallow file. To support existing use cases in which
"git fetch-pack" is used to download objects without much regard as to
the connectivity of the resulting objects with respect to the existing
repository, the connectivity check is only done if necessary (that is,
the fetch is not a clone, and the fetch involves shallow/deepen
functionality). "git fetch" still performs its own connectivity check,
preserving correctness but sometimes performing redundant work. This
redundancy is mitigated by the fact that fetch_pack() reports if it has
performed a connectivity check itself, and if the transport supports
connect or stateless-connect, it will bubble up that report so that "git
fetch" knows not to perform the connectivity check in such a case.
This was noticed when a user tried to deepen an existing repository by
fetching with --no-shallow from a server that did not send all necessary
objects - the connectivity check as run by "git fetch" succeeded, but a
subsequent "git fsck" failed.
Signed-off-by: Jonathan Tan <jonathantanmy@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-07-03 06:08:43 +08:00
|
|
|
int connectivity_checked = transport->smart_options
|
|
|
|
? transport->smart_options->connectivity_checked : 0;
|
2019-10-03 07:49:28 +08:00
|
|
|
int ret;
|
|
|
|
trace2_region_enter("fetch", "consume_refs", the_repository);
|
|
|
|
ret = store_updated_refs(transport->url,
|
|
|
|
transport->remote->name,
|
|
|
|
connectivity_checked,
|
|
|
|
ref_map);
|
2007-09-14 15:31:23 +08:00
|
|
|
transport_unlock_pack(transport);
|
2019-10-03 07:49:28 +08:00
|
|
|
trace2_region_leave("fetch", "consume_refs", the_repository);
|
2007-09-11 11:03:25 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-05-17 06:58:09 +08:00
|
|
|
static int prune_refs(struct refspec *rs, struct ref *ref_map,
|
|
|
|
const char *raw_url)
|
2009-11-10 16:15:47 +08:00
|
|
|
{
|
2014-01-03 10:28:51 +08:00
|
|
|
int url_len, i, result = 0;
|
2018-05-17 06:58:10 +08:00
|
|
|
struct ref *ref, *stale_refs = get_stale_heads(rs, ref_map);
|
2014-01-03 10:28:51 +08:00
|
|
|
char *url;
|
2016-10-22 06:28:07 +08:00
|
|
|
int summary_width = transport_summary_width(stale_refs);
|
2009-11-10 16:15:47 +08:00
|
|
|
const char *dangling_msg = dry_run
|
2012-04-23 20:30:25 +08:00
|
|
|
? _(" (%s will become dangling)")
|
|
|
|
: _(" (%s has become dangling)");
|
2009-11-10 16:15:47 +08:00
|
|
|
|
2014-01-03 10:28:51 +08:00
|
|
|
if (raw_url)
|
|
|
|
url = transport_anonymize_url(raw_url);
|
|
|
|
else
|
|
|
|
url = xstrdup("foreign");
|
|
|
|
|
|
|
|
url_len = strlen(url);
|
|
|
|
for (i = url_len - 1; url[i] == '/' && 0 <= i; i--)
|
|
|
|
;
|
|
|
|
|
|
|
|
url_len = i + 1;
|
|
|
|
if (4 < i && !strncmp(".git", url + i - 3, 4))
|
|
|
|
url_len = i - 3;
|
|
|
|
|
2015-06-22 22:02:59 +08:00
|
|
|
if (!dry_run) {
|
|
|
|
struct string_list refnames = STRING_LIST_INIT_NODUP;
|
|
|
|
|
|
|
|
for (ref = stale_refs; ref; ref = ref->next)
|
|
|
|
string_list_append(&refnames, ref->name);
|
|
|
|
|
2017-05-22 22:17:38 +08:00
|
|
|
result = delete_refs("fetch: prune", &refnames, 0);
|
2015-06-22 22:02:59 +08:00
|
|
|
string_list_clear(&refnames, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (verbosity >= 0) {
|
|
|
|
for (ref = stale_refs; ref; ref = ref->next) {
|
2016-06-26 13:58:07 +08:00
|
|
|
struct strbuf sb = STRBUF_INIT;
|
2015-06-22 22:02:59 +08:00
|
|
|
if (!shown_url) {
|
|
|
|
fprintf(stderr, _("From %.*s\n"), url_len, url);
|
|
|
|
shown_url = 1;
|
|
|
|
}
|
2016-06-26 13:58:08 +08:00
|
|
|
format_display(&sb, '-', _("[deleted]"), NULL,
|
2016-10-22 06:22:55 +08:00
|
|
|
_("(none)"), prettify_refname(ref->name),
|
|
|
|
summary_width);
|
2016-06-26 13:58:07 +08:00
|
|
|
fprintf(stderr, " %s\n",sb.buf);
|
|
|
|
strbuf_release(&sb);
|
2009-11-10 16:15:47 +08:00
|
|
|
warn_dangling_symref(stderr, dangling_msg, ref->name);
|
|
|
|
}
|
|
|
|
}
|
2015-06-22 22:02:59 +08:00
|
|
|
|
2014-01-03 10:28:51 +08:00
|
|
|
free(url);
|
2009-11-10 16:15:47 +08:00
|
|
|
free_refs(stale_refs);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2008-10-13 17:36:52 +08:00
|
|
|
static void check_not_current_branch(struct ref *ref_map)
|
|
|
|
{
|
|
|
|
struct branch *current_branch = branch_get(NULL);
|
|
|
|
|
|
|
|
if (is_bare_repository() || !current_branch)
|
|
|
|
return;
|
|
|
|
|
|
|
|
for (; ref_map; ref_map = ref_map->next)
|
|
|
|
if (ref_map->peer_ref && !strcmp(current_branch->refname,
|
|
|
|
ref_map->peer_ref->name))
|
2011-02-23 07:41:51 +08:00
|
|
|
die(_("Refusing to fetch into current branch %s "
|
|
|
|
"of non-bare repository"), current_branch->refname);
|
2008-10-13 17:36:52 +08:00
|
|
|
}
|
|
|
|
|
2010-02-25 03:02:05 +08:00
|
|
|
static int truncate_fetch_head(void)
|
|
|
|
{
|
2018-05-18 06:51:51 +08:00
|
|
|
const char *filename = git_path_fetch_head(the_repository);
|
Handle more file writes correctly in shared repos
In shared repositories, we have to be careful when writing files whose
permissions do not allow users other than the owner to write them.
In particular, we force the marks file of fast-export and the FETCH_HEAD
when fetching to be rewritten from scratch.
This commit does not touch other calls to fopen() that want to
write files:
- commands that write to working tree files (core.sharedRepository
does not affect permission bits of working tree files),
e.g. .rej file created by "apply --reject", result of applying a
previous conflict resolution by "rerere", "git merge-file".
- git am, when splitting mails (git-am correctly cleans up its directory
after finishing, so there is no need to share those files between users)
- git submodule clone, when writing the .git file, because the file
will not be overwritten
- git_terminal_prompt() in compat/terminal.c, because it is not writing to
a file at all
- git diff --output, because the output file is clearly not intended to be
shared between the users of the current repository
- git fast-import, when writing a crash report, because the reports' file
names are unique due to an embedded process ID
- mailinfo() in mailinfo.c, because the output is clearly not intended to
be shared between the users of the current repository
- check_or_regenerate_marks() in remote-testsvn.c, because this is only
used for Git's internal testing
- git fsck, when writing lost&found blobs (this should probably be
changed, but left as a low-hanging fruit for future contributors).
Note that this patch does not touch callers of write_file() and
write_file_gently(), which would benefit from the same scrutiny as
to usage in shared repositories. Most notable users are branch,
daemon, submodule & worktree, and a worrisome call in transport.c
when updating one ref (which ignores the shared flag).
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2016-01-12 02:35:54 +08:00
|
|
|
FILE *fp = fopen_for_writing(filename);
|
2010-02-25 03:02:05 +08:00
|
|
|
|
|
|
|
if (!fp)
|
2016-05-08 17:47:26 +08:00
|
|
|
return error_errno(_("cannot open %s"), filename);
|
2010-02-25 03:02:05 +08:00
|
|
|
fclose(fp);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-08-08 05:43:20 +08:00
|
|
|
static void set_option(struct transport *transport, const char *name, const char *value)
|
|
|
|
{
|
|
|
|
int r = transport_set_option(transport, name, value);
|
|
|
|
if (r < 0)
|
|
|
|
die(_("Option \"%s\" value \"%s\" is not valid for %s"),
|
|
|
|
name, value, transport->url);
|
|
|
|
if (r > 0)
|
|
|
|
warning(_("Option \"%s\" is ignored for %s\n"),
|
|
|
|
name, transport->url);
|
|
|
|
}
|
|
|
|
|
2018-07-03 06:39:44 +08:00
|
|
|
|
|
|
|
/*
 * for_each_glob_ref() callback: append the OID of every matching ref
 * to the oid_array passed via cb_data.  Always returns 0 so the
 * iteration continues over all matches.
 */
static int add_oid(const char *refname, const struct object_id *oid, int flags,
		   void *cb_data)
{
	struct oid_array *tips = cb_data;

	oid_array_append(tips, oid);
	return 0;
}
|
|
|
|
|
|
|
|
static void add_negotiation_tips(struct git_transport_options *smart_options)
|
|
|
|
{
|
|
|
|
struct oid_array *oids = xcalloc(1, sizeof(*oids));
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < negotiation_tip.nr; i++) {
|
|
|
|
const char *s = negotiation_tip.items[i].string;
|
|
|
|
int old_nr;
|
|
|
|
if (!has_glob_specials(s)) {
|
|
|
|
struct object_id oid;
|
|
|
|
if (get_oid(s, &oid))
|
2021-07-16 01:44:32 +08:00
|
|
|
die(_("%s is not a valid object"), s);
|
|
|
|
if (!has_object(the_repository, &oid, 0))
|
|
|
|
die(_("the object %s does not exist"), s);
|
2018-07-03 06:39:44 +08:00
|
|
|
oid_array_append(oids, &oid);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
old_nr = oids->nr;
|
|
|
|
for_each_glob_ref(add_oid, s, oids);
|
|
|
|
if (old_nr == oids->nr)
|
|
|
|
warning("Ignoring --negotiation-tip=%s because it does not match any refs",
|
|
|
|
s);
|
|
|
|
}
|
|
|
|
smart_options->negotiation_tips = oids;
|
|
|
|
}
|
|
|
|
|
2016-06-12 18:53:59 +08:00
|
|
|
static struct transport *prepare_transport(struct remote *remote, int deepen)
|
2013-08-08 05:43:20 +08:00
|
|
|
{
|
|
|
|
struct transport *transport;
|
2019-01-08 08:17:09 +08:00
|
|
|
|
2013-08-08 05:43:20 +08:00
|
|
|
transport = transport_get(remote, NULL);
|
|
|
|
transport_set_verbosity(transport, verbosity, progress);
|
2016-02-03 12:09:14 +08:00
|
|
|
transport->family = family;
|
2013-08-08 05:43:20 +08:00
|
|
|
if (upload_pack)
|
|
|
|
set_option(transport, TRANS_OPT_UPLOADPACK, upload_pack);
|
|
|
|
if (keep)
|
|
|
|
set_option(transport, TRANS_OPT_KEEP, "yes");
|
|
|
|
if (depth)
|
|
|
|
set_option(transport, TRANS_OPT_DEPTH, depth);
|
2016-06-12 18:53:59 +08:00
|
|
|
if (deepen && deepen_since)
|
|
|
|
set_option(transport, TRANS_OPT_DEEPEN_SINCE, deepen_since);
|
2016-06-12 18:54:04 +08:00
|
|
|
if (deepen && deepen_not.nr)
|
|
|
|
set_option(transport, TRANS_OPT_DEEPEN_NOT,
|
|
|
|
(const char *)&deepen_not);
|
fetch, upload-pack: --deepen=N extends shallow boundary by N commits
In git-fetch, --depth argument is always relative with the latest
remote refs. This makes it a bit difficult to cover this use case,
where the user wants to make the shallow history, say 3 levels
deeper. It would work if remote refs have not moved yet, but nobody
can guarantee that, especially when that use case is performed a
couple months after the last clone or "git fetch --depth". Also,
modifying shallow boundary using --depth does not work well with
clones created by --since or --not.
This patch fixes that. A new argument --deepen=<N> will add <N> more (*)
parent commits to the current history regardless of where remote refs
are.
Have/Want negotiation is still respected. So if remote refs move, the
server will send two chunks: one between "have" and "want" and another
to extend shallow history. In theory, the client could send no "want"s
in order to get the second chunk only. But the protocol does not allow
that. Either you send no want lines, which means ls-remote; or you
have to send at least one want line that carries deep-relative to the
server..
The main work was done by Dongcan Jiang. I fixed it up here and there.
And of course all the bugs belong to me.
(*) We could even support --deepen=<N> where <N> is negative. In that
case we can cut some history from the shallow clone. This operation
(and --depth=<shorter depth>) does not require interaction with remote
side (and more complicated to implement as a result).
Helped-by: Duy Nguyen <pclouds@gmail.com>
Helped-by: Eric Sunshine <sunshine@sunshineco.com>
Helped-by: Junio C Hamano <gitster@pobox.com>
Signed-off-by: Dongcan Jiang <dongcan.jiang@gmail.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2016-06-12 18:54:09 +08:00
|
|
|
if (deepen_relative)
|
|
|
|
set_option(transport, TRANS_OPT_DEEPEN_RELATIVE, "yes");
|
2013-12-05 21:02:42 +08:00
|
|
|
if (update_shallow)
|
|
|
|
set_option(transport, TRANS_OPT_UPDATE_SHALLOW, "yes");
|
2017-12-08 23:58:44 +08:00
|
|
|
if (filter_options.choice) {
|
2019-06-28 06:54:10 +08:00
|
|
|
const char *spec =
|
|
|
|
expand_list_objects_filter_spec(&filter_options);
|
|
|
|
set_option(transport, TRANS_OPT_LIST_OBJECTS_FILTER, spec);
|
2017-12-08 23:58:44 +08:00
|
|
|
set_option(transport, TRANS_OPT_FROM_PROMISOR, "1");
|
|
|
|
}
|
2018-07-03 06:39:44 +08:00
|
|
|
if (negotiation_tip.nr) {
|
|
|
|
if (transport->smart_options)
|
|
|
|
add_negotiation_tips(transport->smart_options);
|
|
|
|
else
|
|
|
|
warning("Ignoring --negotiation-tip because the protocol does not support it.");
|
|
|
|
}
|
2013-08-08 05:43:20 +08:00
|
|
|
return transport;
|
|
|
|
}
|
|
|
|
|
2013-08-08 06:14:45 +08:00
|
|
|
static void backfill_tags(struct transport *transport, struct ref *ref_map)
|
|
|
|
{
|
2016-06-12 18:53:59 +08:00
|
|
|
int cannot_reuse;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Once we have set TRANS_OPT_DEEPEN_SINCE, we can't unset it
|
|
|
|
* when remote helper is used (setting it to an empty string
|
|
|
|
* is not unsetting). We could extend the remote helper
|
|
|
|
* protocol for that, but for now, just force a new connection
|
2016-06-12 18:54:04 +08:00
|
|
|
* without deepen-since. Similar story for deepen-not.
|
2016-06-12 18:53:59 +08:00
|
|
|
*/
|
2016-06-12 18:54:04 +08:00
|
|
|
cannot_reuse = transport->cannot_reuse ||
|
|
|
|
deepen_since || deepen_not.nr;
|
2016-06-12 18:53:59 +08:00
|
|
|
if (cannot_reuse) {
|
|
|
|
gsecondary = prepare_transport(transport->remote, 0);
|
fetch: work around "transport-take-over" hack
A Git-aware "connect" transport allows the "transport_take_over" to
redirect generic transport requests like fetch(), push_refs() and
get_refs_list() to the native Git transport handling methods. The
take-over process replaces transport->data with a fake data that
these method implementations understand.
While this hack works OK for a single request, it breaks when the
transport needs to make more than one requests. transport->data
that used to hold necessary information for the specific helper to
work correctly is destroyed during the take-over process.
One codepath that this matters is "git fetch" in auto-follow mode;
when it does not get all the tags that ought to point at the history
it got (which can be determined by looking at the peeled tags in the
initial advertisement) from the primary transfer, it internally
makes a second request to complete the fetch. Because "take-over"
hack has already destroyed the data necessary to talk to the
transport helper by the time this happens, the second request cannot
make a request to the helper to make another connection to fetch
these additional tags.
Mark such a transport as "cannot_reuse", and use a separate
transport to perform the backfill fetch in order to work around
this breakage.
Note that this problem does not manifest itself when running t5802,
because our upload-pack gives you all the necessary auto-followed
tags during the primary transfer. You would need to step through
"git fetch" in a debugger, stop immediately after the primary
transfer finishes and writes these auto-followed tags, remove the
tag references and repack/prune the repository to convince the
"find-non-local-tags" procedure that the primary transfer failed to
give us all the necessary tags, and then let it continue, in order
to trigger the bug in the secondary transfer this patch fixes.
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-08-08 06:47:18 +08:00
|
|
|
transport = gsecondary;
|
|
|
|
}
|
|
|
|
|
2013-08-08 06:14:45 +08:00
|
|
|
transport_set_option(transport, TRANS_OPT_FOLLOWTAGS, NULL);
|
|
|
|
transport_set_option(transport, TRANS_OPT_DEPTH, "0");
|
fetch, upload-pack: --deepen=N extends shallow boundary by N commits
In git-fetch, --depth argument is always relative with the latest
remote refs. This makes it a bit difficult to cover this use case,
where the user wants to make the shallow history, say 3 levels
deeper. It would work if remote refs have not moved yet, but nobody
can guarantee that, especially when that use case is performed a
couple months after the last clone or "git fetch --depth". Also,
modifying shallow boundary using --depth does not work well with
clones created by --since or --not.
This patch fixes that. A new argument --deepen=<N> will add <N> more (*)
parent commits to the current history regardless of where remote refs
are.
Have/Want negotiation is still respected. So if remote refs move, the
server will send two chunks: one between "have" and "want" and another
to extend shallow history. In theory, the client could send no "want"s
in order to get the second chunk only. But the protocol does not allow
that. Either you send no want lines, which means ls-remote; or you
have to send at least one want line that carries deep-relative to the
server..
The main work was done by Dongcan Jiang. I fixed it up here and there.
And of course all the bugs belong to me.
(*) We could even support --deepen=<N> where <N> is negative. In that
case we can cut some history from the shallow clone. This operation
(and --depth=<shorter depth>) does not require interaction with remote
side (and more complicated to implement as a result).
Helped-by: Duy Nguyen <pclouds@gmail.com>
Helped-by: Eric Sunshine <sunshine@sunshineco.com>
Helped-by: Junio C Hamano <gitster@pobox.com>
Signed-off-by: Dongcan Jiang <dongcan.jiang@gmail.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2016-06-12 18:54:09 +08:00
|
|
|
transport_set_option(transport, TRANS_OPT_DEEPEN_RELATIVE, NULL);
|
fetch-pack: unify ref in and out param
When a user fetches:
- at least one up-to-date ref and at least one non-up-to-date ref,
- using HTTP with protocol v0 (or something else that uses the fetch
command of a remote helper)
some refs might not be updated after the fetch.
This bug was introduced in commit 989b8c4452 ("fetch-pack: put shallow
info in output parameter", 2018-06-28) which allowed transports to
report the refs that they have fetched in a new out-parameter
"fetched_refs". If they do so, transport_fetch_refs() makes this
information available to its caller.
Users of "fetched_refs" rely on the following 3 properties:
(1) it is the complete list of refs that was passed to
transport_fetch_refs(),
(2) it has shallow information (REF_STATUS_REJECT_SHALLOW set if
relevant), and
(3) it has updated OIDs if ref-in-want was used (introduced after
989b8c4452).
In an effort to satisfy (1), whenever transport_fetch_refs()
filters the refs sent to the transport, it re-adds the filtered refs to
whatever the transport supplies before returning it to the user.
However, the implementation in 989b8c4452 unconditionally re-adds the
filtered refs without checking if the transport refrained from reporting
anything in "fetched_refs" (which it is allowed to do), resulting in an
incomplete list, no longer satisfying (1).
An earlier effort to resolve this [1] solved the issue by readding the
filtered refs only if the transport did not refrain from reporting in
"fetched_refs", but after further discussion, it seems that the better
solution is to revert the API change that introduced "fetched_refs".
This API change was first suggested as part of a ref-in-want
implementation that allowed for ref patterns and, thus, there could be
drastic differences between the input refs and the refs actually fetched
[2]; we eventually decided to only allow exact ref names, but this API
change remained even though its necessity was decreased.
Therefore, revert this API change by reverting commit 989b8c4452, and
make receive_wanted_refs() update the OIDs in the sought array (like how
update_shallow() updates shallow information in the sought array)
instead. A test is also included to show that the user-visible bug
discussed at the beginning of this commit message no longer exists.
[1] https://public-inbox.org/git/20180801171806.GA122458@google.com/
[2] https://public-inbox.org/git/86a128c5fb710a41791e7183207c4d64889f9307.1485381677.git.jonathantanmy@google.com/
Signed-off-by: Jonathan Tan <jonathantanmy@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-08-02 04:13:20 +08:00
|
|
|
if (!fetch_refs(transport, ref_map))
|
2018-06-28 06:30:20 +08:00
|
|
|
consume_refs(transport, ref_map);
|
fetch: work around "transport-take-over" hack
A Git-aware "connect" transport allows the "transport_take_over" to
redirect generic transport requests like fetch(), push_refs() and
get_refs_list() to the native Git transport handling methods. The
take-over process replaces transport->data with a fake data that
these method implementations understand.
While this hack works OK for a single request, it breaks when the
transport needs to make more than one requests. transport->data
that used to hold necessary information for the specific helper to
work correctly is destroyed during the take-over process.
One codepath that this matters is "git fetch" in auto-follow mode;
when it does not get all the tags that ought to point at the history
it got (which can be determined by looking at the peeled tags in the
initial advertisement) from the primary transfer, it internally
makes a second request to complete the fetch. Because "take-over"
hack has already destroyed the data necessary to talk to the
transport helper by the time this happens, the second request cannot
make a request to the helper to make another connection to fetch
these additional tags.
Mark such a transport as "cannot_reuse", and use a separate
transport to perform the backfill fetch in order to work around
this breakage.
Note that this problem does not manifest itself when running t5802,
because our upload-pack gives you all the necessary auto-followed
tags during the primary transfer. You would need to step through
"git fetch" in a debugger, stop immediately after the primary
transfer finishes and writes these auto-followed tags, remove the
tag references and repack/prune the repository to convince the
"find-non-local-tags" procedure that the primary transfer failed to
give us all the necessary tags, and then let it continue, in order
to trigger the bug in the secondary transfer this patch fixes.
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-08-08 06:47:18 +08:00
|
|
|
|
|
|
|
if (gsecondary) {
|
|
|
|
transport_disconnect(gsecondary);
|
|
|
|
gsecondary = NULL;
|
|
|
|
}
|
2013-08-08 06:14:45 +08:00
|
|
|
}
|
|
|
|
|
2007-09-11 11:03:25 +08:00
|
|
|
static int do_fetch(struct transport *transport,
|
2018-05-17 06:58:07 +08:00
|
|
|
struct refspec *rs)
|
2007-09-11 11:03:25 +08:00
|
|
|
{
|
2008-03-03 10:34:43 +08:00
|
|
|
struct ref *ref_map;
|
2007-09-11 11:03:25 +08:00
|
|
|
int autotags = (transport->remote->fetch_tags == 1);
|
2013-05-25 17:08:16 +08:00
|
|
|
int retcode = 0;
|
2018-06-28 06:30:21 +08:00
|
|
|
const struct ref *remote_refs;
|
2021-02-06 04:48:48 +08:00
|
|
|
struct transport_ls_refs_options transport_ls_refs_options =
|
|
|
|
TRANSPORT_LS_REFS_OPTIONS_INIT;
|
2018-09-28 03:24:07 +08:00
|
|
|
int must_list_refs = 1;
|
2009-10-26 05:28:12 +08:00
|
|
|
|
2010-08-12 06:57:20 +08:00
|
|
|
if (tags == TAGS_DEFAULT) {
|
|
|
|
if (transport->remote->fetch_tags == 2)
|
|
|
|
tags = TAGS_SET;
|
|
|
|
if (transport->remote->fetch_tags == -1)
|
|
|
|
tags = TAGS_UNSET;
|
|
|
|
}
|
2007-09-11 11:03:25 +08:00
|
|
|
|
|
|
|
/* if not appending, truncate FETCH_HEAD */
|
2020-08-18 22:25:22 +08:00
|
|
|
if (!append && write_fetch_head) {
|
2013-05-25 17:08:16 +08:00
|
|
|
retcode = truncate_fetch_head();
|
|
|
|
if (retcode)
|
|
|
|
goto cleanup;
|
2007-11-23 06:22:23 +08:00
|
|
|
}
|
2007-09-11 11:03:25 +08:00
|
|
|
|
2018-09-28 03:24:07 +08:00
|
|
|
if (rs->nr) {
|
|
|
|
int i;
|
|
|
|
|
2021-02-06 04:48:48 +08:00
|
|
|
refspec_ref_prefixes(rs, &transport_ls_refs_options.ref_prefixes);
|
2018-09-28 03:24:07 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We can avoid listing refs if all of them are exact
|
|
|
|
* OIDs
|
|
|
|
*/
|
|
|
|
must_list_refs = 0;
|
|
|
|
for (i = 0; i < rs->nr; i++) {
|
|
|
|
if (!rs->items[i].exact_sha1) {
|
|
|
|
must_list_refs = 1;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else if (transport->remote && transport->remote->fetch.nr)
|
2021-02-06 04:48:48 +08:00
|
|
|
refspec_ref_prefixes(&transport->remote->fetch,
|
|
|
|
&transport_ls_refs_options.ref_prefixes);
|
2007-09-11 11:03:25 +08:00
|
|
|
|
2018-09-28 03:24:07 +08:00
|
|
|
if (tags == TAGS_SET || tags == TAGS_DEFAULT) {
|
|
|
|
must_list_refs = 1;
|
2021-02-06 04:48:48 +08:00
|
|
|
if (transport_ls_refs_options.ref_prefixes.nr)
|
|
|
|
strvec_push(&transport_ls_refs_options.ref_prefixes,
|
|
|
|
"refs/tags/");
|
2007-09-11 11:03:25 +08:00
|
|
|
}
|
|
|
|
|
2019-10-03 07:49:28 +08:00
|
|
|
if (must_list_refs) {
|
|
|
|
trace2_region_enter("fetch", "remote_refs", the_repository);
|
2021-02-06 04:48:48 +08:00
|
|
|
remote_refs = transport_get_remote_refs(transport,
|
|
|
|
&transport_ls_refs_options);
|
2019-10-03 07:49:28 +08:00
|
|
|
trace2_region_leave("fetch", "remote_refs", the_repository);
|
|
|
|
} else
|
2018-09-28 03:24:07 +08:00
|
|
|
remote_refs = NULL;
|
|
|
|
|
2021-02-06 04:48:48 +08:00
|
|
|
strvec_clear(&transport_ls_refs_options.ref_prefixes);
|
2018-06-28 06:30:21 +08:00
|
|
|
|
|
|
|
ref_map = get_ref_map(transport->remote, remote_refs, rs,
|
|
|
|
tags, &autotags);
|
2008-10-13 17:36:52 +08:00
|
|
|
if (!update_head_ok)
|
|
|
|
check_not_current_branch(ref_map);
|
2007-09-11 11:03:25 +08:00
|
|
|
|
2008-03-04 11:27:40 +08:00
|
|
|
if (tags == TAGS_DEFAULT && autotags)
|
|
|
|
transport_set_option(transport, TRANS_OPT_FOLLOWTAGS, "1");
|
2011-10-15 13:04:25 +08:00
|
|
|
if (prune) {
|
fetch --prune: prune only based on explicit refspecs
The old behavior of "fetch --prune" was to prune whatever was being
fetched. In particular, "fetch --prune --tags" caused tags not only
to be fetched, but also to be pruned. This is inappropriate because
there is only one tags namespace that is shared among the local
repository and all remotes. Therefore, if the user defines a local
tag and then runs "git fetch --prune --tags", then the local tag is
deleted. Moreover, "--prune" and "--tags" can also be configured via
fetch.prune / remote.<name>.prune and remote.<name>.tagopt, making it
even less obvious that an invocation of "git fetch" could result in
tag lossage.
Since the command "git remote update" invokes "git fetch", it had the
same problem.
The command "git remote prune", on the other hand, disregarded the
setting of remote.<name>.tagopt, and so its behavior was inconsistent
with that of the other commands.
So the old behavior made it too easy to lose tags. To fix this
problem, change "fetch --prune" to prune references based only on
refspecs specified explicitly by the user, either on the command line
or via remote.<name>.fetch. Thus, tags are no longer made subject to
pruning by the --tags option or the remote.<name>.tagopt setting.
However, tags *are* still subject to pruning if they are fetched as
part of a refspec, and that is good. For example:
* On the command line,
git fetch --prune 'refs/tags/*:refs/tags/*'
causes tags, and only tags, to be fetched and pruned, and is
therefore a simple way for the user to get the equivalent of the old
behavior of "--prune --tag".
* For a remote that was configured with the "--mirror" option, the
configuration is set to include
[remote "name"]
fetch = +refs/*:refs/*
, which causes tags to be subject to pruning along with all other
references. This is the behavior that will typically be desired for
a mirror.
Signed-off-by: Michael Haggerty <mhagger@alum.mit.edu>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-10-30 13:33:00 +08:00
|
|
|
/*
|
|
|
|
* We only prune based on refspecs specified
|
|
|
|
* explicitly (via command line or configuration); we
|
|
|
|
* don't care whether --tags was specified.
|
|
|
|
*/
|
2018-05-17 06:58:07 +08:00
|
|
|
if (rs->nr) {
|
2018-05-17 06:58:09 +08:00
|
|
|
prune_refs(rs, ref_map, transport->url);
|
2011-10-15 13:04:26 +08:00
|
|
|
} else {
|
2018-05-17 06:58:09 +08:00
|
|
|
prune_refs(&transport->remote->fetch,
|
2014-01-03 10:28:51 +08:00
|
|
|
ref_map,
|
|
|
|
transport->url);
|
2011-10-15 13:04:26 +08:00
|
|
|
}
|
2011-10-15 13:04:25 +08:00
|
|
|
}
|
fetch-pack: unify ref in and out param
When a user fetches:
- at least one up-to-date ref and at least one non-up-to-date ref,
- using HTTP with protocol v0 (or something else that uses the fetch
command of a remote helper)
some refs might not be updated after the fetch.
This bug was introduced in commit 989b8c4452 ("fetch-pack: put shallow
info in output parameter", 2018-06-28) which allowed transports to
report the refs that they have fetched in a new out-parameter
"fetched_refs". If they do so, transport_fetch_refs() makes this
information available to its caller.
Users of "fetched_refs" rely on the following 3 properties:
(1) it is the complete list of refs that was passed to
transport_fetch_refs(),
(2) it has shallow information (REF_STATUS_REJECT_SHALLOW set if
relevant), and
(3) it has updated OIDs if ref-in-want was used (introduced after
989b8c4452).
In an effort to satisfy (1), whenever transport_fetch_refs()
filters the refs sent to the transport, it re-adds the filtered refs to
whatever the transport supplies before returning it to the user.
However, the implementation in 989b8c4452 unconditionally re-adds the
filtered refs without checking if the transport refrained from reporting
anything in "fetched_refs" (which it is allowed to do), resulting in an
incomplete list, no longer satisfying (1).
An earlier effort to resolve this [1] solved the issue by readding the
filtered refs only if the transport did not refrain from reporting in
"fetched_refs", but after further discussion, it seems that the better
solution is to revert the API change that introduced "fetched_refs".
This API change was first suggested as part of a ref-in-want
implementation that allowed for ref patterns and, thus, there could be
drastic differences between the input refs and the refs actually fetched
[2]; we eventually decided to only allow exact ref names, but this API
change remained even though its necessity was decreased.
Therefore, revert this API change by reverting commit 989b8c4452, and
make receive_wanted_refs() update the OIDs in the sought array (like how
update_shallow() updates shallow information in the sought array)
instead. A test is also included to show that the user-visible bug
discussed at the beginning of this commit message no longer exists.
[1] https://public-inbox.org/git/20180801171806.GA122458@google.com/
[2] https://public-inbox.org/git/86a128c5fb710a41791e7183207c4d64889f9307.1485381677.git.jonathantanmy@google.com/
Signed-off-by: Jonathan Tan <jonathantanmy@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-08-02 04:13:20 +08:00
|
|
|
if (fetch_refs(transport, ref_map) || consume_refs(transport, ref_map)) {
|
2014-01-03 10:28:52 +08:00
|
|
|
free_refs(ref_map);
|
|
|
|
retcode = 1;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2019-08-19 17:11:20 +08:00
|
|
|
|
|
|
|
if (set_upstream) {
|
|
|
|
struct branch *branch = branch_get("HEAD");
|
|
|
|
struct ref *rm;
|
|
|
|
struct ref *source_ref = NULL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We're setting the upstream configuration for the
|
2019-11-06 01:07:23 +08:00
|
|
|
* current branch. The relevant upstream is the
|
2019-08-19 17:11:20 +08:00
|
|
|
* fetched branch that is meant to be merged with the
|
|
|
|
* current one, i.e. the one fetched to FETCH_HEAD.
|
|
|
|
*
|
|
|
|
* When there are several such branches, consider the
|
|
|
|
* request ambiguous and err on the safe side by doing
|
|
|
|
* nothing and just emit a warning.
|
|
|
|
*/
|
|
|
|
for (rm = ref_map; rm; rm = rm->next) {
|
|
|
|
if (!rm->peer_ref) {
|
|
|
|
if (source_ref) {
|
2019-11-01 04:41:46 +08:00
|
|
|
warning(_("multiple branches detected, incompatible with --set-upstream"));
|
2019-08-19 17:11:20 +08:00
|
|
|
goto skip;
|
|
|
|
} else {
|
|
|
|
source_ref = rm;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (source_ref) {
|
|
|
|
if (!strcmp(source_ref->name, "HEAD") ||
|
|
|
|
starts_with(source_ref->name, "refs/heads/"))
|
|
|
|
install_branch_config(0,
|
|
|
|
branch->name,
|
|
|
|
transport->remote->name,
|
|
|
|
source_ref->name);
|
|
|
|
else if (starts_with(source_ref->name, "refs/remotes/"))
|
|
|
|
warning(_("not setting upstream for a remote remote-tracking branch"));
|
|
|
|
else if (starts_with(source_ref->name, "refs/tags/"))
|
|
|
|
warning(_("not setting upstream for a remote tag"));
|
|
|
|
else
|
|
|
|
warning(_("unknown branch type"));
|
|
|
|
} else {
|
|
|
|
warning(_("no source branch found.\n"
|
|
|
|
"you need to specify exactly one branch with the --set-upstream option."));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
skip:
|
2008-03-03 10:34:43 +08:00
|
|
|
free_refs(ref_map);
|
2007-09-11 11:03:25 +08:00
|
|
|
|
|
|
|
/* if neither --no-tags nor --tags was specified, do automated tag
|
|
|
|
* following ... */
|
2007-12-04 15:25:47 +08:00
|
|
|
if (tags == TAGS_DEFAULT && autotags) {
|
2008-03-03 10:35:00 +08:00
|
|
|
struct ref **tail = &ref_map;
|
|
|
|
ref_map = NULL;
|
2018-06-28 06:30:21 +08:00
|
|
|
find_non_local_tags(remote_refs, &ref_map, &tail);
|
2013-08-08 06:14:45 +08:00
|
|
|
if (ref_map)
|
|
|
|
backfill_tags(transport, ref_map);
|
2007-09-11 11:03:25 +08:00
|
|
|
free_refs(ref_map);
|
|
|
|
}
|
|
|
|
|
2013-05-25 17:08:16 +08:00
|
|
|
cleanup:
|
|
|
|
return retcode;
|
2007-09-11 11:03:25 +08:00
|
|
|
}
|
|
|
|
|
2009-11-10 04:09:56 +08:00
|
|
|
static int get_one_remote_for_fetch(struct remote *remote, void *priv)
|
|
|
|
{
|
|
|
|
struct string_list *list = priv;
|
2009-11-10 04:11:06 +08:00
|
|
|
if (!remote->skip_default_update)
|
2010-06-26 07:41:38 +08:00
|
|
|
string_list_append(list, remote->name);
|
2009-11-10 04:09:56 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct remote_group_data {
|
|
|
|
const char *name;
|
|
|
|
struct string_list *list;
|
|
|
|
};
|
|
|
|
|
|
|
|
static int get_remote_group(const char *key, const char *value, void *priv)
|
|
|
|
{
|
|
|
|
struct remote_group_data *g = priv;
|
|
|
|
|
2015-07-29 05:08:21 +08:00
|
|
|
if (skip_prefix(key, "remotes.", &key) && !strcmp(key, g->name)) {
|
2009-11-10 04:09:56 +08:00
|
|
|
/* split list by white space */
|
|
|
|
while (*value) {
|
2015-07-29 05:08:20 +08:00
|
|
|
size_t wordlen = strcspn(value, " \t\n");
|
|
|
|
|
2015-07-29 05:08:19 +08:00
|
|
|
if (wordlen >= 1)
|
2016-06-15 02:28:56 +08:00
|
|
|
string_list_append_nodup(g->list,
|
2015-07-29 05:08:19 +08:00
|
|
|
xstrndup(value, wordlen));
|
|
|
|
value += wordlen + (value[wordlen] != '\0');
|
2009-11-10 04:09:56 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int add_remote_or_group(const char *name, struct string_list *list)
|
|
|
|
{
|
|
|
|
int prev_nr = list->nr;
|
2010-05-14 17:31:33 +08:00
|
|
|
struct remote_group_data g;
|
|
|
|
g.name = name; g.list = list;
|
2009-11-10 04:09:56 +08:00
|
|
|
|
|
|
|
git_config(get_remote_group, &g);
|
|
|
|
if (list->nr == prev_nr) {
|
2016-02-16 17:47:50 +08:00
|
|
|
struct remote *remote = remote_get(name);
|
2017-01-20 05:20:02 +08:00
|
|
|
if (!remote_is_configured(remote, 0))
|
2009-11-10 04:09:56 +08:00
|
|
|
return 0;
|
2010-06-26 07:41:38 +08:00
|
|
|
string_list_append(list, remote->name);
|
2009-11-10 04:09:56 +08:00
|
|
|
}
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2020-07-29 04:24:27 +08:00
|
|
|
static void add_options_to_argv(struct strvec *argv)
|
2009-11-10 04:09:56 +08:00
|
|
|
{
|
2009-11-10 16:19:43 +08:00
|
|
|
if (dry_run)
|
2020-07-29 04:24:27 +08:00
|
|
|
strvec_push(argv, "--dry-run");
|
2013-10-30 13:33:04 +08:00
|
|
|
if (prune != -1)
|
2020-07-29 04:24:27 +08:00
|
|
|
strvec_push(argv, prune ? "--prune" : "--no-prune");
|
fetch: add a --prune-tags option and fetch.pruneTags config
Add a --prune-tags option to git-fetch, along with fetch.pruneTags
config option and a -P shorthand (-p is --prune). This allows for
doing any of:
git fetch -p -P
git fetch --prune --prune-tags
git fetch -p -P origin
git fetch --prune --prune-tags origin
Or simply:
git config fetch.prune true &&
git config fetch.pruneTags true &&
git fetch
Instead of the much more verbose:
git fetch --prune origin 'refs/tags/*:refs/tags/*' '+refs/heads/*:refs/remotes/origin/*'
Before this feature it was painful to support the use-case of pulling
from a repo which is having both its branches *and* tags deleted
regularly, and have our local references to reflect upstream.
At work we create deployment tags in the repo for each rollout, and
there's *lots* of those, so they're archived within weeks for
performance reasons.
Without this change it's hard to centrally configure such repos in
/etc/gitconfig (on servers that are only used for working with
them). You need to set fetch.prune=true globally, and then for each
repo:
git -C {} config --replace-all remote.origin.fetch "refs/tags/*:refs/tags/*" "^\+*refs/tags/\*:refs/tags/\*$"
Now I can simply set fetch.pruneTags=true in /etc/gitconfig as well,
and users running "git pull" will automatically get the pruning
semantics I want.
Even though "git remote" has corresponding "prune" and "update
--prune" subcommands I'm intentionally not adding a corresponding
prune-tags or "update --prune --prune-tags" mode to that command.
It's advertised (as noted in my recent "git remote doc: correct
dangerous lies about what prune does") as only modifying remote
tracking references, whereas any --prune-tags option is always going
to modify what from the user's perspective is a local copy of the tag,
since there's no such thing as a remote tracking tag.
Ideally add_prune_tags_to_fetch_refspec() would be something that
would use ALLOC_GROW() to grow the 'fetch` member of the 'remote'
struct. Instead I'm realloc-ing remote->fetch and adding the
tag_refspec to the end.
The reason is that parse_{fetch,push}_refspec which allocate the
refspec (ultimately remote->fetch) struct are called many places that
don't have access to a 'remote' struct. It would be hard to change all
their callsites to be amenable to carry around the bookkeeping
variables required for dynamic allocation.
All the other callers of the API first incrementally construct the
string version of the refspec in remote->fetch_refspec via
add_fetch_refspec(), before finally calling parse_fetch_refspec() via
some variation of remote_get().
It's less of a pain to deal with the one special case that needs to
modify already constructed refspecs than to chase down and change all
the other callsites. The API I'm adding is intentionally not
generalized because if we add more of these we'd probably want to
re-visit how this is done.
See my "Re: [BUG] git remote prune removes local tags, depending on
fetch config" (87po6ahx87.fsf@evledraar.gmail.com;
https://public-inbox.org/git/87po6ahx87.fsf@evledraar.gmail.com/) for
more background info.
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-02-10 04:32:15 +08:00
|
|
|
if (prune_tags != -1)
|
2020-07-29 04:24:27 +08:00
|
|
|
strvec_push(argv, prune_tags ? "--prune-tags" : "--no-prune-tags");
|
2010-02-25 02:22:06 +08:00
|
|
|
if (update_head_ok)
|
2020-07-29 04:24:27 +08:00
|
|
|
strvec_push(argv, "--update-head-ok");
|
2010-02-25 02:22:06 +08:00
|
|
|
if (force)
|
2020-07-29 04:24:27 +08:00
|
|
|
strvec_push(argv, "--force");
|
2010-02-25 02:22:06 +08:00
|
|
|
if (keep)
|
2020-07-29 04:24:27 +08:00
|
|
|
strvec_push(argv, "--keep");
|
2010-11-11 07:55:02 +08:00
|
|
|
if (recurse_submodules == RECURSE_SUBMODULES_ON)
|
2020-07-29 04:24:27 +08:00
|
|
|
strvec_push(argv, "--recurse-submodules");
|
2011-03-07 06:11:21 +08:00
|
|
|
else if (recurse_submodules == RECURSE_SUBMODULES_ON_DEMAND)
|
2020-07-29 04:24:27 +08:00
|
|
|
strvec_push(argv, "--recurse-submodules=on-demand");
|
2012-09-06 05:22:19 +08:00
|
|
|
if (tags == TAGS_SET)
|
2020-07-29 04:24:27 +08:00
|
|
|
strvec_push(argv, "--tags");
|
2012-09-06 05:22:19 +08:00
|
|
|
else if (tags == TAGS_UNSET)
|
2020-07-29 04:24:27 +08:00
|
|
|
strvec_push(argv, "--no-tags");
|
2009-11-10 04:09:56 +08:00
|
|
|
if (verbosity >= 2)
|
2020-07-29 04:24:27 +08:00
|
|
|
strvec_push(argv, "-v");
|
2009-11-10 04:09:56 +08:00
|
|
|
if (verbosity >= 1)
|
2020-07-29 04:24:27 +08:00
|
|
|
strvec_push(argv, "-v");
|
2009-11-10 04:09:56 +08:00
|
|
|
else if (verbosity < 0)
|
2020-07-29 04:24:27 +08:00
|
|
|
strvec_push(argv, "-q");
|
2020-09-15 19:54:07 +08:00
|
|
|
if (family == TRANSPORT_FAMILY_IPV4)
|
2020-09-23 03:36:34 +08:00
|
|
|
strvec_push(argv, "--ipv4");
|
2020-09-15 19:54:07 +08:00
|
|
|
else if (family == TRANSPORT_FAMILY_IPV6)
|
2020-09-23 03:36:34 +08:00
|
|
|
strvec_push(argv, "--ipv6");
|
2010-11-12 20:54:52 +08:00
|
|
|
}
|
|
|
|
|
2019-10-06 02:46:40 +08:00
|
|
|
/* Fetch multiple remotes in parallel */
|
|
|
|
|
|
|
|
struct parallel_fetch_state {
|
|
|
|
const char **argv;
|
|
|
|
struct string_list *remotes;
|
|
|
|
int next, result;
|
|
|
|
};
|
|
|
|
|
|
|
|
static int fetch_next_remote(struct child_process *cp, struct strbuf *out,
|
|
|
|
void *cb, void **task_cb)
|
|
|
|
{
|
|
|
|
struct parallel_fetch_state *state = cb;
|
|
|
|
char *remote;
|
|
|
|
|
|
|
|
if (state->next < 0 || state->next >= state->remotes->nr)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
remote = state->remotes->items[state->next++].string;
|
|
|
|
*task_cb = remote;
|
|
|
|
|
2020-07-29 04:24:27 +08:00
|
|
|
strvec_pushv(&cp->args, state->argv);
|
|
|
|
strvec_push(&cp->args, remote);
|
2019-10-06 02:46:40 +08:00
|
|
|
cp->git_cmd = 1;
|
|
|
|
|
|
|
|
if (verbosity >= 0)
|
|
|
|
printf(_("Fetching %s\n"), remote);
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int fetch_failed_to_start(struct strbuf *out, void *cb, void *task_cb)
|
|
|
|
{
|
|
|
|
struct parallel_fetch_state *state = cb;
|
|
|
|
const char *remote = task_cb;
|
|
|
|
|
|
|
|
state->result = error(_("Could not fetch %s"), remote);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int fetch_finished(int result, struct strbuf *out,
|
|
|
|
void *cb, void *task_cb)
|
|
|
|
{
|
|
|
|
struct parallel_fetch_state *state = cb;
|
|
|
|
const char *remote = task_cb;
|
|
|
|
|
|
|
|
if (result) {
|
|
|
|
strbuf_addf(out, _("could not fetch '%s' (exit code: %d)\n"),
|
|
|
|
remote, result);
|
|
|
|
state->result = -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int fetch_multiple(struct string_list *list, int max_children)
|
2010-11-12 20:54:52 +08:00
|
|
|
{
|
|
|
|
int i, result = 0;
|
2020-07-29 04:24:27 +08:00
|
|
|
struct strvec argv = STRVEC_INIT;
|
2009-11-10 04:09:56 +08:00
|
|
|
|
2020-08-18 22:25:22 +08:00
|
|
|
if (!append && write_fetch_head) {
|
2010-02-25 03:02:05 +08:00
|
|
|
int errcode = truncate_fetch_head();
|
|
|
|
if (errcode)
|
|
|
|
return errcode;
|
|
|
|
}
|
|
|
|
|
2020-07-29 04:24:27 +08:00
|
|
|
strvec_pushl(&argv, "fetch", "--append", "--no-auto-gc",
|
strvec: fix indentation in renamed calls
Code which split an argv_array call across multiple lines, like:
argv_array_pushl(&args, "one argument",
"another argument", "and more",
NULL);
was recently mechanically renamed to use strvec, which results in
mis-matched indentation like:
strvec_pushl(&args, "one argument",
"another argument", "and more",
NULL);
Let's fix these up to align the arguments with the opening paren. I did
this manually by sifting through the results of:
git jump grep 'strvec_.*,$'
and liberally applying my editor's auto-format. Most of the changes are
of the form shown above, though I also normalized a few that had
originally used a single-tab indentation (rather than our usual style of
aligning with the open paren). I also rewrapped a couple of obvious
cases (e.g., where previously too-long lines became short enough to fit
on one), but I wasn't aggressive about it. In cases broken to three or
more lines, the grouping of arguments is sometimes meaningful, and it
wasn't worth my time or reviewer time to ponder each case individually.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-07-29 04:26:31 +08:00
|
|
|
"--no-write-commit-graph", NULL);
|
2012-09-01 19:27:35 +08:00
|
|
|
add_options_to_argv(&argv);
|
|
|
|
|
2019-10-06 02:46:40 +08:00
|
|
|
if (max_children != 1 && list->nr != 1) {
|
2020-07-29 08:37:20 +08:00
|
|
|
struct parallel_fetch_state state = { argv.v, list, 0, 0 };
|
2019-10-06 02:46:40 +08:00
|
|
|
|
2020-07-29 04:24:27 +08:00
|
|
|
strvec_push(&argv, "--end-of-options");
|
2019-10-06 02:46:40 +08:00
|
|
|
result = run_processes_parallel_tr2(max_children,
|
|
|
|
&fetch_next_remote,
|
|
|
|
&fetch_failed_to_start,
|
|
|
|
&fetch_finished,
|
|
|
|
&state,
|
|
|
|
"fetch", "parallel/fetch");
|
|
|
|
|
|
|
|
if (!result)
|
|
|
|
result = state.result;
|
|
|
|
} else
|
|
|
|
for (i = 0; i < list->nr; i++) {
|
|
|
|
const char *name = list->items[i].string;
|
2020-07-29 04:24:27 +08:00
|
|
|
strvec_push(&argv, name);
|
2019-10-06 02:46:40 +08:00
|
|
|
if (verbosity >= 0)
|
|
|
|
printf(_("Fetching %s\n"), name);
|
2020-07-29 08:37:20 +08:00
|
|
|
if (run_command_v_opt(argv.v, RUN_GIT_CMD)) {
|
2019-10-06 02:46:40 +08:00
|
|
|
error(_("Could not fetch %s"), name);
|
|
|
|
result = 1;
|
|
|
|
}
|
2020-07-29 04:24:27 +08:00
|
|
|
strvec_pop(&argv);
|
2009-11-10 04:09:56 +08:00
|
|
|
}
|
|
|
|
|
2020-07-29 04:24:27 +08:00
|
|
|
strvec_clear(&argv);
|
2019-10-06 02:46:40 +08:00
|
|
|
return !!result;
|
2009-11-10 04:09:56 +08:00
|
|
|
}
|
|
|
|
|
2017-12-08 23:58:50 +08:00
|
|
|
/*
|
|
|
|
* Fetching from the promisor remote should use the given filter-spec
|
|
|
|
* or inherit the default filter-spec from the config.
|
|
|
|
*/
|
|
|
|
static inline void fetch_one_setup_partial(struct remote *remote)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Explicit --no-filter argument overrides everything, regardless
|
|
|
|
* of any prior partial clones and fetches.
|
|
|
|
*/
|
|
|
|
if (filter_options.no_filter)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If no prior partial clone/fetch and the current fetch DID NOT
|
|
|
|
* request a partial-fetch, do a normal fetch.
|
|
|
|
*/
|
2019-06-25 21:40:31 +08:00
|
|
|
if (!has_promisor_remote() && !filter_options.choice)
|
2017-12-08 23:58:50 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
/*
|
2019-06-25 21:40:33 +08:00
|
|
|
* If this is a partial-fetch request, we enable partial on
|
|
|
|
* this repo if not already enabled and remember the given
|
|
|
|
* filter-spec as the default for subsequent fetches to this
|
2020-09-29 06:26:38 +08:00
|
|
|
* remote if there is currently no default filter-spec.
|
2017-12-08 23:58:50 +08:00
|
|
|
*/
|
2019-06-25 21:40:33 +08:00
|
|
|
if (filter_options.choice) {
|
2017-12-08 23:58:50 +08:00
|
|
|
partial_clone_register(remote->name, &filter_options);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Do a partial-fetch from the promisor remote using either the
|
|
|
|
* explicitly given filter-spec or inherit the filter-spec from
|
|
|
|
* the config.
|
|
|
|
*/
|
|
|
|
if (!filter_options.choice)
|
2019-06-25 21:40:32 +08:00
|
|
|
partial_clone_get_default_filter_spec(&filter_options, remote->name);
|
2017-12-08 23:58:50 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2020-08-18 12:01:32 +08:00
|
|
|
static int fetch_one(struct remote *remote, int argc, const char **argv,
|
|
|
|
int prune_tags_ok, int use_stdin_refspecs)
|
2007-09-11 11:03:25 +08:00
|
|
|
{
|
2018-05-17 06:58:04 +08:00
|
|
|
struct refspec rs = REFSPEC_INIT_FETCH;
|
|
|
|
int i;
|
2008-04-29 04:23:35 +08:00
|
|
|
int exit_code;
|
2018-02-10 04:32:16 +08:00
|
|
|
int maybe_prune_tags;
|
|
|
|
int remote_via_config = remote_is_configured(remote, 0);
|
2007-09-11 11:03:25 +08:00
|
|
|
|
Give error when no remote is configured
When there's no explicitly-named remote, we use the remote specified
for the current branch, which in turn defaults to "origin". But it
this case should require the remote to actually be configured, and not
fall back to the path "origin".
Possibly, the config file's "remote = something" should require the
something to be a configured remote instead of a bare repository URL,
but we actually test with a bare repository URL.
In fetch, we were giving the sensible error message when coming up
with a URL failed, but this wasn't actually reachable, so move that
error up and use it when appropriate.
In push, we need a new error message, because the old one (formerly
unreachable without a lot of help) used the repo name, which was NULL.
Signed-off-by: Daniel Barkalow <barkalow@iabervon.org>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2009-03-11 13:47:20 +08:00
|
|
|
if (!remote)
|
2011-02-23 07:41:51 +08:00
|
|
|
die(_("No remote repository specified. Please, specify either a URL or a\n"
|
|
|
|
"remote name from which new revisions should be fetched."));
|
Give error when no remote is configured
When there's no explicitly-named remote, we use the remote specified
for the current branch, which in turn defaults to "origin". But it
this case should require the remote to actually be configured, and not
fall back to the path "origin".
Possibly, the config file's "remote = something" should require the
something to be a configured remote instead of a bare repository URL,
but we actually test with a bare repository URL.
In fetch, we were giving the sensible error message when coming up
with a URL failed, but this wasn't actually reachable, so move that
error up and use it when appropriate.
In push, we need a new error message, because the old one (formerly
unreachable without a lot of help) used the repo name, which was NULL.
Signed-off-by: Daniel Barkalow <barkalow@iabervon.org>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2009-03-11 13:47:20 +08:00
|
|
|
|
2016-06-12 18:53:59 +08:00
|
|
|
gtransport = prepare_transport(remote, 1);
|
2013-07-13 17:36:24 +08:00
|
|
|
|
|
|
|
if (prune < 0) {
|
|
|
|
/* no command line request */
|
2018-02-10 04:32:02 +08:00
|
|
|
if (0 <= remote->prune)
|
|
|
|
prune = remote->prune;
|
2013-07-13 17:36:24 +08:00
|
|
|
else if (0 <= fetch_prune_config)
|
|
|
|
prune = fetch_prune_config;
|
|
|
|
else
|
|
|
|
prune = PRUNE_BY_DEFAULT;
|
|
|
|
}
|
|
|
|
|
fetch: add a --prune-tags option and fetch.pruneTags config
Add a --prune-tags option to git-fetch, along with fetch.pruneTags
config option and a -P shorthand (-p is --prune). This allows for
doing any of:
git fetch -p -P
git fetch --prune --prune-tags
git fetch -p -P origin
git fetch --prune --prune-tags origin
Or simply:
git config fetch.prune true &&
git config fetch.pruneTags true &&
git fetch
Instead of the much more verbose:
git fetch --prune origin 'refs/tags/*:refs/tags/*' '+refs/heads/*:refs/remotes/origin/*'
Before this feature it was painful to support the use-case of pulling
from a repo which is having both its branches *and* tags deleted
regularly, and have our local references to reflect upstream.
At work we create deployment tags in the repo for each rollout, and
there's *lots* of those, so they're archived within weeks for
performance reasons.
Without this change it's hard to centrally configure such repos in
/etc/gitconfig (on servers that are only used for working with
them). You need to set fetch.prune=true globally, and then for each
repo:
git -C {} config --replace-all remote.origin.fetch "refs/tags/*:refs/tags/*" "^\+*refs/tags/\*:refs/tags/\*$"
Now I can simply set fetch.pruneTags=true in /etc/gitconfig as well,
and users running "git pull" will automatically get the pruning
semantics I want.
Even though "git remote" has corresponding "prune" and "update
--prune" subcommands I'm intentionally not adding a corresponding
prune-tags or "update --prune --prune-tags" mode to that command.
It's advertised (as noted in my recent "git remote doc: correct
dangerous lies about what prune does") as only modifying remote
tracking references, whereas any --prune-tags option is always going
to modify what from the user's perspective is a local copy of the tag,
since there's no such thing as a remote tracking tag.
Ideally add_prune_tags_to_fetch_refspec() would be something that
would use ALLOC_GROW() to grow the 'fetch` member of the 'remote'
struct. Instead I'm realloc-ing remote->fetch and adding the
tag_refspec to the end.
The reason is that parse_{fetch,push}_refspec which allocate the
refspec (ultimately remote->fetch) struct are called many places that
don't have access to a 'remote' struct. It would be hard to change all
their callsites to be amenable to carry around the bookkeeping
variables required for dynamic allocation.
All the other callers of the API first incrementally construct the
string version of the refspec in remote->fetch_refspec via
add_fetch_refspec(), before finally calling parse_fetch_refspec() via
some variation of remote_get().
It's less of a pain to deal with the one special case that needs to
modify already constructed refspecs than to chase down and change all
the other callsites. The API I'm adding is intentionally not
generalized because if we add more of these we'd probably want to
re-visit how this is done.
See my "Re: [BUG] git remote prune removes local tags, depending on
fetch config" (87po6ahx87.fsf@evledraar.gmail.com;
https://public-inbox.org/git/87po6ahx87.fsf@evledraar.gmail.com/) for
more background info.
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-02-10 04:32:15 +08:00
|
|
|
if (prune_tags < 0) {
|
|
|
|
/* no command line request */
|
|
|
|
if (0 <= remote->prune_tags)
|
|
|
|
prune_tags = remote->prune_tags;
|
|
|
|
else if (0 <= fetch_prune_tags_config)
|
|
|
|
prune_tags = fetch_prune_tags_config;
|
|
|
|
else
|
|
|
|
prune_tags = PRUNE_TAGS_BY_DEFAULT;
|
|
|
|
}
|
|
|
|
|
2018-02-10 04:32:16 +08:00
|
|
|
maybe_prune_tags = prune_tags_ok && prune_tags;
|
|
|
|
if (maybe_prune_tags && remote_via_config)
|
2018-05-17 06:58:02 +08:00
|
|
|
refspec_append(&remote->fetch, TAG_REFSPEC);
|
fetch: add a --prune-tags option and fetch.pruneTags config
Add a --prune-tags option to git-fetch, along with fetch.pruneTags
config option and a -P shorthand (-p is --prune). This allows for
doing any of:
git fetch -p -P
git fetch --prune --prune-tags
git fetch -p -P origin
git fetch --prune --prune-tags origin
Or simply:
git config fetch.prune true &&
git config fetch.pruneTags true &&
git fetch
Instead of the much more verbose:
git fetch --prune origin 'refs/tags/*:refs/tags/*' '+refs/heads/*:refs/remotes/origin/*'
Before this feature it was painful to support the use-case of pulling
from a repo which is having both its branches *and* tags deleted
regularly, and have our local references to reflect upstream.
At work we create deployment tags in the repo for each rollout, and
there's *lots* of those, so they're archived within weeks for
performance reasons.
Without this change it's hard to centrally configure such repos in
/etc/gitconfig (on servers that are only used for working with
them). You need to set fetch.prune=true globally, and then for each
repo:
git -C {} config --replace-all remote.origin.fetch "refs/tags/*:refs/tags/*" "^\+*refs/tags/\*:refs/tags/\*$"
Now I can simply set fetch.pruneTags=true in /etc/gitconfig as well,
and users running "git pull" will automatically get the pruning
semantics I want.
Even though "git remote" has corresponding "prune" and "update
--prune" subcommands I'm intentionally not adding a corresponding
prune-tags or "update --prune --prune-tags" mode to that command.
It's advertised (as noted in my recent "git remote doc: correct
dangerous lies about what prune does") as only modifying remote
tracking references, whereas any --prune-tags option is always going
to modify what from the user's perspective is a local copy of the tag,
since there's no such thing as a remote tracking tag.
Ideally add_prune_tags_to_fetch_refspec() would be something that
would use ALLOC_GROW() to grow the 'fetch` member of the 'remote'
struct. Instead I'm realloc-ing remote->fetch and adding the
tag_refspec to the end.
The reason is that parse_{fetch,push}_refspec which allocate the
refspec (ultimately remote->fetch) struct are called many places that
don't have access to a 'remote' struct. It would be hard to change all
their callsites to be amenable to carry around the bookkeeping
variables required for dynamic allocation.
All the other callers of the API first incrementally construct the
string version of the refspec in remote->fetch_refspec via
add_fetch_refspec(), before finally calling parse_fetch_refspec() via
some variation of remote_get().
It's less of a pain to deal with the one special case that needs to
modify already constructed refspecs than to chase down and change all
the other callsites. The API I'm adding is intentionally not
generalized because if we add more of these we'd probably want to
re-visit how this is done.
See my "Re: [BUG] git remote prune removes local tags, depending on
fetch config" (87po6ahx87.fsf@evledraar.gmail.com;
https://public-inbox.org/git/87po6ahx87.fsf@evledraar.gmail.com/) for
more background info.
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-02-10 04:32:15 +08:00
|
|
|
|
2018-05-17 06:58:04 +08:00
|
|
|
if (maybe_prune_tags && (argc || !remote_via_config))
|
|
|
|
refspec_append(&rs, TAG_REFSPEC);
|
2018-02-10 04:32:16 +08:00
|
|
|
|
2018-05-17 06:58:04 +08:00
|
|
|
for (i = 0; i < argc; i++) {
|
|
|
|
if (!strcmp(argv[i], "tag")) {
|
|
|
|
i++;
|
|
|
|
if (i >= argc)
|
|
|
|
die(_("You need to specify a tag name."));
|
|
|
|
|
2020-09-05 22:49:30 +08:00
|
|
|
refspec_appendf(&rs, "refs/tags/%s:refs/tags/%s",
|
|
|
|
argv[i], argv[i]);
|
2018-05-17 06:58:04 +08:00
|
|
|
} else {
|
|
|
|
refspec_append(&rs, argv[i]);
|
2007-09-11 11:03:25 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-08-18 12:01:32 +08:00
|
|
|
if (use_stdin_refspecs) {
|
|
|
|
struct strbuf line = STRBUF_INIT;
|
|
|
|
while (strbuf_getline_lf(&line, stdin) != EOF)
|
|
|
|
refspec_append(&rs, line.buf);
|
|
|
|
strbuf_release(&line);
|
|
|
|
}
|
|
|
|
|
2018-04-24 06:46:24 +08:00
|
|
|
if (server_options.nr)
|
|
|
|
gtransport->server_options = &server_options;
|
|
|
|
|
2009-01-22 14:03:08 +08:00
|
|
|
sigchain_push_common(unlock_pack_on_signal);
|
2007-09-14 15:31:25 +08:00
|
|
|
atexit(unlock_pack);
|
fetch: ignore SIGPIPE during network operation
The default SIGPIPE behavior can be useful for a command that generates
a lot of output: if the receiver of our output goes away, we'll be
notified asynchronously to stop generating it (typically by killing the
program).
But for a command like fetch, which is primarily concerned with
receiving data and writing it to disk, an unexpected SIGPIPE can be
awkward. We're already checking the return value of all of our write()
calls, and dying due to the signal takes away our chance to gracefully
handle the error.
On Linux, we wouldn't generally see SIGPIPE at all during fetch. If the
other side of the network connection hangs up, we'll see ECONNRESET. But
on OS X, we get a SIGPIPE, and the process is killed. This causes t5570
to racily fail, as we sometimes die by signal (instead of the expected
die() call) when the server side hangs up.
Let's ignore SIGPIPE during the network portion of the fetch, which will
cause our write() to return EPIPE, giving us consistent behavior across
platforms.
This fixes the test flakiness, but note that it stops short of fixing
the larger problem. The server side hit a fatal error, sent us an "ERR"
packet, and then hung up. We notice the failure because we're trying to
write to a closed socket. But by dying immediately, we never actually
read the ERR packet and report its content to the user. This is a (racy)
problem on all platforms. So this patch lays the groundwork from which
that problem might be fixed consistently, but it doesn't actually fix
it.
Note the placement of the SIGPIPE handling. The absolute minimal change
would be to ignore SIGPIPE only when we're writing. But twiddling the
signal handler for each write call is inefficient and maintenance
burden. On the opposite end of the spectrum, we could simply declare
that fetch does not need SIGPIPE handling, since it doesn't generate a
lot of output, and we could just ignore it at the start of cmd_fetch().
This patch takes a middle ground. It ignores SIGPIPE during the network
operation (which is admittedly most of the program, since the actual
network operations are all done under the hood by the transport code).
So it's still pretty coarse.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2019-03-04 00:58:43 +08:00
|
|
|
sigchain_push(SIGPIPE, SIG_IGN);
|
2018-05-17 06:58:07 +08:00
|
|
|
exit_code = do_fetch(gtransport, &rs);
|
fetch: ignore SIGPIPE during network operation
The default SIGPIPE behavior can be useful for a command that generates
a lot of output: if the receiver of our output goes away, we'll be
notified asynchronously to stop generating it (typically by killing the
program).
But for a command like fetch, which is primarily concerned with
receiving data and writing it to disk, an unexpected SIGPIPE can be
awkward. We're already checking the return value of all of our write()
calls, and dying due to the signal takes away our chance to gracefully
handle the error.
On Linux, we wouldn't generally see SIGPIPE at all during fetch. If the
other side of the network connection hangs up, we'll see ECONNRESET. But
on OS X, we get a SIGPIPE, and the process is killed. This causes t5570
to racily fail, as we sometimes die by signal (instead of the expected
die() call) when the server side hangs up.
Let's ignore SIGPIPE during the network portion of the fetch, which will
cause our write() to return EPIPE, giving us consistent behavior across
platforms.
This fixes the test flakiness, but note that it stops short of fixing
the larger problem. The server side hit a fatal error, sent us an "ERR"
packet, and then hung up. We notice the failure because we're trying to
write to a closed socket. But by dying immediately, we never actually
read the ERR packet and report its content to the user. This is a (racy)
problem on all platforms. So this patch lays the groundwork from which
that problem might be fixed consistently, but it doesn't actually fix
it.
Note the placement of the SIGPIPE handling. The absolute minimal change
would be to ignore SIGPIPE only when we're writing. But twiddling the
signal handler for each write call is inefficient and maintenance
burden. On the opposite end of the spectrum, we could simply declare
that fetch does not need SIGPIPE handling, since it doesn't generate a
lot of output, and we could just ignore it at the start of cmd_fetch().
This patch takes a middle ground. It ignores SIGPIPE during the network
operation (which is admittedly most of the program, since the actual
network operations are all done under the hood by the transport code).
So it's still pretty coarse.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2019-03-04 00:58:43 +08:00
|
|
|
sigchain_pop(SIGPIPE);
|
2018-05-17 06:58:04 +08:00
|
|
|
refspec_clear(&rs);
|
2013-08-08 06:38:45 +08:00
|
|
|
transport_disconnect(gtransport);
|
|
|
|
gtransport = NULL;
|
2008-04-29 04:23:35 +08:00
|
|
|
return exit_code;
|
2007-09-11 11:03:25 +08:00
|
|
|
}
|
2009-11-10 04:09:56 +08:00
|
|
|
|
|
|
|
int cmd_fetch(int argc, const char **argv, const char *prefix)
|
|
|
|
{
|
|
|
|
int i;
|
2016-06-15 02:28:56 +08:00
|
|
|
struct string_list list = STRING_LIST_INIT_DUP;
|
2017-12-08 23:58:43 +08:00
|
|
|
struct remote *remote = NULL;
|
2009-11-10 04:09:56 +08:00
|
|
|
int result = 0;
|
2018-03-07 06:54:01 +08:00
|
|
|
int prune_tags_ok = 1;
|
2009-11-10 04:09:56 +08:00
|
|
|
|
2011-02-24 22:30:19 +08:00
|
|
|
packet_trace_identity("fetch");
|
|
|
|
|
2009-11-10 04:09:56 +08:00
|
|
|
/* Record the command line for the reflog */
|
|
|
|
strbuf_addstr(&default_rla, "fetch");
|
2020-06-05 04:08:29 +08:00
|
|
|
for (i = 1; i < argc; i++) {
|
|
|
|
/* This handles non-URLs gracefully */
|
|
|
|
char *anon = transport_anonymize_url(argv[i]);
|
|
|
|
|
|
|
|
strbuf_addf(&default_rla, " %s", anon);
|
|
|
|
free(anon);
|
|
|
|
}
|
2009-11-10 04:09:56 +08:00
|
|
|
|
2013-07-13 17:36:24 +08:00
|
|
|
git_config(git_fetch_config, NULL);
|
|
|
|
|
2009-11-10 04:09:56 +08:00
|
|
|
argc = parse_options(argc, argv, prefix,
|
|
|
|
builtin_fetch_options, builtin_fetch_usage, 0);
|
fetch: avoid reading submodule config until needed
In "fetch", there are two parameters submodule_fetch_jobs_config and
recurse_submodules that can be set in a variety of ways: through
.gitmodules, through .git/config, and through the command line.
Currently "fetch" handles this by first reading .gitmodules, then
reading .git/config (allowing it to overwrite existing values), then
reading the command line (allowing it to overwrite existing values).
Notice that we can avoid reading .gitmodules if .git/config and/or the
command line already provides us with what we need. In addition, if
recurse_submodules is found to be "no", we do not need the value of
submodule_fetch_jobs_config.
Avoiding reading .gitmodules is especially important when we use "git
fetch" to perform lazy fetches in a partial clone because the
.gitmodules file itself might need to be lazily fetched (which would
otherwise cause an infinite loop).
In light of all this, avoid reading .gitmodules until necessary. When
reading it, we may only need one of the two parameters it provides, so
teach fetch_config_from_gitmodules() to support NULL arguments. With
this patch, users (including Git itself when invoking "git fetch" to
lazy-fetch) will be able to guarantee avoiding reading .gitmodules by
passing --recurse-submodules=no.
Signed-off-by: Jonathan Tan <jonathantanmy@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-08-18 12:01:33 +08:00
|
|
|
if (recurse_submodules != RECURSE_SUBMODULES_OFF) {
|
|
|
|
int *sfjc = submodule_fetch_jobs_config == -1
|
|
|
|
? &submodule_fetch_jobs_config : NULL;
|
|
|
|
int *rs = recurse_submodules == RECURSE_SUBMODULES_DEFAULT
|
|
|
|
? &recurse_submodules : NULL;
|
|
|
|
|
|
|
|
fetch_config_from_gitmodules(sfjc, rs);
|
|
|
|
}
|
2009-11-10 04:09:56 +08:00
|
|
|
|
2021-07-08 18:53:15 +08:00
|
|
|
if (negotiate_only && !negotiation_tip.nr)
|
|
|
|
die(_("--negotiate-only needs one or more --negotiate-tip=*"));
|
|
|
|
|
fetch, upload-pack: --deepen=N extends shallow boundary by N commits
In git-fetch, --depth argument is always relative with the latest
remote refs. This makes it a bit difficult to cover this use case,
where the user wants to make the shallow history, say 3 levels
deeper. It would work if remote refs have not moved yet, but nobody
can guarantee that, especially when that use case is performed a
couple months after the last clone or "git fetch --depth". Also,
modifying shallow boundary using --depth does not work well with
clones created by --since or --not.
This patch fixes that. A new argument --deepen=<N> will add <N> more (*)
parent commits to the current history regardless of where remote refs
are.
Have/Want negotiation is still respected. So if remote refs move, the
server will send two chunks: one between "have" and "want" and another
to extend shallow history. In theory, the client could send no "want"s
in order to get the second chunk only. But the protocol does not allow
that. Either you send no want lines, which means ls-remote; or you
have to send at least one want line that carries deep-relative to the
server.
The main work was done by Dongcan Jiang. I fixed it up here and there.
And of course all the bugs belong to me.
(*) We could even support --deepen=<N> where <N> is negative. In that
case we can cut some history from the shallow clone. This operation
(and --depth=<shorter depth>) does not require interaction with remote
side (and more complicated to implement as a result).
Helped-by: Duy Nguyen <pclouds@gmail.com>
Helped-by: Eric Sunshine <sunshine@sunshineco.com>
Helped-by: Junio C Hamano <gitster@pobox.com>
Signed-off-by: Dongcan Jiang <dongcan.jiang@gmail.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2016-06-12 18:54:09 +08:00
|
|
|
if (deepen_relative) {
|
|
|
|
if (deepen_relative < 0)
|
|
|
|
die(_("Negative depth in --deepen is not supported"));
|
|
|
|
if (depth)
|
|
|
|
die(_("--deepen and --depth are mutually exclusive"));
|
|
|
|
depth = xstrfmt("%d", deepen_relative);
|
|
|
|
}
|
2013-01-11 17:05:46 +08:00
|
|
|
if (unshallow) {
|
|
|
|
if (depth)
|
|
|
|
die(_("--depth and --unshallow cannot be used together"));
|
2018-05-18 06:51:46 +08:00
|
|
|
else if (!is_repository_shallow(the_repository))
|
2013-01-11 17:05:46 +08:00
|
|
|
die(_("--unshallow on a complete repository does not make sense"));
|
2015-09-25 05:07:07 +08:00
|
|
|
else
|
|
|
|
depth = xstrfmt("%d", INFINITE_DEPTH);
|
2013-01-11 17:05:46 +08:00
|
|
|
}
|
|
|
|
|
2013-12-05 11:31:11 +08:00
|
|
|
/* no need to be strict, transport_set_option() will validate it again */
|
|
|
|
if (depth && atoi(depth) < 1)
|
|
|
|
die(_("depth %s is not a positive number"), depth);
|
2016-06-12 18:54:04 +08:00
|
|
|
if (depth || deepen_since || deepen_not.nr)
|
2016-06-12 18:53:59 +08:00
|
|
|
deepen = 1;
|
2013-12-05 11:31:11 +08:00
|
|
|
|
2020-08-18 22:25:22 +08:00
|
|
|
/* FETCH_HEAD never gets updated in --dry-run mode */
|
|
|
|
if (dry_run)
|
|
|
|
write_fetch_head = 0;
|
|
|
|
|
2009-11-10 04:09:56 +08:00
|
|
|
if (all) {
|
|
|
|
if (argc == 1)
|
2011-02-23 07:41:51 +08:00
|
|
|
die(_("fetch --all does not take a repository argument"));
|
2009-11-10 04:09:56 +08:00
|
|
|
else if (argc > 1)
|
2011-02-23 07:41:51 +08:00
|
|
|
die(_("fetch --all does not make sense with refspecs"));
|
2009-11-10 04:09:56 +08:00
|
|
|
(void) for_each_remote(get_one_remote_for_fetch, &list);
|
|
|
|
} else if (argc == 0) {
|
|
|
|
/* No arguments -- use default remote */
|
|
|
|
remote = remote_get(NULL);
|
2009-11-10 04:10:32 +08:00
|
|
|
} else if (multiple) {
|
|
|
|
/* All arguments are assumed to be remotes or groups */
|
|
|
|
for (i = 0; i < argc; i++)
|
|
|
|
if (!add_remote_or_group(argv[i], &list))
|
2011-02-23 07:41:51 +08:00
|
|
|
die(_("No such remote or remote group: %s"), argv[i]);
|
2009-11-10 04:09:56 +08:00
|
|
|
} else {
|
|
|
|
/* Single remote or group */
|
|
|
|
(void) add_remote_or_group(argv[0], &list);
|
|
|
|
if (list.nr > 1) {
|
|
|
|
/* More than one remote */
|
|
|
|
if (argc > 1)
|
2011-02-23 07:41:51 +08:00
|
|
|
die(_("Fetching a group and specifying refspecs does not make sense"));
|
2009-11-10 04:09:56 +08:00
|
|
|
} else {
|
|
|
|
/* Zero or one remotes */
|
|
|
|
remote = remote_get(argv[0]);
|
2018-03-07 06:54:01 +08:00
|
|
|
prune_tags_ok = (argc == 1);
|
2017-12-08 23:58:43 +08:00
|
|
|
argc--;
|
|
|
|
argv++;
|
2009-11-10 04:09:56 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
fetch: teach independent negotiation (no packfile)
Currently, the packfile negotiation step within a Git fetch cannot be
done independent of sending the packfile, even though there is at least
one application wherein this is useful. Therefore, make it possible for
this negotiation step to be done independently. A subsequent commit will
use this for one such application - push negotiation.
This feature is for protocol v2 only. (An implementation for protocol v0
would require a separate implementation in the fetch, transport, and
transport helper code.)
In the protocol, the main hindrance towards independent negotiation is
that the server can unilaterally decide to send the packfile. This is
solved by a "wait-for-done" argument: the server will then wait for the
client to say "done". In practice, the client will never say it; instead
it will cease requests once it is satisfied.
In the client, the main change lies in the transport and transport
helper code. fetch_refs_via_pack() performs everything needed - protocol
version and capability checks, and the negotiation itself.
There are 2 code paths that do not go through fetch_refs_via_pack() that
needed to be individually excluded: the bundle transport (excluded
through requiring smart_options, which the bundle transport doesn't
support) and transport helpers that do not support takeover. If or when
we support independent negotiation for protocol v0, we will need to
modify these 2 code paths to support it. But for now, report failure if
independent negotiation is requested in these cases.
Signed-off-by: Jonathan Tan <jonathantanmy@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-05-05 05:16:01 +08:00
|
|
|
if (negotiate_only) {
|
|
|
|
struct oidset acked_commits = OIDSET_INIT;
|
|
|
|
struct oidset_iter iter;
|
|
|
|
const struct object_id *oid;
|
|
|
|
|
|
|
|
if (!remote)
|
|
|
|
die(_("must supply remote when using --negotiate-only"));
|
|
|
|
gtransport = prepare_transport(remote, 1);
|
|
|
|
if (gtransport->smart_options) {
|
|
|
|
gtransport->smart_options->acked_commits = &acked_commits;
|
|
|
|
} else {
|
|
|
|
warning(_("Protocol does not support --negotiate-only, exiting."));
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
if (server_options.nr)
|
|
|
|
gtransport->server_options = &server_options;
|
|
|
|
result = transport_fetch_refs(gtransport, NULL);
|
|
|
|
|
|
|
|
oidset_iter_init(&acked_commits, &iter);
|
|
|
|
while ((oid = oidset_iter_next(&iter)))
|
|
|
|
printf("%s\n", oid_to_hex(oid));
|
|
|
|
oidset_clear(&acked_commits);
|
|
|
|
} else if (remote) {
|
2019-06-25 21:40:31 +08:00
|
|
|
if (filter_options.choice || has_promisor_remote())
|
2017-12-08 23:58:50 +08:00
|
|
|
fetch_one_setup_partial(remote);
|
2020-08-18 12:01:32 +08:00
|
|
|
result = fetch_one(remote, argc, argv, prune_tags_ok, stdin_refspecs);
|
2017-12-08 23:58:44 +08:00
|
|
|
} else {
|
2019-10-06 02:46:40 +08:00
|
|
|
int max_children = max_jobs;
|
|
|
|
|
2017-12-08 23:58:44 +08:00
|
|
|
if (filter_options.choice)
|
2019-01-13 16:52:19 +08:00
|
|
|
die(_("--filter can only be used with the remote "
|
|
|
|
"configured in extensions.partialclone"));
|
2019-10-06 02:46:40 +08:00
|
|
|
|
fetch: implement support for atomic reference updates
When executing a fetch, then git will currently allocate one reference
transaction per reference update and directly commit it. This means that
fetches are non-atomic: even if some of the reference updates fail,
others may still succeed and modify local references.
This is fine in many scenarios, but this strategy has its downsides.
- The view of remote references may be inconsistent and may show a
bastardized state of the remote repository.
- Batching together updates may improve performance in certain
scenarios. While the impact probably isn't as pronounced with loose
references, the upcoming reftable backend may benefit as it needs to
write less files in case the update is batched.
- The reference-update hook is currently being executed twice per
updated reference. While this doesn't matter when there is no such
hook, we have seen severe performance regressions when doing a
git-fetch(1) with reference-transaction hook when the remote
repository has hundreds of thousands of references.
Similar to `git push --atomic`, this commit thus introduces atomic
fetches. Instead of allocating one reference transaction per updated
reference, it causes us to only allocate a single transaction and commit
it as soon as all updates were received. If locking of any reference
fails, then we abort the complete transaction and don't update any
reference, which gives us an all-or-nothing fetch.
Note that this may not completely fix the first of above downsides, as
the consistent view also depends on the server-side. If the server
doesn't have a consistent view of its own references during the
reference negotiation phase, then the client would get the same
inconsistent view the server has. This is a separate problem though and,
if it actually exists, can be fixed at a later point.
This commit also changes the way we write FETCH_HEAD in case `--atomic`
is passed. Instead of writing changes as we go, we need to accumulate
all changes first and only commit them at the end when we know that all
reference updates succeeded. Ideally, we'd just do so via a temporary
file so that we don't need to carry all updates in-memory. This isn't
trivially doable though considering the `--append` mode, where we do not
truncate the file but simply append to it. And given that we support
concurrent processes appending to FETCH_HEAD at the same time without
any loss of data, seeding the temporary file with current contents of
FETCH_HEAD initially and then doing a rename wouldn't work either. So
this commit implements the simple strategy of buffering all changes and
appending them to the file on commit.
Signed-off-by: Patrick Steinhardt <ps@pks.im>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-01-12 20:27:52 +08:00
|
|
|
if (atomic_fetch)
|
|
|
|
die(_("--atomic can only be used when fetching "
|
|
|
|
"from one remote"));
|
|
|
|
|
2020-08-18 12:01:32 +08:00
|
|
|
if (stdin_refspecs)
|
|
|
|
die(_("--stdin can only be used when fetching "
|
|
|
|
"from one remote"));
|
|
|
|
|
2019-10-06 02:46:40 +08:00
|
|
|
if (max_children < 0)
|
|
|
|
max_children = fetch_parallel_config;
|
|
|
|
|
2017-12-08 23:58:50 +08:00
|
|
|
/* TODO should this also die if we have a previous partial-clone? */
|
2019-10-06 02:46:40 +08:00
|
|
|
result = fetch_multiple(&list, max_children);
|
2017-12-08 23:58:44 +08:00
|
|
|
}
|
2017-12-08 23:58:43 +08:00
|
|
|
|
2010-11-11 07:55:02 +08:00
|
|
|
if (!result && (recurse_submodules != RECURSE_SUBMODULES_OFF)) {
|
2020-07-29 04:24:27 +08:00
|
|
|
struct strvec options = STRVEC_INIT;
|
2019-10-06 02:46:40 +08:00
|
|
|
int max_children = max_jobs;
|
|
|
|
|
|
|
|
if (max_children < 0)
|
|
|
|
max_children = submodule_fetch_jobs_config;
|
|
|
|
if (max_children < 0)
|
|
|
|
max_children = fetch_parallel_config;
|
2012-09-01 19:27:35 +08:00
|
|
|
|
|
|
|
add_options_to_argv(&options);
|
2017-12-13 03:53:52 +08:00
|
|
|
result = fetch_populated_submodules(the_repository,
|
|
|
|
&options,
|
2010-11-12 20:54:52 +08:00
|
|
|
submodule_prefix,
|
2011-03-07 06:11:21 +08:00
|
|
|
recurse_submodules,
|
2017-08-03 03:49:19 +08:00
|
|
|
recurse_submodules_default,
|
2015-12-16 08:04:12 +08:00
|
|
|
verbosity < 0,
|
|
|
|
max_children);
|
2020-07-29 04:24:27 +08:00
|
|
|
strvec_clear(&options);
|
2010-11-12 20:54:52 +08:00
|
|
|
}
|
|
|
|
|
2009-11-10 04:09:56 +08:00
|
|
|
string_list_clear(&list, 0);
|
|
|
|
|
2019-09-03 10:22:02 +08:00
|
|
|
prepare_repo_settings(the_repository);
|
2019-11-03 08:21:56 +08:00
|
|
|
if (fetch_write_commit_graph > 0 ||
|
|
|
|
(fetch_write_commit_graph < 0 &&
|
|
|
|
the_repository->settings.fetch_write_commit_graph)) {
|
2019-09-30 12:19:32 +08:00
|
|
|
int commit_graph_flags = COMMIT_GRAPH_WRITE_SPLIT;
|
2019-09-03 10:22:02 +08:00
|
|
|
|
|
|
|
if (progress)
|
2019-09-30 12:19:32 +08:00
|
|
|
commit_graph_flags |= COMMIT_GRAPH_WRITE_PROGRESS;
|
2019-09-03 10:22:02 +08:00
|
|
|
|
2020-02-04 13:51:50 +08:00
|
|
|
write_commit_graph_reachable(the_repository->objects->odb,
|
2019-09-03 10:22:02 +08:00
|
|
|
commit_graph_flags,
|
commit-graph: prefer default size_mult when given zero
In 50f26bd ("fetch: add fetch.writeCommitGraph config setting",
2019-09-02), the fetch builtin added the capability to write a
commit-graph using the "--split" feature. This feature creates
multiple commit-graph files, and those can merge based on a set
of "split options" including a size multiple. The default size
multiple is 2, which intends to provide a log_2 N depth of the
commit-graph chain where N is the number of commits.
However, I noticed during dogfooding that my commit-graph chains
were becoming quite large when left only to builds by 'git fetch'.
It turns out that in split_graph_merge_strategy(), we default the
size_mult variable to 2 except we override it with the context's
split_opts if they exist. In builtin/fetch.c, we create such a
split_opts, but do not populate it with values.
This problem is due to two failures:
1. It is unclear that we can add the flag COMMIT_GRAPH_WRITE_SPLIT
with a NULL split_opts.
2. If we have a non-NULL split_opts, then we override the default
values even if a zero value is given.
Correct both of these issues. First, do not override size_mult when
the options provide a zero value. Second, stop creating a split_opts
in the fetch builtin.
Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-01-03 00:14:14 +08:00
|
|
|
NULL);
|
2019-09-03 10:22:02 +08:00
|
|
|
}
|
|
|
|
|
2019-05-18 02:41:49 +08:00
|
|
|
close_object_store(the_repository->objects);
|
2016-01-14 01:20:11 +08:00
|
|
|
|
auto-gc: extract a reusable helper from "git fetch"
Back in 1991006c (fetch: convert argv_gc_auto to struct argv_array,
2014-08-16), we taught "git fetch --quiet" to pass the "--quiet"
option down to "gc --auto". This issue, however, is not limited to
"fetch":
$ git grep -e 'gc.*--auto' \*.c
finds hits in "am", "commit", "merge", and "rebase" and these
commands do not pass "--quiet" down to "gc --auto" when they
themselves are told to be quiet.
As a preparatory step, let's introduce a helper function
run_auto_gc(), that the caller can pass a boolean "quiet",
and redo the fix to "git fetch" using the helper.
Signed-off-by: Junio C Hamano <gitster@pobox.com>
Reviewed-by: Taylor Blau <me@ttaylorr.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-05-07 04:18:29 +08:00
|
|
|
if (enable_auto_gc)
|
2020-09-18 02:11:44 +08:00
|
|
|
run_auto_maintenance(verbosity < 0);
|
2013-01-27 06:40:38 +08:00
|
|
|
|
2009-11-10 04:09:56 +08:00
|
|
|
return result;
|
|
|
|
}
|