2005-04-19 04:04:43 +08:00
|
|
|
/*
|
|
|
|
* GIT - The information manager from hell
|
|
|
|
*
|
|
|
|
* Copyright (C) Linus Torvalds, 2005
|
|
|
|
*
|
|
|
|
* This handles basic git sha1 object files - packing, unpacking,
|
|
|
|
* creation etc.
|
|
|
|
*/
|
|
|
|
#include "cache.h"
|
2012-11-05 16:41:22 +08:00
|
|
|
#include "string-list.h"
|
2005-06-27 18:35:33 +08:00
|
|
|
#include "delta.h"
|
2005-06-29 05:21:02 +08:00
|
|
|
#include "pack.h"
|
2006-04-02 20:44:09 +08:00
|
|
|
#include "blob.h"
|
|
|
|
#include "commit.h"
|
2011-05-08 16:47:35 +08:00
|
|
|
#include "run-command.h"
|
2006-04-02 20:44:09 +08:00
|
|
|
#include "tag.h"
|
|
|
|
#include "tree.h"
|
2011-02-05 18:52:21 +08:00
|
|
|
#include "tree-walk.h"
|
2007-04-10 12:20:29 +08:00
|
|
|
#include "refs.h"
|
2008-02-28 13:25:19 +08:00
|
|
|
#include "pack-revindex.h"
|
sha1-lookup: more memory efficient search in sorted list of SHA-1
Currently, when looking for a packed object from the pack idx, a
simple binary search is used.
A conventional binary search loop looks like this:
unsigned lo, hi;
do {
unsigned mi = (lo + hi) / 2;
int cmp = "entry pointed at by mi" minus "target";
if (!cmp)
return mi; "mi is the wanted one"
if (cmp > 0)
hi = mi; "mi is larger than target"
else
lo = mi+1; "mi is smaller than target"
} while (lo < hi);
"did not find what we wanted"
The invariants are:
- When entering the loop, 'lo' points at a slot that is never
above the target (it could be at the target), 'hi' points at
a slot that is guaranteed to be above the target (it can
never be at the target).
- We find a point 'mi' between 'lo' and 'hi' ('mi' could be
the same as 'lo', but never can be as high as 'hi'), and
check if 'mi' hits the target. There are three cases:
- if it is a hit, we have found what we are looking for;
- if it is strictly higher than the target, we set it to
'hi', and repeat the search.
- if it is strictly lower than the target, we update 'lo'
to one slot after it, because we allow 'lo' to be at the
target and 'mi' is known to be below the target.
If the loop exits, there is no matching entry.
When choosing 'mi', we do not have to take the "middle" but
anywhere in between 'lo' and 'hi', as long as lo <= mi < hi is
satisfied. When we somehow know that the distance between the
target and 'lo' is much shorter than the target and 'hi', we
could pick 'mi' that is much closer to 'lo' than (hi+lo)/2,
which a conventional binary search would pick.
This patch takes advantage of the fact that the SHA-1 is a good
hash function, and as long as there are enough entries in the
table, we can expect uniform distribution. An entry that begins
with for example "deadbeef..." is much likely to appear much
later than in the midway of a reasonably populated table. In
fact, it can be expected to be near 87% (222/256) from the top
of the table.
This is a work-in-progress and has switches to allow easier
experiments and debugging. Exporting GIT_USE_LOOKUP environment
variable enables this code.
On my admittedly memory starved machine, with a partial KDE
repository (3.0G pack with 95M idx):
$ GIT_USE_LOOKUP=t git log -800 --stat HEAD >/dev/null
3.93user 0.16system 0:04.09elapsed 100%CPU (0avgtext+0avgdata 0maxresident)k
0inputs+0outputs (0major+55588minor)pagefaults 0swaps
Without the patch, the numbers are:
$ git log -800 --stat HEAD >/dev/null
4.00user 0.15system 0:04.17elapsed 99%CPU (0avgtext+0avgdata 0maxresident)k
0inputs+0outputs (0major+60258minor)pagefaults 0swaps
In the same repository:
$ GIT_USE_LOOKUP=t git log -2000 HEAD >/dev/null
0.12user 0.00system 0:00.12elapsed 97%CPU (0avgtext+0avgdata 0maxresident)k
0inputs+0outputs (0major+4241minor)pagefaults 0swaps
Without the patch, the numbers are:
$ git log -2000 HEAD >/dev/null
0.05user 0.01system 0:00.07elapsed 100%CPU (0avgtext+0avgdata 0maxresident)k
0inputs+0outputs (0major+8506minor)pagefaults 0swaps
There isn't much time difference, but the number of minor faults
seems to show that we are touching much smaller number of pages,
which is expected.
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2007-12-29 18:05:47 +08:00
|
|
|
#include "sha1-lookup.h"
|
2011-10-29 05:48:40 +08:00
|
|
|
#include "bulk-checkin.h"
|
2012-03-07 18:54:18 +08:00
|
|
|
#include "streaming.h"
|
2013-02-15 20:07:10 +08:00
|
|
|
#include "dir.h"
|
2005-04-19 04:04:43 +08:00
|
|
|
|
2005-04-24 02:09:32 +08:00
|
|
|
#ifndef O_NOATIME
|
|
|
|
#if defined(__linux__) && (defined(__i386__) || defined(__PPC__))
|
|
|
|
#define O_NOATIME 01000000
|
|
|
|
#else
|
|
|
|
#define O_NOATIME 0
|
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
|
2011-03-16 13:15:31 +08:00
|
|
|
/* printf format fragment for sizes printed through sz_fmt() below. */
#define SZ_FMT PRIuMAX

/* Widen a size_t to uintmax_t so it matches the SZ_FMT specifier. */
static inline uintmax_t sz_fmt(size_t s) { return (uintmax_t)s; }
|
2007-01-10 12:07:11 +08:00
|
|
|
|
2006-08-16 01:23:48 +08:00
|
|
|
const unsigned char null_sha1[20];
|
2005-10-01 05:02:47 +08:00
|
|
|
|
2011-02-05 22:03:01 +08:00
|
|
|
/*
 * This is meant to hold a *small* number of objects that you would
 * want read_sha1_file() to be able to return, but yet you do not want
 * to write them into the object store (e.g. a browse-only
 * application).
 */
static struct cached_object {
	unsigned char sha1[20];	/* object name */
	enum object_type type;	/* OBJ_BLOB, OBJ_TREE, ... */
	void *buf;		/* object payload, owned by this table */
	unsigned long size;	/* number of bytes in buf */
} *cached_objects;
/* number of used / allocated slots in the cached_objects array */
static int cached_object_nr, cached_object_alloc;

/*
 * The empty tree is special-cased: find_cached_object() falls back to
 * this entry, so the empty tree can always be read even when it was
 * never written into the object store.
 */
static struct cached_object empty_tree = {
	EMPTY_TREE_SHA1_BIN_LITERAL,
	OBJ_TREE,
	"",
	0
};
|
|
|
|
|
2012-02-01 21:48:55 +08:00
|
|
|
static struct packed_git *last_found_pack;
|
|
|
|
|
2011-02-05 22:03:01 +08:00
|
|
|
static struct cached_object *find_cached_object(const unsigned char *sha1)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
struct cached_object *co = cached_objects;
|
|
|
|
|
|
|
|
for (i = 0; i < cached_object_nr; i++, co++) {
|
|
|
|
if (!hashcmp(co->sha1, sha1))
|
|
|
|
return co;
|
|
|
|
}
|
|
|
|
if (!hashcmp(sha1, empty_tree.sha1))
|
|
|
|
return &empty_tree;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2011-03-11 08:02:50 +08:00
|
|
|
/*
 * Create a directory inside $GIT_DIR, handling the case where a path
 * component is actually a symbolic link into another repository (a
 * symlinked worktree setup).  Returns 0 on success, -1 on failure with
 * errno describing the original mkdir error.
 */
int mkdir_in_gitdir(const char *path)
{
	if (mkdir(path, 0777)) {
		int saved_errno = errno;
		struct stat st;
		struct strbuf sb = STRBUF_INIT;

		if (errno != EEXIST)
			return -1;
		/*
		 * Are we looking at a path in a symlinked worktree
		 * whose original repository does not yet have it?
		 * e.g. .git/rr-cache pointing at its original
		 * repository in which the user hasn't performed any
		 * conflict resolution yet?
		 */
		if (lstat(path, &st) || !S_ISLNK(st.st_mode) ||
		    strbuf_readlink(&sb, path, st.st_size) ||
		    !is_absolute_path(sb.buf) ||
		    mkdir(sb.buf, 0777)) {
			/* restore the errno of the first mkdir for the caller */
			strbuf_release(&sb);
			errno = saved_errno;
			return -1;
		}
		strbuf_release(&sb);
	}
	/* make the mode agree with core.sharedRepository */
	return adjust_shared_perm(path);
}
|
|
|
|
|
2005-07-06 16:11:52 +08:00
|
|
|
/*
 * Create every leading directory of "path" (but not path itself).
 * The buffer is modified in place ('/' temporarily replaced by NUL)
 * but restored before returning.  Returns:
 *    0 on success
 *   -1 a component could not be created
 *   -2 adjust_shared_perm() failed on a created component
 *   -3 a component exists but is not a directory
 */
int safe_create_leading_directories(char *path)
{
	/* skip drive letter / leading slash so we never mkdir("") */
	char *pos = path + offset_1st_component(path);
	struct stat st;

	while (pos) {
		pos = strchr(pos, '/');
		if (!pos)
			break;
		/* collapse runs of slashes */
		while (*++pos == '/')
			;
		if (!*pos)
			break;
		/* temporarily terminate the string at this component */
		*--pos = '\0';
		if (!stat(path, &st)) {
			/* path exists */
			if (!S_ISDIR(st.st_mode)) {
				*pos = '/';
				return -3;
			}
		}
		else if (mkdir(path, 0777)) {
			if (errno == EEXIST &&
			    !stat(path, &st) && S_ISDIR(st.st_mode)) {
				; /* somebody created it since we checked */
			} else {
				*pos = '/';
				return -1;
			}
		}
		else if (adjust_shared_perm(path)) {
			*pos = '/';
			return -2;
		}
		/* restore the slash and move past it */
		*pos++ = '/';
	}
	return 0;
}
|
2005-07-06 02:31:32 +08:00
|
|
|
|
2008-06-25 13:41:34 +08:00
|
|
|
/*
 * Like safe_create_leading_directories(), but usable with a const
 * string (e.g. a cache entry name): operate on a scratch copy so the
 * caller's buffer is never touched.
 */
int safe_create_leading_directories_const(const char *path)
{
	char *scratch = xstrdup(path);
	int ret;

	ret = safe_create_leading_directories(scratch);
	free(scratch);
	return ret;
}
|
|
|
|
|
2005-05-07 15:38:04 +08:00
|
|
|
/*
 * Write the 40 hex digits of sha1 into pathbuf in the loose-object
 * layout "xx/yyyy...", skipping over the '/' at offset 2 which the
 * caller has already placed.  No terminating NUL is written.
 */
static void fill_sha1_path(char *pathbuf, const unsigned char *sha1)
{
	static const char hexdigit[] = "0123456789abcdef";
	char *out = pathbuf;
	int i;

	for (i = 0; i < 20; i++) {
		unsigned int byte = sha1[i];

		if (i == 1)
			out++;	/* leave the fan-out '/' untouched */
		*out++ = hexdigit[byte >> 4];
		*out++ = hexdigit[byte & 0xf];
	}
}
|
|
|
|
|
2005-04-19 04:04:43 +08:00
|
|
|
/*
|
|
|
|
* NOTE! This returns a statically allocated buffer, so you have to be
|
2008-01-03 22:18:07 +08:00
|
|
|
* careful about using it. Do an "xstrdup()" if you need to save the
|
2005-04-19 04:04:43 +08:00
|
|
|
* filename.
|
2005-05-07 15:38:04 +08:00
|
|
|
*
|
|
|
|
* Also note that this returns the location for creating. Reading
|
|
|
|
* SHA1 file can happen from any alternate directory listed in the
|
2005-05-10 08:57:56 +08:00
|
|
|
* DB_ENVIRONMENT environment variable if it is not found in
|
2005-05-07 15:38:04 +08:00
|
|
|
* the primary object database.
|
2005-04-19 04:04:43 +08:00
|
|
|
*/
|
|
|
|
/*
 * Build "<objdir>/xx/yyyy..." for the loose object sha1.  Returns a
 * statically allocated buffer that is overwritten by the next call;
 * xstrdup() it if you need to keep the name.
 */
char *sha1_file_name(const unsigned char *sha1)
{
	static char buf[PATH_MAX];
	const char *objdir;
	int len;

	objdir = get_object_directory();
	len = strlen(objdir);

	/* '/' + sha1(2) + '/' + sha1(38) + '\0' */
	if (len + 43 > PATH_MAX)
		die("insanely long object directory %s", objdir);
	memcpy(buf, objdir, len);
	buf[len] = '/';
	buf[len+3] = '/';
	buf[len+42] = '\0';
	/* fill in the 40 hex digits around the '/' at buf[len+3] */
	fill_sha1_path(buf + len + 1, sha1);
	return buf;
}
|
|
|
|
|
2008-05-24 06:43:55 +08:00
|
|
|
/*
 * Build "<objdir>/pack/pack-<40 hex digits>.<which>" where "which" is
 * "pack" or "idx".  Each caller passes its own static name/base pair:
 * on the first call the template is allocated into *base (with 40 'x'
 * placeholder digits) and *name is pointed at the digit area; later
 * calls just overwrite the 40 digits in place.  The returned buffer is
 * owned by the caller's statics and reused across calls.
 */
static char *sha1_get_pack_name(const unsigned char *sha1,
				char **name, char **base, const char *which)
{
	static const char hex[] = "0123456789abcdef";
	char *buf;
	int i;

	if (!*base) {
		const char *sha1_file_directory = get_object_directory();
		int len = strlen(sha1_file_directory);
		/* room for "/pack/pack-" + 40 digits + ".pack"/".idx" + NUL */
		*base = xmalloc(len + 60);
		sprintf(*base, "%s/pack/pack-1234567890123456789012345678901234567890.%s",
			sha1_file_directory, which);
		/* *name points at the first of the 40 placeholder digits */
		*name = *base + len + 11;
	}

	buf = *name;

	/* overwrite the placeholder digits with the hex of sha1 */
	for (i = 0; i < 20; i++) {
		unsigned int val = *sha1++;
		*buf++ = hex[val >> 4];
		*buf++ = hex[val & 0xf];
	}

	return *base;
}
|
|
|
|
|
2008-05-24 06:43:55 +08:00
|
|
|
/*
 * Return "<objdir>/pack/pack-<sha1>.pack" in a buffer owned by this
 * function; it stays valid only until the next call.
 */
char *sha1_pack_name(const unsigned char *sha1)
{
	static char *name;
	static char *base;

	return sha1_get_pack_name(sha1, &name, &base, "pack");
}
|
2005-08-01 08:53:44 +08:00
|
|
|
|
2008-05-24 06:43:55 +08:00
|
|
|
/*
 * Return "<objdir>/pack/pack-<sha1>.idx" in a buffer owned by this
 * function; it stays valid only until the next call.
 */
char *sha1_pack_index_name(const unsigned char *sha1)
{
	static char *name;
	static char *base;

	return sha1_get_pack_name(sha1, &name, &base, "idx");
}
|
|
|
|
|
2005-08-15 08:25:57 +08:00
|
|
|
struct alternate_object_database *alt_odb_list;
|
|
|
|
static struct alternate_object_database **alt_odb_tail;
|
2005-05-07 15:38:04 +08:00
|
|
|
|
2011-05-16 03:16:29 +08:00
|
|
|
static int git_open_noatime(const char *name);
|
2006-05-08 02:19:21 +08:00
|
|
|
|
2005-05-09 04:51:13 +08:00
|
|
|
/*
|
|
|
|
* Prepare alternate object database registry.
|
2005-08-15 08:25:57 +08:00
|
|
|
*
|
|
|
|
* The variable alt_odb_list points at the list of struct
|
|
|
|
* alternate_object_database. The elements on this list come from
|
|
|
|
* non-empty elements from colon separated ALTERNATE_DB_ENVIRONMENT
|
|
|
|
* environment variable, and $GIT_OBJECT_DIRECTORY/info/alternates,
|
2005-12-05 14:48:43 +08:00
|
|
|
* whose contents is similar to that environment variable but can be
|
|
|
|
* LF separated. Its base points at a statically allocated buffer that
|
2005-08-15 08:25:57 +08:00
|
|
|
* contains "/the/directory/corresponding/to/.git/objects/...", while
|
|
|
|
* its name points just after the slash at the end of ".git/objects/"
|
|
|
|
* in the example above, and has enough space to hold 40-byte hex
|
|
|
|
* SHA1, an extra slash for the first level indirection, and the
|
|
|
|
* terminating NUL.
|
2005-05-09 04:51:13 +08:00
|
|
|
*/
|
2012-11-05 16:41:22 +08:00
|
|
|
/*
 * Register one alternate object database directory.  "entry" may be
 * relative, in which case it is resolved against "relative_base".
 * Duplicates, the repository's own object directory, and directories
 * that do not exist are rejected.  On success the entry is appended to
 * alt_odb_list and that directory's own info/alternates is read
 * recursively (bounded by "depth").  Returns 0 on success, -1 if the
 * entry was rejected.
 */
static int link_alt_odb_entry(const char *entry, const char *relative_base, int depth)
{
	const char *objdir = get_object_directory();
	struct alternate_object_database *ent;
	struct alternate_object_database *alt;
	int pfxlen, entlen;
	struct strbuf pathbuf = STRBUF_INIT;

	if (!is_absolute_path(entry) && relative_base) {
		strbuf_addstr(&pathbuf, real_path(relative_base));
		strbuf_addch(&pathbuf, '/');
	}
	strbuf_addstr(&pathbuf, entry);

	normalize_path_copy(pathbuf.buf, pathbuf.buf);

	pfxlen = strlen(pathbuf.buf);

	/*
	 * The trailing slash after the directory name is given by
	 * this function at the end. Remove duplicates.
	 */
	while (pfxlen && pathbuf.buf[pfxlen-1] == '/')
		pfxlen -= 1;

	/* base holds the dir plus scratch space for a loose-object name */
	entlen = pfxlen + 43; /* '/' + 2 hex + '/' + 38 hex + NUL */
	ent = xmalloc(sizeof(*ent) + entlen);
	memcpy(ent->base, pathbuf.buf, pfxlen);
	strbuf_release(&pathbuf);

	/* name points just past the (not yet written) '/' after the dir */
	ent->name = ent->base + pfxlen + 1;
	ent->base[pfxlen + 3] = '/';
	/* NUL-terminate at the dir for now; restored to '/' below */
	ent->base[pfxlen] = ent->base[entlen-1] = 0;

	/* Detect cases where alternate disappeared */
	if (!is_directory(ent->base)) {
		error("object directory %s does not exist; "
		      "check .git/objects/info/alternates.",
		      ent->base);
		free(ent);
		return -1;
	}

	/* Prevent the common mistake of listing the same
	 * thing twice, or object directory itself.
	 */
	for (alt = alt_odb_list; alt; alt = alt->next) {
		if (!memcmp(ent->base, alt->base, pfxlen)) {
			free(ent);
			return -1;
		}
	}
	if (!strcmp(ent->base, objdir)) {
		free(ent);
		return -1;
	}

	/* add the alternate entry */
	*alt_odb_tail = ent;
	alt_odb_tail = &(ent->next);
	ent->next = NULL;

	/* recursively add alternates */
	read_info_alternates(ent->base, depth + 1);

	/* now turn the terminating NUL into the directory separator */
	ent->base[pfxlen] = '/';

	return 0;
}
|
|
|
|
|
2012-11-05 16:41:23 +08:00
|
|
|
/*
 * Split the "sep"-separated list in alt[0..len) and register each
 * element as an alternate object database.  Blank elements and lines
 * starting with '#' (comments) are skipped; relative entries are only
 * honored at depth 0 (i.e. from the environment, not from a nested
 * info/alternates file).  Nesting is capped at 5 levels.
 */
static void link_alt_odb_entries(const char *alt, int len, int sep,
				 const char *relative_base, int depth)
{
	struct string_list entries = STRING_LIST_INIT_NODUP;
	char *alt_copy;
	int i;

	if (depth > 5) {
		error("%s: ignoring alternate object stores, nesting too deep.",
				relative_base);
		return;
	}

	/* split a private copy in place; items borrow from alt_copy */
	alt_copy = xmemdupz(alt, len);
	string_list_split_in_place(&entries, alt_copy, sep, -1);
	for (i = 0; i < entries.nr; i++) {
		const char *entry = entries.items[i].string;
		if (entry[0] == '\0' || entry[0] == '#')
			continue;
		if (!is_absolute_path(entry) && depth) {
			error("%s: ignoring relative alternate object store %s",
					relative_base, entry);
		} else {
			link_alt_odb_entry(entry, relative_base, depth);
		}
	}
	string_list_clear(&entries, 0);
	free(alt_copy);
}
|
|
|
|
|
2012-05-15 00:24:45 +08:00
|
|
|
void read_info_alternates(const char * relative_base, int depth)
|
2005-08-15 08:25:57 +08:00
|
|
|
{
|
2005-08-17 09:22:05 +08:00
|
|
|
char *map;
|
2007-03-07 09:44:37 +08:00
|
|
|
size_t mapsz;
|
2005-08-15 08:25:57 +08:00
|
|
|
struct stat st;
|
2007-07-03 18:40:20 +08:00
|
|
|
const char alt_file_name[] = "info/alternates";
|
|
|
|
/* Given that relative_base is no longer than PATH_MAX,
|
|
|
|
ensure that "path" has enough space to append "/", the
|
|
|
|
file name, "info/alternates", and a trailing NUL. */
|
|
|
|
char path[PATH_MAX + 1 + sizeof alt_file_name];
|
2006-05-08 02:19:21 +08:00
|
|
|
int fd;
|
2005-08-15 08:25:57 +08:00
|
|
|
|
2007-07-03 18:40:20 +08:00
|
|
|
sprintf(path, "%s/%s", relative_base, alt_file_name);
|
2011-05-16 03:16:29 +08:00
|
|
|
fd = git_open_noatime(path);
|
2005-08-15 08:25:57 +08:00
|
|
|
if (fd < 0)
|
|
|
|
return;
|
|
|
|
if (fstat(fd, &st) || (st.st_size == 0)) {
|
|
|
|
close(fd);
|
2005-06-29 05:56:57 +08:00
|
|
|
return;
|
2005-05-07 15:38:04 +08:00
|
|
|
}
|
2007-03-07 09:44:37 +08:00
|
|
|
mapsz = xsize_t(st.st_size);
|
|
|
|
map = xmmap(NULL, mapsz, PROT_READ, MAP_PRIVATE, fd, 0);
|
2005-08-15 08:25:57 +08:00
|
|
|
close(fd);
|
|
|
|
|
2012-11-05 16:41:23 +08:00
|
|
|
link_alt_odb_entries(map, mapsz, '\n', relative_base, depth);
|
2006-05-08 02:19:21 +08:00
|
|
|
|
2007-03-07 09:44:37 +08:00
|
|
|
munmap(map, mapsz);
|
2005-05-07 15:38:04 +08:00
|
|
|
}
|
|
|
|
|
2008-04-18 07:32:30 +08:00
|
|
|
/*
 * Append "reference" as a new line to $GIT_DIR/objects/info/alternates
 * (under a lock), and, if the in-core alternate list has already been
 * initialized, register the new entry immediately.
 */
void add_to_alternates_file(const char *reference)
{
	/*
	 * NOTE(review): "lock" is never freed; the lockfile machinery
	 * appears to require the struct to stay alive — confirm against
	 * the lockfile API before "fixing" this.
	 */
	struct lock_file *lock = xcalloc(1, sizeof(struct lock_file));
	int fd = hold_lock_file_for_append(lock, git_path("objects/info/alternates"), LOCK_DIE_ON_ERROR);
	char *alt = mkpath("%s\n", reference);
	write_or_die(fd, alt, strlen(alt));
	if (commit_lock_file(lock))
		die("could not close alternates file");
	/* only update the in-core list if it was already populated */
	if (alt_odb_tail)
		link_alt_odb_entries(alt, strlen(alt), '\n', NULL, 0);
}
|
|
|
|
|
push: receiver end advertises refs from alternate repositories
Earlier, when pushing into a repository that borrows from alternate object
stores, we followed the longstanding design decision not to trust refs in
the alternate repository that houses the object store we are borrowing
from. If your public repository is borrowing from Linus's public
repository, you pushed into it long time ago, and now when you try to push
your updated history that is in sync with more recent history from Linus,
you will end up sending not just your own development, but also the
changes you acquired through Linus's tree, even though the objects needed
for the latter already exists at the receiving end. This is because the
receiving end does not advertise that the objects only reachable from the
borrowed repository (i.e. Linus's) are already available there.
This solves the issue by making the receiving end advertise refs from
borrowed repositories. They are not sent with their true names but with a
phoney name ".have" to make sure that the old senders will safely ignore
them (otherwise, the old senders will misbehave, trying to push matching
refs, and mirror push that deletes refs that only exist at the receiving
end).
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2008-09-09 16:27:10 +08:00
|
|
|
/*
 * Call "fn" once for each alternate object database, stopping early
 * as soon as the callback returns non-zero.
 */
void foreach_alt_odb(alt_odb_fn fn, void *cb)
{
	struct alternate_object_database *odb;

	prepare_alt_odb();
	for (odb = alt_odb_list; odb; odb = odb->next) {
		if (fn(odb, cb))
			break;
	}
}
|
|
|
|
|
2006-05-08 02:19:21 +08:00
|
|
|
void prepare_alt_odb(void)
|
|
|
|
{
|
2006-06-28 17:04:39 +08:00
|
|
|
const char *alt;
|
2006-05-08 02:19:21 +08:00
|
|
|
|
2007-05-26 13:24:40 +08:00
|
|
|
if (alt_odb_tail)
|
|
|
|
return;
|
|
|
|
|
2006-05-08 02:19:21 +08:00
|
|
|
alt = getenv(ALTERNATE_DB_ENVIRONMENT);
|
|
|
|
if (!alt) alt = "";
|
|
|
|
|
|
|
|
alt_odb_tail = &alt_odb_list;
|
2012-11-05 16:41:23 +08:00
|
|
|
link_alt_odb_entries(alt, strlen(alt), PATH_SEP, NULL, 0);
|
2006-05-08 02:19:21 +08:00
|
|
|
|
|
|
|
read_info_alternates(get_object_directory(), 0);
|
|
|
|
}
|
|
|
|
|
2008-11-10 13:59:57 +08:00
|
|
|
static int has_loose_object_local(const unsigned char *sha1)
|
2005-05-07 15:38:04 +08:00
|
|
|
{
|
|
|
|
char *name = sha1_file_name(sha1);
|
2008-11-10 13:59:57 +08:00
|
|
|
return !access(name, F_OK);
|
|
|
|
}
|
2005-05-07 15:38:04 +08:00
|
|
|
|
2008-11-10 13:59:57 +08:00
|
|
|
int has_loose_object_nonlocal(const unsigned char *sha1)
|
|
|
|
{
|
|
|
|
struct alternate_object_database *alt;
|
2005-06-29 05:56:57 +08:00
|
|
|
prepare_alt_odb();
|
2005-08-15 08:25:57 +08:00
|
|
|
for (alt = alt_odb_list; alt; alt = alt->next) {
|
2008-11-10 13:59:57 +08:00
|
|
|
fill_sha1_path(alt->name, sha1);
|
2008-06-15 02:43:01 +08:00
|
|
|
if (!access(alt->base, F_OK))
|
|
|
|
return 1;
|
2005-05-07 15:38:04 +08:00
|
|
|
}
|
2008-06-15 02:43:01 +08:00
|
|
|
return 0;
|
2005-05-07 15:38:04 +08:00
|
|
|
}
|
|
|
|
|
2008-11-10 13:59:57 +08:00
|
|
|
/* Is the loose object present either locally or via an alternate? */
static int has_loose_object(const unsigned char *sha1)
{
	if (has_loose_object_local(sha1))
		return 1;
	return has_loose_object_nonlocal(sha1);
}
|
|
|
|
|
2006-12-23 15:34:28 +08:00
|
|
|
static unsigned int pack_used_ctr;
|
2006-12-23 15:34:47 +08:00
|
|
|
static unsigned int pack_mmap_calls;
|
|
|
|
static unsigned int peak_pack_open_windows;
|
|
|
|
static unsigned int pack_open_windows;
|
2011-03-01 04:52:39 +08:00
|
|
|
static unsigned int pack_open_fds;
|
|
|
|
static unsigned int pack_max_fds;
|
2006-12-23 15:34:47 +08:00
|
|
|
static size_t peak_pack_mapped;
|
2006-12-23 15:34:28 +08:00
|
|
|
static size_t pack_mapped;
|
2005-06-29 05:56:57 +08:00
|
|
|
struct packed_git *packed_git;
|
2005-06-27 18:35:33 +08:00
|
|
|
|
2007-06-13 16:22:51 +08:00
|
|
|
/*
 * Dump pack-window mmap statistics to stderr, for debugging window
 * sizing and mapping behavior.
 */
void pack_report(void)
{
	fprintf(stderr,
		"pack_report: getpagesize() = %10" SZ_FMT "\n"
		"pack_report: core.packedGitWindowSize = %10" SZ_FMT "\n"
		"pack_report: core.packedGitLimit = %10" SZ_FMT "\n",
		sz_fmt(getpagesize()),
		sz_fmt(packed_git_window_size),
		sz_fmt(packed_git_limit));
	fprintf(stderr,
		"pack_report: pack_used_ctr = %10u\n"
		"pack_report: pack_mmap_calls = %10u\n"
		"pack_report: pack_open_windows = %10u / %10u\n"
		"pack_report: pack_mapped = "
		"%10" SZ_FMT " / %10" SZ_FMT "\n",
		pack_used_ctr,
		pack_mmap_calls,
		pack_open_windows, peak_pack_open_windows,
		sz_fmt(pack_mapped), sz_fmt(peak_pack_mapped));
}
|
|
|
|
|
2007-03-17 04:42:50 +08:00
|
|
|
/*
 * mmap the pack index file at "path", validate it (version, fan-out
 * monotonicity, exact/bounded file size for v1/v2), and on success
 * record the mapping, size, version and object count in *p.  Returns
 * 0 on success, -1 or error() (-1) on failure; on failure nothing in
 * *p is modified and the mapping is released.
 */
static int check_packed_git_idx(const char *path, struct packed_git *p)
{
	void *idx_map;
	struct pack_idx_header *hdr;
	size_t idx_size;
	uint32_t version, nr, i, *index;
	int fd = git_open_noatime(path);
	struct stat st;

	if (fd < 0)
		return -1;
	if (fstat(fd, &st)) {
		close(fd);
		return -1;
	}
	idx_size = xsize_t(st.st_size);
	/* the smallest possible index: fan-out table + two trailing SHA-1s */
	if (idx_size < 4 * 256 + 20 + 20) {
		close(fd);
		return error("index file %s is too small", path);
	}
	idx_map = xmmap(NULL, idx_size, PROT_READ, MAP_PRIVATE, fd, 0);
	close(fd);

	/* a v2+ index starts with a signature; v1 has no header at all */
	hdr = idx_map;
	if (hdr->idx_signature == htonl(PACK_IDX_SIGNATURE)) {
		version = ntohl(hdr->idx_version);
		if (version < 2 || version > 2) {
			munmap(idx_map, idx_size);
			return error("index file %s is version %"PRIu32
				     " and is not supported by this binary"
				     " (try upgrading GIT to a newer version)",
				     path, version);
		}
	} else
		version = 1;

	/* the 256-entry fan-out table must be monotonically increasing;
	   its last entry is the total object count */
	nr = 0;
	index = idx_map;
	if (version > 1)
		index += 2;  /* skip index header */
	for (i = 0; i < 256; i++) {
		uint32_t n = ntohl(index[i]);
		if (n < nr) {
			munmap(idx_map, idx_size);
			return error("non-monotonic index %s", path);
		}
		nr = n;
	}

	if (version == 1) {
		/*
		 * Total size:
		 * - 256 index entries 4 bytes each
		 * - 24-byte entries * nr (20-byte sha1 + 4-byte offset)
		 * - 20-byte SHA1 of the packfile
		 * - 20-byte SHA1 file checksum
		 */
		if (idx_size != 4*256 + nr * 24 + 20 + 20) {
			munmap(idx_map, idx_size);
			return error("wrong index v1 file size in %s", path);
		}
	} else if (version == 2) {
		/*
		 * Minimum size:
		 * - 8 bytes of header
		 * - 256 index entries 4 bytes each
		 * - 20-byte sha1 entry * nr
		 * - 4-byte crc entry * nr
		 * - 4-byte offset entry * nr
		 * - 20-byte SHA1 of the packfile
		 * - 20-byte SHA1 file checksum
		 * And after the 4-byte offset table might be a
		 * variable sized table containing 8-byte entries
		 * for offsets larger than 2^31.
		 */
		unsigned long min_size = 8 + 4*256 + nr*(20 + 4 + 4) + 20 + 20;
		unsigned long max_size = min_size;
		if (nr)
			max_size += (nr - 1)*8;
		if (idx_size < min_size || idx_size > max_size) {
			munmap(idx_map, idx_size);
			return error("wrong index v2 file size in %s", path);
		}
		if (idx_size != min_size &&
		    /*
		     * make sure we can deal with large pack offsets.
		     * 31-bit signed offset won't be enough, neither
		     * 32-bit unsigned one will be.
		     */
		    (sizeof(off_t) <= 4)) {
			munmap(idx_map, idx_size);
			return error("pack too large for current definition of off_t in %s", path);
		}
	}

	p->index_version = version;
	p->index_data = idx_map;
	p->index_size = idx_size;
	p->num_objects = nr;
	return 0;
}
|
|
|
|
|
2007-05-30 14:13:42 +08:00
|
|
|
/*
 * Lazily open and validate the .idx file that goes with p->pack_name.
 * No-op (returns 0) if the index is already mapped.  Returns the
 * result of check_packed_git_idx() otherwise.
 */
int open_pack_index(struct packed_git *p)
{
	char *idx_name;
	int ret;

	if (p->index_data)
		return 0;

	/*
	 * Derive the index name by replacing the ".pack" suffix with
	 * ".idx" (safe to strcpy: ".idx" is shorter).  Assumes
	 * pack_name always ends in ".pack" — appears to be how these
	 * structures are constructed; confirm at the call sites.
	 */
	idx_name = xstrdup(p->pack_name);
	strcpy(idx_name + strlen(idx_name) - strlen(".pack"), ".idx");
	ret = check_packed_git_idx(idx_name, p);
	free(idx_name);
	return ret;
}
|
|
|
|
|
2006-12-23 15:34:44 +08:00
|
|
|
static void scan_windows(struct packed_git *p,
|
|
|
|
struct packed_git **lru_p,
|
|
|
|
struct pack_window **lru_w,
|
|
|
|
struct pack_window **lru_l)
|
2005-06-27 18:35:33 +08:00
|
|
|
{
|
2006-12-23 15:34:44 +08:00
|
|
|
struct pack_window *w, *w_l;
|
|
|
|
|
|
|
|
for (w_l = NULL, w = p->windows; w; w = w->next) {
|
|
|
|
if (!w->inuse_cnt) {
|
|
|
|
if (!*lru_w || w->last_used < (*lru_w)->last_used) {
|
|
|
|
*lru_p = p;
|
|
|
|
*lru_w = w;
|
|
|
|
*lru_l = w_l;
|
2006-12-23 15:34:23 +08:00
|
|
|
}
|
|
|
|
}
|
2006-12-23 15:34:44 +08:00
|
|
|
w_l = w;
|
2005-06-29 17:51:27 +08:00
|
|
|
}
|
2006-12-23 15:34:44 +08:00
|
|
|
}
|
|
|
|
|
Actually handle some-low memory conditions
Tim Ansell discovered his Debian server didn't permit git-daemon to
use as much memory as it needed to handle cloning a project with
a 128 MiB packfile. Filtering the strace provided by Tim of the
rev-list child showed this gem of a sequence:
open("./objects/pack/pack-*.pack", O_RDONLY|O_LARGEFILE <unfinished ...>
<... open resumed> ) = 5
OK, so the packfile is fd 5...
mmap2(NULL, 33554432, PROT_READ, MAP_PRIVATE, 5, 0 <unfinished ...>
<... mmap2 resumed> ) = 0xb5e2d000
and we mapped one 32 MiB window from it at position 0...
mmap2(NULL, 31020635, PROT_READ, MAP_PRIVATE, 5, 0x6000 <unfinished ...>
<... mmap2 resumed> ) = -1 ENOMEM (Cannot allocate memory)
And we asked for another window further into the file. But got
denied. In Tim's case this was due to a resource limit on the
git-daemon process, and its children.
Now where are we in the code? We're down inside use_pack(),
after we have called unuse_one_window() enough times to make sure
we stay within our allowed maximum window size. However since we
didn't unmap the prior window at 0xb5e2d000 we aren't exceeding
the current limit (which probably was just the defaults).
But we're actually down inside xmmap()...
So we release the window we do have (by calling release_pack_memory),
assuming there is some memory pressure...
munmap(0xb5e2d000, 33554432 <unfinished ...>
<... munmap resumed> ) = 0
close(5 <unfinished ...>
<... close resumed> ) = 0
And that was the last window in this packfile. So we closed it.
Way to go us. Our xmmap did not expect release_pack_memory to
close the fd its about to map...
mmap2(NULL, 31020635, PROT_READ, MAP_PRIVATE, 5, 0x6000 <unfinished ...>
<... mmap2 resumed> ) = -1 EBADF (Bad file descriptor)
And so the Linux kernel happily tells us f' off.
write(2, "fatal: ", 7 <unfinished ...>
<... write resumed> ) = 7
write(2, "Out of memory? mmap failed: Bad "..., 47 <unfinished ...>
<... write resumed> ) = 47
And we report the bad file descriptor error, and not the ENOMEM,
and die, claiming we are out of memory. But actually that mmap
should have succeeded, as we had enough memory for that window,
seeing as how we released the prior one.
Originally when I developed the sliding window mmap feature I had
this exact same bug in fast-import, and I dealt with it by handing
in the struct packed_git* we want to open the new window for, as the
caller wasn't prepared to reopen the packfile if unuse_one_window
closed it. The same is true here from xmmap, but the caller doesn't
have the struct packed_git* handy. So I'm using the file descriptor
instead to perform the same test.
Signed-off-by: Shawn O. Pearce <spearce@spearce.org>
Signed-off-by: Junio C Hamano <junkio@cox.net>
2007-04-25 16:02:27 +08:00
|
|
|
/*
 * Evict the least-recently-used unpinned pack window to reclaim mapped
 * memory (and possibly an open file descriptor).
 *
 * "current" is an extra pack whose windows should also be considered
 * for eviction (it may not be on the packed_git list yet).  "keep_fd"
 * names a file descriptor that must NOT be closed even if its pack
 * loses its last window -- the caller may be about to mmap from it.
 *
 * Returns 1 if a window was released, 0 if nothing was evictable.
 */
static int unuse_one_window(struct packed_git *current, int keep_fd)
{
	struct packed_git *p, *lru_p = NULL;
	struct pack_window *lru_w = NULL, *lru_l = NULL;

	/* Find the global LRU unpinned window across current + all packs. */
	if (current)
		scan_windows(current, &lru_p, &lru_w, &lru_l);
	for (p = packed_git; p; p = p->next)
		scan_windows(p, &lru_p, &lru_w, &lru_l);
	if (lru_p) {
		munmap(lru_w->base, lru_w->len);
		pack_mapped -= lru_w->len;
		/* Unlink lru_w from its pack's window list. */
		if (lru_l)
			lru_l->next = lru_w->next;
		else {
			lru_p->windows = lru_w->next;
			/*
			 * That was the pack's last window; close its fd
			 * too, unless it is the one the caller asked us
			 * to preserve.
			 */
			if (!lru_p->windows && lru_p->pack_fd != -1
				&& lru_p->pack_fd != keep_fd) {
				close(lru_p->pack_fd);
				pack_open_fds--;
				lru_p->pack_fd = -1;
			}
		}
		free(lru_w);
		pack_open_windows--;
		return 1;
	}
	return 0;
}
|
|
|
|
|
Actually handle some-low memory conditions
Tim Ansell discovered his Debian server didn't permit git-daemon to
use as much memory as it needed to handle cloning a project with
a 128 MiB packfile. Filtering the strace provided by Tim of the
rev-list child showed this gem of a sequence:
open("./objects/pack/pack-*.pack", O_RDONLY|O_LARGEFILE <unfinished ...>
<... open resumed> ) = 5
OK, so the packfile is fd 5...
mmap2(NULL, 33554432, PROT_READ, MAP_PRIVATE, 5, 0 <unfinished ...>
<... mmap2 resumed> ) = 0xb5e2d000
and we mapped one 32 MiB window from it at position 0...
mmap2(NULL, 31020635, PROT_READ, MAP_PRIVATE, 5, 0x6000 <unfinished ...>
<... mmap2 resumed> ) = -1 ENOMEM (Cannot allocate memory)
And we asked for another window further into the file. But got
denied. In Tim's case this was due to a resource limit on the
git-daemon process, and its children.
Now where are we in the code? We're down inside use_pack(),
after we have called unuse_one_window() enough times to make sure
we stay within our allowed maximum window size. However since we
didn't unmap the prior window at 0xb5e2d000 we aren't exceeding
the current limit (which probably was just the defaults).
But we're actually down inside xmmap()...
So we release the window we do have (by calling release_pack_memory),
assuming there is some memory pressure...
munmap(0xb5e2d000, 33554432 <unfinished ...>
<... munmap resumed> ) = 0
close(5 <unfinished ...>
<... close resumed> ) = 0
And that was the last window in this packfile. So we closed it.
Way to go us. Our xmmap did not expect release_pack_memory to
close the fd its about to map...
mmap2(NULL, 31020635, PROT_READ, MAP_PRIVATE, 5, 0x6000 <unfinished ...>
<... mmap2 resumed> ) = -1 EBADF (Bad file descriptor)
And so the Linux kernel happily tells us f' off.
write(2, "fatal: ", 7 <unfinished ...>
<... write resumed> ) = 7
write(2, "Out of memory? mmap failed: Bad "..., 47 <unfinished ...>
<... write resumed> ) = 47
And we report the bad file descriptor error, and not the ENOMEM,
and die, claiming we are out of memory. But actually that mmap
should have succeeded, as we had enough memory for that window,
seeing as how we released the prior one.
Originally when I developed the sliding window mmap feature I had
this exact same bug in fast-import, and I dealt with it by handing
in the struct packed_git* we want to open the new window for, as the
caller wasn't prepared to reopen the packfile if unuse_one_window
closed it. The same is true here from xmmap, but the caller doesn't
have the struct packed_git* handy. So I'm using the file descriptor
instead to perform the same test.
Signed-off-by: Shawn O. Pearce <spearce@spearce.org>
Signed-off-by: Junio C Hamano <junkio@cox.net>
2007-04-25 16:02:27 +08:00
|
|
|
void release_pack_memory(size_t need, int fd)
|
2006-12-24 13:47:19 +08:00
|
|
|
{
|
|
|
|
size_t cur = pack_mapped;
|
Actually handle some-low memory conditions
Tim Ansell discovered his Debian server didn't permit git-daemon to
use as much memory as it needed to handle cloning a project with
a 128 MiB packfile. Filtering the strace provided by Tim of the
rev-list child showed this gem of a sequence:
open("./objects/pack/pack-*.pack", O_RDONLY|O_LARGEFILE <unfinished ...>
<... open resumed> ) = 5
OK, so the packfile is fd 5...
mmap2(NULL, 33554432, PROT_READ, MAP_PRIVATE, 5, 0 <unfinished ...>
<... mmap2 resumed> ) = 0xb5e2d000
and we mapped one 32 MiB window from it at position 0...
mmap2(NULL, 31020635, PROT_READ, MAP_PRIVATE, 5, 0x6000 <unfinished ...>
<... mmap2 resumed> ) = -1 ENOMEM (Cannot allocate memory)
And we asked for another window further into the file. But got
denied. In Tim's case this was due to a resource limit on the
git-daemon process, and its children.
Now where are we in the code? We're down inside use_pack(),
after we have called unuse_one_window() enough times to make sure
we stay within our allowed maximum window size. However since we
didn't unmap the prior window at 0xb5e2d000 we aren't exceeding
the current limit (which probably was just the defaults).
But we're actually down inside xmmap()...
So we release the window we do have (by calling release_pack_memory),
assuming there is some memory pressure...
munmap(0xb5e2d000, 33554432 <unfinished ...>
<... munmap resumed> ) = 0
close(5 <unfinished ...>
<... close resumed> ) = 0
And that was the last window in this packfile. So we closed it.
Way to go us. Our xmmap did not expect release_pack_memory to
close the fd its about to map...
mmap2(NULL, 31020635, PROT_READ, MAP_PRIVATE, 5, 0x6000 <unfinished ...>
<... mmap2 resumed> ) = -1 EBADF (Bad file descriptor)
And so the Linux kernel happily tells us f' off.
write(2, "fatal: ", 7 <unfinished ...>
<... write resumed> ) = 7
write(2, "Out of memory? mmap failed: Bad "..., 47 <unfinished ...>
<... write resumed> ) = 47
And we report the bad file descriptor error, and not the ENOMEM,
and die, claiming we are out of memory. But actually that mmap
should have succeeded, as we had enough memory for that window,
seeing as how we released the prior one.
Originally when I developed the sliding window mmap feature I had
this exact same bug in fast-import, and I dealt with it by handing
in the struct packed_git* we want to open the new window for, as the
caller wasn't prepared to reopen the packfile if unuse_one_window
closed it. The same is true here from xmmap, but the caller doesn't
have the struct packed_git* handy. So I'm using the file descriptor
instead to perform the same test.
Signed-off-by: Shawn O. Pearce <spearce@spearce.org>
Signed-off-by: Junio C Hamano <junkio@cox.net>
2007-04-25 16:02:27 +08:00
|
|
|
while (need >= (cur - pack_mapped) && unuse_one_window(NULL, fd))
|
2006-12-24 13:47:19 +08:00
|
|
|
; /* nothing */
|
|
|
|
}
|
|
|
|
|
2010-11-06 19:44:11 +08:00
|
|
|
/*
 * mmap() wrapper that retries once after releasing pack windows when
 * the first attempt fails (assumed memory pressure), and dies if the
 * retry fails too.  A zero-length request returns NULL instead of
 * whatever the platform mmap does with it.
 */
void *xmmap(void *start, size_t length,
	int prot, int flags, int fd, off_t offset)
{
	void *map = mmap(start, length, prot, flags, fd, offset);

	if (map != MAP_FAILED)
		return map;
	if (!length)
		return NULL;
	/* Free some pack windows -- but keep "fd" open -- and retry. */
	release_pack_memory(length, fd);
	map = mmap(start, length, prot, flags, fd, offset);
	if (map == MAP_FAILED)
		die_errno("Out of memory? mmap failed");
	return map;
}
|
|
|
|
|
2008-01-18 11:57:00 +08:00
|
|
|
void close_pack_windows(struct packed_git *p)
|
|
|
|
{
|
|
|
|
while (p->windows) {
|
|
|
|
struct pack_window *w = p->windows;
|
|
|
|
|
|
|
|
if (w->inuse_cnt)
|
|
|
|
die("pack '%s' still has open windows to it",
|
|
|
|
p->pack_name);
|
|
|
|
munmap(w->base, w->len);
|
|
|
|
pack_mapped -= w->len;
|
|
|
|
pack_open_windows--;
|
|
|
|
p->windows = w->next;
|
|
|
|
free(w);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-12-23 15:34:08 +08:00
|
|
|
void unuse_pack(struct pack_window **w_cursor)
|
2005-06-29 17:51:27 +08:00
|
|
|
{
|
2006-12-23 15:34:08 +08:00
|
|
|
struct pack_window *w = *w_cursor;
|
|
|
|
if (w) {
|
|
|
|
w->inuse_cnt--;
|
|
|
|
*w_cursor = NULL;
|
|
|
|
}
|
2005-06-27 18:35:33 +08:00
|
|
|
}
|
|
|
|
|
2010-04-19 22:23:06 +08:00
|
|
|
void close_pack_index(struct packed_git *p)
|
|
|
|
{
|
|
|
|
if (p->index_data) {
|
|
|
|
munmap((void *)p->index_data, p->index_size);
|
|
|
|
p->index_data = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-12-10 03:26:52 +08:00
|
|
|
/*
|
|
|
|
* This is used by git-repack in case a newly created pack happens to
|
|
|
|
* contain the same set of objects as an existing one. In that case
|
|
|
|
* the resulting file might be different even if its name would be the
|
|
|
|
* same. It is best to close any reference to the old pack before it is
|
|
|
|
* replaced on disk. Of course no index pointers nor windows for given pack
|
|
|
|
* must subsist at this point. If ever objects from this pack are requested
|
|
|
|
* again, the new version of the pack will be reinitialized through
|
|
|
|
* reprepare_packed_git().
|
|
|
|
*/
|
|
|
|
void free_pack_by_name(const char *pack_name)
|
|
|
|
{
|
|
|
|
struct packed_git *p, **pp = &packed_git;
|
|
|
|
|
|
|
|
while (*pp) {
|
|
|
|
p = *pp;
|
|
|
|
if (strcmp(pack_name, p->pack_name) == 0) {
|
2009-02-12 02:15:30 +08:00
|
|
|
clear_delta_base_cache();
|
2008-12-10 03:26:52 +08:00
|
|
|
close_pack_windows(p);
|
2011-03-01 04:52:39 +08:00
|
|
|
if (p->pack_fd != -1) {
|
2008-12-10 03:26:52 +08:00
|
|
|
close(p->pack_fd);
|
2011-03-01 04:52:39 +08:00
|
|
|
pack_open_fds--;
|
|
|
|
}
|
2010-04-19 22:23:06 +08:00
|
|
|
close_pack_index(p);
|
2008-12-10 03:26:52 +08:00
|
|
|
free(p->bad_object_sha1);
|
|
|
|
*pp = p->next;
|
2012-02-01 21:48:55 +08:00
|
|
|
if (last_found_pack == p)
|
|
|
|
last_found_pack = NULL;
|
2008-12-10 03:26:52 +08:00
|
|
|
free(p);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
pp = &p->next;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-08-24 17:52:22 +08:00
|
|
|
/*
 * Best-effort estimate of how many file descriptors this process may
 * have open at once.  Each source is tried in turn and we fall through
 * to the next when one is unavailable or reports "indeterminate",
 * instead of dying or returning a bogus value.  Never returns 0.
 *
 * Fixes two defects of the old #if/#elif chain:
 *  - a failing getrlimit() used to be fatal (die_errno), even though
 *    sysconf()/OPEN_MAX could still give a usable answer;
 *  - sysconf(_SC_OPEN_MAX) returns -1 when the limit is indeterminate,
 *    which the unsigned return type turned into a huge limit.
 */
static unsigned int get_max_fd_limit(void)
{
#ifdef RLIMIT_NOFILE
	{
		struct rlimit lim;

		if (!getrlimit(RLIMIT_NOFILE, &lim))
			return lim.rlim_cur;
		/* getrlimit() failed; try the next source */
	}
#endif

#ifdef _SC_OPEN_MAX
	{
		long open_max = sysconf(_SC_OPEN_MAX);

		if (0 < open_max)
			return open_max;
		/* -1 means "indeterminate"; try the next source */
	}
#endif

#ifdef OPEN_MAX
	return OPEN_MAX;
#else
	return 1; /* see the caller ;-) */
#endif
}
|
|
|
|
|
2007-02-02 16:00:03 +08:00
|
|
|
/*
 * Do not call this directly as this leaks p->pack_fd on error return;
 * call open_packed_git() instead.
 *
 * Opens p->pack_name, verifies its header/trailer against the parsed
 * index, and leaves the fd in p->pack_fd on success.  Returns 0 on
 * success, -1 or error() on failure.
 */
static int open_packed_git_1(struct packed_git *p)
{
	struct stat st;
	struct pack_header hdr;
	unsigned char sha1[20];
	unsigned char *idx_sha1;
	long fd_flag;

	/* The index must be loaded first; the pack is checked against it. */
	if (!p->index_data && open_pack_index(p))
		return error("packfile %s index unavailable", p->pack_name);

	/* Lazily compute how many pack fds we allow ourselves to keep. */
	if (!pack_max_fds) {
		unsigned int max_fds = get_max_fd_limit();

		/* Save 3 for stdin/stdout/stderr, 22 for work */
		if (25 < max_fds)
			pack_max_fds = max_fds - 25;
		else
			pack_max_fds = 1;
	}

	/* Evict windows (closing idle pack fds) until we are under budget. */
	while (pack_max_fds <= pack_open_fds && unuse_one_window(NULL, -1))
		; /* nothing */

	p->pack_fd = git_open_noatime(p->pack_name);
	if (p->pack_fd < 0 || fstat(p->pack_fd, &st))
		return -1;
	pack_open_fds++;

	/* If we created the struct before we had the pack we lack size. */
	if (!p->pack_size) {
		if (!S_ISREG(st.st_mode))
			return error("packfile %s not a regular file", p->pack_name);
		p->pack_size = st.st_size;
	} else if (p->pack_size != st.st_size)
		return error("packfile %s size changed", p->pack_name);

	/* We leave these file descriptors open with sliding mmap;
	 * there is no point keeping them open across exec(), though.
	 */
	fd_flag = fcntl(p->pack_fd, F_GETFD, 0);
	if (fd_flag < 0)
		return error("cannot determine file descriptor flags");
	fd_flag |= FD_CLOEXEC;
	if (fcntl(p->pack_fd, F_SETFD, fd_flag) == -1)
		return error("cannot set FD_CLOEXEC");

	/* Verify we recognize this pack file format. */
	if (read_in_full(p->pack_fd, &hdr, sizeof(hdr)) != sizeof(hdr))
		return error("file %s is far too short to be a packfile", p->pack_name);
	if (hdr.hdr_signature != htonl(PACK_SIGNATURE))
		return error("file %s is not a GIT packfile", p->pack_name);
	if (!pack_version_ok(hdr.hdr_version))
		return error("packfile %s is version %"PRIu32" and not"
			" supported (try upgrading GIT to a newer version)",
			p->pack_name, ntohl(hdr.hdr_version));

	/* Verify the pack matches its index. */
	if (p->num_objects != ntohl(hdr.hdr_entries))
		return error("packfile %s claims to have %"PRIu32" objects"
			     " while index indicates %"PRIu32" objects",
			     p->pack_name, ntohl(hdr.hdr_entries),
			     p->num_objects);
	/* The pack trailer hash must equal the one recorded in the index. */
	if (lseek(p->pack_fd, p->pack_size - sizeof(sha1), SEEK_SET) == -1)
		return error("end of packfile %s is unavailable", p->pack_name);
	if (read_in_full(p->pack_fd, sha1, sizeof(sha1)) != sizeof(sha1))
		return error("packfile %s signature is unavailable", p->pack_name);
	idx_sha1 = ((unsigned char *)p->index_data) + p->index_size - 40;
	if (hashcmp(sha1, idx_sha1))
		return error("packfile %s does not match index", p->pack_name);
	return 0;
}
|
|
|
|
|
2007-02-02 16:00:03 +08:00
|
|
|
static int open_packed_git(struct packed_git *p)
|
|
|
|
{
|
|
|
|
if (!open_packed_git_1(p))
|
|
|
|
return 0;
|
|
|
|
if (p->pack_fd != -1) {
|
|
|
|
close(p->pack_fd);
|
2011-03-01 04:52:39 +08:00
|
|
|
pack_open_fds--;
|
2007-02-02 16:00:03 +08:00
|
|
|
p->pack_fd = -1;
|
|
|
|
}
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2007-03-07 09:44:30 +08:00
|
|
|
static int in_window(struct pack_window *win, off_t offset)
|
2006-12-23 15:34:28 +08:00
|
|
|
{
|
|
|
|
/* We must promise at least 20 bytes (one hash) after the
|
|
|
|
* offset is available from this window, otherwise the offset
|
|
|
|
* is not actually in this window and a different window (which
|
|
|
|
* has that one hash excess) must be used. This is to support
|
|
|
|
* the object header and delta base parsing routines below.
|
|
|
|
*/
|
|
|
|
off_t win_off = win->offset;
|
|
|
|
return win_off <= offset
|
|
|
|
&& (offset + 20) <= (win_off + win->len);
|
|
|
|
}
|
|
|
|
|
2009-05-01 17:06:36 +08:00
|
|
|
/*
 * Return a pointer into pack "p" at "offset", pinning (via *w_cursor)
 * the mmap window containing it; at least 20 bytes past the returned
 * pointer are valid within the window.  If "left" is non-NULL it
 * receives the number of bytes available from the returned pointer to
 * the end of the window.  Dies on any failure.
 */
unsigned char *use_pack(struct packed_git *p,
		struct pack_window **w_cursor,
		off_t offset,
		unsigned long *left)
{
	struct pack_window *win = *w_cursor;

	/* Since packfiles end in a hash of their content and it's
	 * pointless to ask for an offset into the middle of that
	 * hash, and the in_window function above wouldn't match
	 * don't allow an offset too close to the end of the file.
	 */
	/* pack_size may be unknown if the pack was never opened yet. */
	if (!p->pack_size && p->pack_fd == -1 && open_packed_git(p))
		die("packfile %s cannot be accessed", p->pack_name);
	if (offset > (p->pack_size - 20))
		die("offset beyond end of packfile (truncated pack?)");

	if (!win || !in_window(win, offset)) {
		/* Unpin the cursor's old window before searching/mapping. */
		if (win)
			win->inuse_cnt--;
		for (win = p->windows; win; win = win->next) {
			if (in_window(win, offset))
				break;
		}
		if (!win) {
			/* No existing window covers offset: map a new one. */
			size_t window_align = packed_git_window_size / 2;
			off_t len;

			if (p->pack_fd == -1 && open_packed_git(p))
				die("packfile %s cannot be accessed", p->pack_name);

			win = xcalloc(1, sizeof(*win));
			/* Align to half the window size so windows overlap. */
			win->offset = (offset / window_align) * window_align;
			len = p->pack_size - win->offset;
			if (len > packed_git_window_size)
				len = packed_git_window_size;
			win->len = (size_t)len;
			pack_mapped += win->len;
			/* Evict LRU windows (but keep our fd) to stay in budget. */
			while (packed_git_limit < pack_mapped
				&& unuse_one_window(p, p->pack_fd))
				; /* nothing */
			win->base = xmmap(NULL, win->len,
				PROT_READ, MAP_PRIVATE,
				p->pack_fd, win->offset);
			if (win->base == MAP_FAILED)
				die("packfile %s cannot be mapped: %s",
					p->pack_name,
					strerror(errno));
			/*
			 * One window covers the whole pack: the fd is no
			 * longer needed, close it (unless forbidden).
			 */
			if (!win->offset && win->len == p->pack_size
				&& !p->do_not_close) {
				close(p->pack_fd);
				pack_open_fds--;
				p->pack_fd = -1;
			}
			/* Update usage statistics. */
			pack_mmap_calls++;
			pack_open_windows++;
			if (pack_mapped > peak_pack_mapped)
				peak_pack_mapped = pack_mapped;
			if (pack_open_windows > peak_pack_open_windows)
				peak_pack_open_windows = pack_open_windows;
			win->next = p->windows;
			p->windows = win;
		}
	}
	/* Pin the (possibly new) window on the cursor and stamp LRU time. */
	if (win != *w_cursor) {
		win->last_used = pack_used_ctr++;
		win->inuse_cnt++;
		*w_cursor = win;
	}
	offset -= win->offset;
	if (left)
		*left = win->len - xsize_t(offset);
	return win->base + offset;
}
|
|
|
|
|
2008-06-25 06:58:06 +08:00
|
|
|
static struct packed_git *alloc_packed_git(int extra)
|
|
|
|
{
|
|
|
|
struct packed_git *p = xmalloc(sizeof(*p) + extra);
|
|
|
|
memset(p, 0, sizeof(*p));
|
|
|
|
p->pack_fd = -1;
|
|
|
|
return p;
|
|
|
|
}
|
|
|
|
|
2010-11-07 03:00:38 +08:00
|
|
|
/*
 * Memory-pressure hook (registered via set_try_to_free_routine in
 * add_packed_git): release pack windows without protecting any fd.
 */
static void try_to_free_pack_memory(size_t size)
{
	release_pack_memory(size, -1);
}
|
|
|
|
|
2007-03-17 04:42:50 +08:00
|
|
|
/*
 * Create a packed_git for the pack whose .idx file is "path" (of
 * length "path_len"), without opening or mapping the pack itself.
 * Returns NULL if the path is too short or no regular .pack file
 * exists next to the .idx.  Caller owns the returned struct.
 */
struct packed_git *add_packed_git(const char *path, int path_len, int local)
{
	static int have_set_try_to_free_routine;
	struct stat st;
	/*
	 * "+ 2": path_len shrinks by strlen(".idx") below, and the
	 * longest suffix written is ".pack"/".keep" + NUL (6 bytes),
	 * so path_len - 4 + 6 == path_len + 2 bytes are needed.
	 */
	struct packed_git *p = alloc_packed_git(path_len + 2);

	/* Install the memory-pressure hook once, on first pack seen. */
	if (!have_set_try_to_free_routine) {
		have_set_try_to_free_routine = 1;
		set_try_to_free_routine(try_to_free_pack_memory);
	}

	/*
	 * Make sure a corresponding .pack file exists and that
	 * the index looks sane.
	 */
	path_len -= strlen(".idx");
	if (path_len < 1) {
		free(p);
		return NULL;
	}
	memcpy(p->pack_name, path, path_len);

	/* A "<base>.keep" file marks the pack as not-to-be-repacked. */
	strcpy(p->pack_name + path_len, ".keep");
	if (!access(p->pack_name, F_OK))
		p->pack_keep = 1;

	strcpy(p->pack_name + path_len, ".pack");
	if (stat(p->pack_name, &st) || !S_ISREG(st.st_mode)) {
		free(p);
		return NULL;
	}

	/* ok, it looks sane as far as we can check without
	 * actually mapping the pack file.
	 */
	p->pack_size = st.st_size;
	p->pack_local = local;
	p->mtime = st.st_mtime;
	/* Extract the pack hash from "pack-<40 hex>.pack" if present. */
	if (path_len < 40 || get_sha1_hex(path + path_len - 40, p->sha1))
		hashclr(p->sha1);
	return p;
}
|
|
|
|
|
2010-04-19 22:23:08 +08:00
|
|
|
struct packed_git *parse_pack_index(unsigned char *sha1, const char *idx_path)
|
2005-08-16 12:10:03 +08:00
|
|
|
{
|
2007-03-17 04:42:50 +08:00
|
|
|
const char *path = sha1_pack_name(sha1);
|
2008-06-25 06:58:06 +08:00
|
|
|
struct packed_git *p = alloc_packed_git(strlen(path) + 1);
|
2005-08-01 08:53:44 +08:00
|
|
|
|
2008-06-25 06:58:06 +08:00
|
|
|
strcpy(p->pack_name, path);
|
|
|
|
hashcpy(p->sha1, sha1);
|
2007-03-17 04:42:50 +08:00
|
|
|
if (check_packed_git_idx(idx_path, p)) {
|
|
|
|
free(p);
|
2005-08-01 08:53:44 +08:00
|
|
|
return NULL;
|
2007-03-17 04:42:50 +08:00
|
|
|
}
|
2005-08-01 08:53:44 +08:00
|
|
|
|
|
|
|
return p;
|
|
|
|
}
|
|
|
|
|
|
|
|
void install_packed_git(struct packed_git *pack)
|
|
|
|
{
|
2011-03-01 04:52:39 +08:00
|
|
|
if (pack->pack_fd != -1)
|
|
|
|
pack_open_fds++;
|
|
|
|
|
2005-08-01 08:53:44 +08:00
|
|
|
pack->next = packed_git;
|
|
|
|
packed_git = pack;
|
|
|
|
}
|
|
|
|
|
2013-02-15 20:07:10 +08:00
|
|
|
void (*report_garbage)(const char *desc, const char *path);
|
|
|
|
|
|
|
|
static void report_helper(const struct string_list *list,
|
|
|
|
int seen_bits, int first, int last)
|
|
|
|
{
|
|
|
|
const char *msg;
|
|
|
|
switch (seen_bits) {
|
|
|
|
case 0:
|
|
|
|
msg = "no corresponding .idx nor .pack";
|
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
msg = "no corresponding .idx";
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
msg = "no corresponding .pack";
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
for (; first < last; first++)
|
|
|
|
report_garbage(msg, list->items[first].string);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Given the collected .idx/.pack/.keep paths from one pack directory,
 * group them by basename (everything up to and including the final
 * dot) and report any group that lacks its .idx or .pack companion.
 * No-op when no report_garbage callback is installed.
 */
static void report_pack_garbage(struct string_list *list)
{
	int i, baselen = -1, first = 0, seen_bits = 0;

	if (!report_garbage)
		return;

	/* Sorting brings same-basename entries together for grouping. */
	sort_string_list(list);

	for (i = 0; i < list->nr; i++) {
		const char *path = list->items[i].string;
		/* Basename changed: flush the previous group. */
		if (baselen != -1 &&
		    strncmp(path, list->items[first].string, baselen)) {
			report_helper(list, seen_bits, first, i);
			baselen = -1;
			seen_bits = 0;
		}
		/* Start a new group at this entry. */
		if (baselen == -1) {
			const char *dot = strrchr(path, '.');
			if (!dot) {
				report_garbage("garbage found", path);
				continue;
			}
			baselen = dot - path + 1;
			first = i;
		}
		/* Record which extensions this group has. */
		if (!strcmp(path + baselen, "pack"))
			seen_bits |= 1;
		else if (!strcmp(path + baselen, "idx"))
			seen_bits |= 2;
	}
	/* Flush the final group. */
	report_helper(list, seen_bits, first, list->nr);
}
|
|
|
|
|
2005-10-14 06:38:28 +08:00
|
|
|
/*
 * Scan "<objdir>/pack" for .idx files and register each pack not
 * already on the packed_git list.  Anything else in the directory is
 * remembered and reported as garbage via the report_garbage callback
 * (if installed).
 */
static void prepare_packed_git_one(char *objdir, int local)
{
	/* Ensure that this buffer is large enough so that we can
	   append "/pack/" without clobbering the stack even if
	   strlen(objdir) were PATH_MAX. */
	char path[PATH_MAX + 1 + 4 + 1 + 1];
	int len;
	DIR *dir;
	struct dirent *de;
	struct string_list garbage = STRING_LIST_INIT_DUP;

	sprintf(path, "%s/pack", objdir);
	len = strlen(path);
	dir = opendir(path);
	if (!dir) {
		/* A missing pack directory is normal; anything else is not. */
		if (errno != ENOENT)
			error("unable to open object pack directory: %s: %s",
			      path, strerror(errno));
		return;
	}
	path[len++] = '/';
	while ((de = readdir(dir)) != NULL) {
		int namelen = strlen(de->d_name);
		struct packed_git *p;

		/* Entry name would overflow our fixed path buffer. */
		if (len + namelen + 1 > sizeof(path)) {
			if (report_garbage) {
				struct strbuf sb = STRBUF_INIT;
				strbuf_addf(&sb, "%.*s/%s", len - 1, path, de->d_name);
				report_garbage("path too long", sb.buf);
				strbuf_release(&sb);
			}
			continue;
		}

		if (is_dot_or_dotdot(de->d_name))
			continue;

		/* Build the full path in place after the "<objdir>/pack/" prefix. */
		strcpy(path + len, de->d_name);

		if (has_extension(de->d_name, ".idx")) {
			/* Don't reopen a pack we already have. */
			for (p = packed_git; p; p = p->next) {
				/* len + namelen - 4 compares up to (but not) ".idx". */
				if (!memcmp(path, p->pack_name, len + namelen - 4))
					break;
			}
			if (p == NULL &&
			    /*
			     * See if it really is a valid .idx file with
			     * corresponding .pack file that we can map.
			     */
			    (p = add_packed_git(path, len + namelen, local)) != NULL)
				install_packed_git(p);
		}

		if (!report_garbage)
			continue;

		/* Pack-related files are grouped and checked for companions
		 * later; anything else is garbage right away. */
		if (has_extension(de->d_name, ".idx") ||
		    has_extension(de->d_name, ".pack") ||
		    has_extension(de->d_name, ".keep"))
			string_list_append(&garbage, path);
		else
			report_garbage("garbage found", path);
	}
	closedir(dir);
	report_pack_garbage(&garbage);
	string_list_clear(&garbage, 0);
}
|
|
|
|
|
2007-03-09 19:52:12 +08:00
|
|
|
static int sort_pack(const void *a_, const void *b_)
|
|
|
|
{
|
|
|
|
struct packed_git *a = *((struct packed_git **)a_);
|
|
|
|
struct packed_git *b = *((struct packed_git **)b_);
|
|
|
|
int st;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Local packs tend to contain objects specific to our
|
|
|
|
* variant of the project than remote ones. In addition,
|
|
|
|
* remote ones could be on a network mounted filesystem.
|
|
|
|
* Favor local ones for these reasons.
|
|
|
|
*/
|
|
|
|
st = a->pack_local - b->pack_local;
|
|
|
|
if (st)
|
|
|
|
return -st;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Younger packs tend to contain more recent objects,
|
|
|
|
* and more recent objects tend to get accessed more
|
|
|
|
* often.
|
|
|
|
*/
|
|
|
|
if (a->mtime < b->mtime)
|
|
|
|
return 1;
|
|
|
|
else if (a->mtime == b->mtime)
|
|
|
|
return 0;
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void rearrange_packed_git(void)
|
|
|
|
{
|
|
|
|
struct packed_git **ary, *p;
|
|
|
|
int i, n;
|
|
|
|
|
|
|
|
for (n = 0, p = packed_git; p; p = p->next)
|
|
|
|
n++;
|
|
|
|
if (n < 2)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* prepare an array of packed_git for easier sorting */
|
|
|
|
ary = xcalloc(n, sizeof(struct packed_git *));
|
|
|
|
for (n = 0, p = packed_git; p; p = p->next)
|
|
|
|
ary[n++] = p;
|
|
|
|
|
|
|
|
qsort(ary, n, sizeof(struct packed_git *), sort_pack);
|
|
|
|
|
|
|
|
/* link them back again */
|
|
|
|
for (i = 0; i < n - 1; i++)
|
|
|
|
ary[i]->next = ary[i + 1];
|
|
|
|
ary[n - 1]->next = NULL;
|
|
|
|
packed_git = ary[0];
|
|
|
|
|
|
|
|
free(ary);
|
|
|
|
}
|
|
|
|
|
2006-06-02 23:32:23 +08:00
|
|
|
/* Guards prepare_packed_git() so the directory scan runs at most once
 * per process (until reprepare_packed_git() resets it). */
static int prepare_packed_git_run_once = 0;
/*
 * Populate the global packed_git list from the main object directory
 * and every alternate, then sort it into preference order.  Idempotent:
 * subsequent calls are no-ops until reprepare_packed_git() is used.
 */
void prepare_packed_git(void)
{
	struct alternate_object_database *alt;

	if (prepare_packed_git_run_once)
		return;
	prepare_packed_git_one(get_object_directory(), 1);
	prepare_alt_odb();
	for (alt = alt_odb_list; alt; alt = alt->next) {
		/*
		 * Temporarily NUL-terminate alt->base by overwriting the '/'
		 * just before alt->name, so it can be passed as a plain
		 * directory path; restore the '/' afterwards.
		 */
		alt->name[-1] = 0;
		prepare_packed_git_one(alt->base, 0);
		alt->name[-1] = '/';
	}
	rearrange_packed_git();
	prepare_packed_git_run_once = 1;
}
|
|
|
|
|
2006-11-02 06:06:21 +08:00
|
|
|
/*
 * Force a fresh scan of the pack directories, e.g. after another
 * process may have added or repacked packs.  The reverse index is
 * discarded first since it caches per-pack data that may be stale.
 */
void reprepare_packed_git(void)
{
	discard_revindex();
	prepare_packed_git_run_once = 0;
	prepare_packed_git();
}
|
|
|
|
|
2008-06-24 09:23:39 +08:00
|
|
|
static void mark_bad_packed_object(struct packed_git *p,
|
|
|
|
const unsigned char *sha1)
|
|
|
|
{
|
|
|
|
unsigned i;
|
|
|
|
for (i = 0; i < p->num_bad_objects; i++)
|
|
|
|
if (!hashcmp(sha1, p->bad_object_sha1 + 20 * i))
|
|
|
|
return;
|
|
|
|
p->bad_object_sha1 = xrealloc(p->bad_object_sha1, 20 * (p->num_bad_objects + 1));
|
|
|
|
hashcpy(p->bad_object_sha1 + 20 * p->num_bad_objects, sha1);
|
|
|
|
p->num_bad_objects++;
|
|
|
|
}
|
|
|
|
|
2010-10-29 02:13:06 +08:00
|
|
|
static const struct packed_git *has_packed_and_bad(const unsigned char *sha1)
|
2008-07-15 09:46:48 +08:00
|
|
|
{
|
|
|
|
struct packed_git *p;
|
|
|
|
unsigned i;
|
|
|
|
|
|
|
|
for (p = packed_git; p; p = p->next)
|
|
|
|
for (i = 0; i < p->num_bad_objects; i++)
|
|
|
|
if (!hashcmp(sha1, p->bad_object_sha1 + 20 * i))
|
2010-10-29 02:13:06 +08:00
|
|
|
return p;
|
|
|
|
return NULL;
|
2008-07-15 09:46:48 +08:00
|
|
|
}
|
|
|
|
|
2012-03-07 18:54:18 +08:00
|
|
|
/*
 * With an in-core object data in "map", rehash it to make sure the
 * object name actually matches "sha1" to detect object corruption.
 * With "map" == NULL, try reading the object named with "sha1" using
 * the streaming interface and rehash it to do the same.
 *
 * Returns 0 when the recomputed name matches, -1 on mismatch or when
 * the object cannot be streamed.
 */
int check_sha1_signature(const unsigned char *sha1, void *map,
			 unsigned long size, const char *type)
{
	unsigned char real_sha1[20];
	enum object_type obj_type;
	struct git_istream *st;
	git_SHA_CTX c;
	char hdr[32];
	int hdrlen;

	if (map) {
		/* Whole object already in memory: one-shot hash and compare. */
		hash_sha1_file(map, size, type, real_sha1);
		return hashcmp(sha1, real_sha1) ? -1 : 0;
	}

	st = open_istream(sha1, &obj_type, &size, NULL);
	if (!st)
		return -1;

	/* Generate the header ("<type> <size>\0"), hashed before the body. */
	hdrlen = sprintf(hdr, "%s %lu", typename(obj_type), size) + 1;

	/* Sha1.. */
	git_SHA1_Init(&c);
	git_SHA1_Update(&c, hdr, hdrlen);
	for (;;) {
		char buf[1024 * 16];
		ssize_t readlen = read_istream(st, buf, sizeof(buf));

		if (readlen < 0) {
			/* Stream error: close and report corruption to the caller. */
			close_istream(st);
			return -1;
		}
		if (!readlen)
			break;
		git_SHA1_Update(&c, buf, readlen);
	}
	git_SHA1_Final(real_sha1, &c);
	close_istream(st);
	return hashcmp(sha1, real_sha1) ? -1 : 0;
}
|
|
|
|
|
2011-05-16 03:16:29 +08:00
|
|
|
static int git_open_noatime(const char *name)
|
2008-06-15 02:32:37 +08:00
|
|
|
{
|
|
|
|
static int sha1_file_open_flag = O_NOATIME;
|
|
|
|
|
2010-11-02 06:54:21 +08:00
|
|
|
for (;;) {
|
|
|
|
int fd = open(name, O_RDONLY | sha1_file_open_flag);
|
2008-06-15 02:32:37 +08:00
|
|
|
if (fd >= 0)
|
2010-11-02 06:54:21 +08:00
|
|
|
return fd;
|
|
|
|
|
|
|
|
/* Might the failure be due to O_NOATIME? */
|
|
|
|
if (errno != ENOENT && sha1_file_open_flag) {
|
2008-06-15 02:32:37 +08:00
|
|
|
sha1_file_open_flag = 0;
|
2010-11-02 06:54:21 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
return -1;
|
2008-06-15 02:32:37 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static int open_sha1_file(const unsigned char *sha1)
|
|
|
|
{
|
|
|
|
int fd;
|
|
|
|
char *name = sha1_file_name(sha1);
|
|
|
|
struct alternate_object_database *alt;
|
|
|
|
|
2011-05-16 03:16:29 +08:00
|
|
|
fd = git_open_noatime(name);
|
2008-06-15 02:32:37 +08:00
|
|
|
if (fd >= 0)
|
|
|
|
return fd;
|
|
|
|
|
|
|
|
prepare_alt_odb();
|
|
|
|
errno = ENOENT;
|
|
|
|
for (alt = alt_odb_list; alt; alt = alt->next) {
|
|
|
|
name = alt->name;
|
|
|
|
fill_sha1_path(name, sha1);
|
2011-05-16 03:16:29 +08:00
|
|
|
fd = git_open_noatime(alt->base);
|
2008-06-15 02:32:37 +08:00
|
|
|
if (fd >= 0)
|
|
|
|
return fd;
|
|
|
|
}
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2011-05-15 10:42:10 +08:00
|
|
|
/*
 * mmap() the loose object file for "sha1" read-only and store its size
 * in *size.  Returns the mapping, or NULL when the object cannot be
 * found, stat'ed, or is empty (mmap() of a zero-length file is not
 * allowed, and a loose object can never legitimately be empty).
 */
void *map_sha1_file(const unsigned char *sha1, unsigned long *size)
{
	void *map;
	int fd;

	fd = open_sha1_file(sha1);
	map = NULL;
	if (fd >= 0) {
		struct stat st;

		if (!fstat(fd, &st)) {
			*size = xsize_t(st.st_size);
			if (!*size) {
				/* mmap() is forbidden on empty files */
				error("object file %s is empty", sha1_file_name(sha1));
				/*
				 * Fix: the original early return leaked the
				 * open descriptor; close it before bailing out.
				 */
				close(fd);
				return NULL;
			}
			map = xmmap(NULL, *size, PROT_READ, MAP_PRIVATE, fd, 0);
		}
		close(fd);
	}
	return map;
}
|
|
|
|
|
2011-06-09 02:29:01 +08:00
|
|
|
/*
 * There used to be a second loose object header format which
 * was meant to mimic the in-pack format, allowing for direct
 * copy of the object data.  This format turned out not to be
 * worth it and we no longer write loose objects that way, but
 * we must still recognize it when reading.
 *
 * Returns 1 when "map" looks like the old experimental format,
 * 0 when it is a standard zlib-deflated loose object.
 *
 * Distinguishing the two:
 *
 *   RFC1950 zlib w/ deflate : 0www1000 : 0 <= www <= 7
 *   Experimental pack-based : Stttssss : ttt = 1,2,3,4
 *
 * If bit 7 is clear and the low nibble is 8, the buffer must be a
 * zlib stream -- unless it were a pack-format object of exactly 8
 * bytes inflated size, which the checksum test below rules out:
 * RFC1950 also requires the first 16-bit word to be divisible by
 * 31.  The only pack-format header that could also satisfy that is
 * 0x1838, i.e. a commit of 8 bytes, and no commit can be that
 * small.  Together the bitmask and the mod-31 checksum therefore
 * classify the buffer unambiguously, for any zlib window size
 * (header bytes 0x08 through 0x78).
 */
static int experimental_loose_object(unsigned char *map)
{
	unsigned int first_word = ((unsigned int)map[0] << 8) | map[1];
	int looks_like_zlib = (map[0] & 0x8F) == 0x08 && first_word % 31 == 0;

	return looks_like_zlib ? 0 : 1;
}
|
|
|
|
|
2008-10-30 07:02:46 +08:00
|
|
|
unsigned long unpack_object_header_buffer(const unsigned char *buf,
|
|
|
|
unsigned long len, enum object_type *type, unsigned long *sizep)
|
2005-06-02 08:54:59 +08:00
|
|
|
{
|
2006-09-02 06:17:01 +08:00
|
|
|
unsigned shift;
|
Fix big left-shifts of unsigned char
Shifting 'unsigned char' or 'unsigned short' left can result in sign
extension errors, since the C integer promotion rules means that the
unsigned char/short will get implicitly promoted to a signed 'int' due to
the shift (or due to other operations).
This normally doesn't matter, but if you shift things up sufficiently, it
will now set the sign bit in 'int', and a subsequent cast to a bigger type
(eg 'long' or 'unsigned long') will now sign-extend the value despite the
original expression being unsigned.
One example of this would be something like
unsigned long size;
unsigned char c;
size += c << 24;
where despite all the variables being unsigned, 'c << 24' ends up being a
signed entity, and will get sign-extended when then doing the addition in
an 'unsigned long' type.
Since git uses 'unsigned char' pointers extensively, we actually have this
bug in a couple of places.
I may have missed some, but this is the result of looking at
git grep '[^0-9 ][ ]*<<[ ][a-z]' -- '*.c' '*.h'
git grep '<<[ ]*24'
which catches at least the common byte cases (shifting variables by a
variable amount, and shifting by 24 bits).
I also grepped for just 'unsigned char' variables in general, and
converted the ones that most obviously ended up getting implicitly cast
immediately anyway (eg hash_name(), encode_85()).
In addition to just avoiding 'unsigned char', this patch also tries to use
a common idiom for the delta header size thing. We had three different
variations on it: "& 0x7fUL" in one place (getting the sign extension
right), and "& ~0x80" and "& 0x7f" in two other places (not getting it
right). Apart from making them all just avoid using "unsigned char" at
all, I also unified them to then use a simple "& 0x7f".
I considered making a sparse extension which warns about doing implicit
casts from unsigned types to signed types, but it gets rather complex very
quickly, so this is just a hack.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2009-06-18 08:22:27 +08:00
|
|
|
unsigned long size, c;
|
2006-09-02 06:17:01 +08:00
|
|
|
unsigned long used = 0;
|
|
|
|
|
|
|
|
c = buf[used++];
|
|
|
|
*type = (c >> 4) & 7;
|
|
|
|
size = c & 15;
|
|
|
|
shift = 4;
|
|
|
|
while (c & 0x80) {
|
2009-07-23 05:34:34 +08:00
|
|
|
if (len <= used || bitsizeof(long) <= shift) {
|
2008-10-30 07:02:46 +08:00
|
|
|
error("bad object header");
|
2011-10-28 02:42:57 +08:00
|
|
|
size = used = 0;
|
|
|
|
break;
|
2008-10-30 07:02:46 +08:00
|
|
|
}
|
2006-09-02 06:17:01 +08:00
|
|
|
c = buf[used++];
|
|
|
|
size += (c & 0x7f) << shift;
|
|
|
|
shift += 7;
|
|
|
|
}
|
|
|
|
*sizep = size;
|
|
|
|
return used;
|
|
|
|
}
|
|
|
|
|
2011-07-20 00:33:03 +08:00
|
|
|
/*
 * Begin inflating a loose object mapped at "map" (mapsize bytes),
 * initializing "stream" and producing the "<type> <size>\0" header into
 * "buffer".  Handles both the standard zlib format and the abandoned
 * experimental pack-style format.  Returns the zlib status of the first
 * inflate call (standard format), 0 (experimental format), or -1 on a
 * malformed experimental header.
 */
int unpack_sha1_header(git_zstream *stream, unsigned char *map, unsigned long mapsize, void *buffer, unsigned long bufsiz)
{
	unsigned long size, used;
	/* Indexed by object type: only commit/tree/blob/tag may appear loose. */
	static const char valid_loose_object_type[8] = {
		0, /* OBJ_EXT */
		1, 1, 1, 1, /* "commit", "tree", "blob", "tag" */
		0, /* "delta" and others are invalid in a loose object */
	};
	enum object_type type;

	/* Get the data stream */
	memset(stream, 0, sizeof(*stream));
	stream->next_in = map;
	stream->avail_in = mapsize;
	stream->next_out = buffer;
	stream->avail_out = bufsiz;

	if (experimental_loose_object(map)) {
		/*
		 * The old experimental format we no longer produce;
		 * we can still read it.
		 */
		used = unpack_object_header_buffer(map, mapsize, &type, &size);
		if (!used || !valid_loose_object_type[type])
			return -1;
		map += used;
		mapsize -= used;

		/* Set up the stream for the rest.. */
		stream->next_in = map;
		stream->avail_in = mapsize;
		git_inflate_init(stream);

		/* And generate the fake traditional header */
		stream->total_out = 1 + snprintf(buffer, bufsiz, "%s %lu",
						 typename(type), size);
		return 0;
	}
	git_inflate_init(stream);
	return git_inflate(stream, 0);
}
|
|
|
|
|
2011-06-11 02:52:15 +08:00
|
|
|
/*
 * Finish inflating a loose object whose header has already been
 * consumed by unpack_sha1_header().  "buffer" holds the header plus any
 * body bytes already inflated; "size" is the body size parsed from the
 * header; "sha1" is used only for error messages.  Returns a newly
 * allocated buffer with the object body (NUL-padded by xmallocz), or
 * NULL on corruption or trailing garbage.
 */
static void *unpack_sha1_rest(git_zstream *stream, void *buffer, unsigned long size, const unsigned char *sha1)
{
	/* Body bytes start right after the header's terminating NUL. */
	int bytes = strlen(buffer) + 1;
	unsigned char *buf = xmallocz(size);
	unsigned long n;
	int status = Z_OK;

	/* Copy whatever body bytes the header inflate already produced. */
	n = stream->total_out - bytes;
	if (n > size)
		n = size;
	memcpy(buf, (char *) buffer + bytes, n);
	bytes = n;
	if (bytes <= size) {
		/*
		 * The above condition must be (bytes <= size), not
		 * (bytes < size).  In other words, even though we
		 * expect no more output and set avail_out to zero,
		 * the input zlib stream may have bytes that express
		 * "this concludes the stream", and we *do* want to
		 * eat that input.
		 *
		 * Otherwise we would not be able to test that we
		 * consumed all the input to reach the expected size;
		 * we also want to check that zlib tells us that all
		 * went well with status == Z_STREAM_END at the end.
		 */
		stream->next_out = buf + bytes;
		stream->avail_out = size - bytes;
		while (status == Z_OK)
			status = git_inflate(stream, Z_FINISH);
	}
	if (status == Z_STREAM_END && !stream->avail_in) {
		git_inflate_end(stream);
		return buf;
	}

	/* Distinguish an inflate error from unconsumed trailing input. */
	if (status < 0)
		error("corrupt loose object '%s'", sha1_to_hex(sha1));
	else if (stream->avail_in)
		error("garbage at end of loose object '%s'",
		      sha1_to_hex(sha1));
	free(buf);
	return NULL;
}
|
|
|
|
|
|
|
|
/*
 * We used to just use "sscanf()", but that's actually way
 * too permissive for what we want to check.  So do an anal
 * object header parse by hand.
 *
 * Parses "<type> <size>\0", stores the size in *sizep, and returns
 * the numeric object type, or -1 when the header is malformed.
 */
int parse_sha1_header(const char *hdr, unsigned long *sizep)
{
	char typebuf[10];
	size_t len = 0;
	unsigned long objsize;

	/*
	 * Collect the type tag: at most sizeof(typebuf) - 1 characters
	 * (we add the terminating NUL ourselves), ended by one space.
	 */
	for (;;) {
		char ch = *hdr++;
		if (ch == ' ')
			break;
		typebuf[len++] = ch;
		if (len >= sizeof(typebuf))
			return -1;
	}
	typebuf[len] = 0;

	/*
	 * The length must follow immediately, in canonical decimal
	 * format (ie "010" is not valid).  A non-digit first character
	 * wraps to a huge unsigned value and is rejected here.
	 */
	objsize = *hdr++ - '0';
	if (objsize > 9)
		return -1;
	if (objsize) {
		unsigned long digit;
		while ((digit = *hdr - '0') <= 9) {
			hdr++;
			objsize = objsize * 10 + digit;
		}
	}
	*sizep = objsize;

	/* Nothing may follow the size except the terminating NUL byte. */
	return *hdr ? -1 : type_from_string(typebuf);
}
|
|
|
|
|
2007-03-05 16:21:37 +08:00
|
|
|
/*
 * Inflate a complete loose object mapped at "map": parse its header
 * into *type and *size, then return the newly allocated body, or NULL
 * when the header is unreadable/invalid or the body is corrupt.
 */
static void *unpack_sha1_file(void *map, unsigned long mapsize, enum object_type *type, unsigned long *size, const unsigned char *sha1)
{
	int ret;
	git_zstream stream;
	/* Large enough for the header plus the first chunk of inflated body. */
	char hdr[8192];

	ret = unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr));
	if (ret < Z_OK || (*type = parse_sha1_header(hdr, size)) < 0)
		return NULL;

	return unpack_sha1_rest(&stream, hdr, *size, sha1);
}
|
|
|
|
|
2007-04-17 00:31:56 +08:00
|
|
|
/*
 * Inflate just enough of the delta data at "curpos" in pack "p" to read
 * the delta preamble (base size and result size varints) and return the
 * result size.  Returns 0 when the delta head cannot be inflated.
 */
unsigned long get_size_from_delta(struct packed_git *p,
				  struct pack_window **w_curs,
				  off_t curpos)
{
	const unsigned char *data;
	/* 20 bytes is plenty for two size varints at the head of a delta. */
	unsigned char delta_head[20], *in;
	git_zstream stream;
	int st;

	memset(&stream, 0, sizeof(stream));
	stream.next_out = delta_head;
	stream.avail_out = sizeof(delta_head);

	git_inflate_init(&stream);
	do {
		/* Pull input a window at a time; avail_in is set by use_pack(). */
		in = use_pack(p, w_curs, curpos, &stream.avail_in);
		stream.next_in = in;
		st = git_inflate(&stream, Z_FINISH);
		curpos += stream.next_in - in;
	} while ((st == Z_OK || st == Z_BUF_ERROR) &&
		 stream.total_out < sizeof(delta_head));
	git_inflate_end(&stream);
	if ((st != Z_STREAM_END) && stream.total_out != sizeof(delta_head)) {
		error("delta data unpack-initial failed");
		return 0;
	}

	/* Examine the initial part of the delta to figure out
	 * the result size.
	 */
	data = delta_head;

	/* ignore base size */
	get_delta_hdr_size(&data, delta_head+sizeof(delta_head));

	/* Read the result size */
	return get_delta_hdr_size(&data, delta_head+sizeof(delta_head));
}
|
|
|
|
|
2007-03-07 09:44:30 +08:00
|
|
|
/*
 * Given a delta object of kind "type" at "delta_obj_offset" in pack
 * "p", decode its base reference starting at *curpos and return the
 * base object's offset within the same pack.  Advances *curpos past
 * the base reference.  Returns 0 on overflow, an out-of-bound offset,
 * or (for REF_DELTA) a base that is not in this pack.
 */
static off_t get_delta_base(struct packed_git *p,
			    struct pack_window **w_curs,
			    off_t *curpos,
			    enum object_type type,
			    off_t delta_obj_offset)
{
	unsigned char *base_info = use_pack(p, w_curs, *curpos, NULL);
	off_t base_offset;

	/* use_pack() assured us we have [base_info, base_info + 20)
	 * as a range that we can look at without walking off the
	 * end of the mapped window.  Its actually the hash size
	 * that is assured.  An OFS_DELTA longer than the hash size
	 * is stupid, as then a REF_DELTA would be smaller to store.
	 */
	if (type == OBJ_OFS_DELTA) {
		/* Big-endian base-128 varint with an off-by-one per continuation. */
		unsigned used = 0;
		unsigned char c = base_info[used++];
		base_offset = c & 127;
		while (c & 128) {
			base_offset += 1;
			/* Detect wrap to zero or imminent shift overflow. */
			if (!base_offset || MSB(base_offset, 7))
				return 0; /* overflow */
			c = base_info[used++];
			base_offset = (base_offset << 7) + (c & 127);
		}
		/* The varint encodes a negative displacement from this object. */
		base_offset = delta_obj_offset - base_offset;
		if (base_offset <= 0 || base_offset >= delta_obj_offset)
			return 0; /* out of bound */
		*curpos += used;
	} else if (type == OBJ_REF_DELTA) {
		/* The base entry _must_ be in the same pack */
		base_offset = find_pack_entry_one(base_info, p);
		*curpos += 20;
	} else
		die("I am totally screwed");
	return base_offset;
}
|
|
|
|
|
2011-05-14 06:33:33 +08:00
|
|
|
/*
 * Read the object header at *curpos in pack "p": store the inflated
 * size in *sizep, advance *curpos past the header, and return the
 * object type (or OBJ_BAD when the header cannot be parsed).
 */
int unpack_object_header(struct packed_git *p,
			 struct pack_window **w_curs,
			 off_t *curpos,
			 unsigned long *sizep)
{
	unsigned char *base;
	unsigned long left;
	unsigned long used;
	enum object_type type;

	/* use_pack() assures us we have [base, base + 20) available
	 * as a range that we can look at.  (Its actually the hash
	 * size that is assured.)  With our object header encoding
	 * the maximum deflated object size is 2^137, which is just
	 * insane, so we know won't exceed what we have been given.
	 */
	base = use_pack(p, w_curs, *curpos, &left);
	used = unpack_object_header_buffer(base, left, &type, sizep);
	if (!used) {
		type = OBJ_BAD;
	} else
		*curpos += used;

	return type;
}
|
|
|
|
|
sha1_file: remove recursion in packed_object_info
packed_object_info() and packed_delta_info() were mutually recursive.
The former would handle ordinary types and defer deltas to the latter;
the latter would use the former to resolve the delta base.
This arrangement, however, leads to trouble with threaded index-pack
and long delta chains on platforms where thread stacks are small, as
happened on OS X (512kB thread stacks by default) with the chromium
repo.
The task of the two functions is not all that hard to describe without
any recursion, however. It proceeds in three steps:
- determine the representation type and size, based on the outermost
object (delta or not)
- follow through the delta chain, if any
- determine the object type from what is found at the end of the delta
chain
The only complication stems from the error recovery. If parsing fails
at any step, we want to mark that object (within the pack) as bad and
try getting the corresponding SHA1 from elsewhere. If that also
fails, we want to repeat this process back up the delta chain until we
find a reasonable solution or conclude that there is no way to
reconstruct the object. (This is conveniently checked by t5303.)
To achieve that within the pack, we keep track of the entire delta
chain in a stack. When things go sour, we process that stack from the
top, marking entries as bad and attempting to re-resolve by sha1. To
avoid excessive malloc(), the stack starts out with a small
stack-allocated array. The choice of 64 is based on the default of
pack.depth, which is 50, in the hope that it covers "most" delta
chains without any need for malloc().
It's much harder to make the actual re-resolving by sha1 nonrecursive,
so we skip that. If you can't afford *that* recursion, your
corruption problems are more serious than your stack size problems.
Reported-by: Stefan Zager <szager@google.com>
Signed-off-by: Thomas Rast <trast@student.ethz.ch>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-03-26 02:07:39 +08:00
|
|
|
static int retry_bad_packed_offset(struct packed_git *p, off_t obj_offset)
|
|
|
|
{
|
|
|
|
int type;
|
|
|
|
struct revindex_entry *revidx;
|
|
|
|
const unsigned char *sha1;
|
|
|
|
revidx = find_pack_revindex(p, obj_offset);
|
|
|
|
if (!revidx)
|
|
|
|
return OBJ_BAD;
|
|
|
|
sha1 = nth_packed_object_sha1(p, revidx->nr);
|
|
|
|
mark_bad_packed_object(p, sha1);
|
|
|
|
type = sha1_object_info(sha1, NULL);
|
|
|
|
if (type <= OBJ_NONE)
|
|
|
|
return OBJ_BAD;
|
|
|
|
return type;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Preallocated stack depth for delta-chain walking; chosen to cover
 * chains up to the default pack.depth (50) without needing malloc().
 */
#define POI_STACK_PREALLOC 64

/*
 * Determine the object type (and, if sizep is non-NULL, the inflated
 * size) of the object stored at obj_offset in pack p, without fully
 * unpacking it.
 *
 * If rtype is non-NULL it receives the representation type of the
 * outermost entry (which may be OBJ_OFS_DELTA/OBJ_REF_DELTA), while
 * the return value is the real object type found at the end of any
 * delta chain.  Returns OBJ_BAD on unrecoverable parse failure.
 *
 * The delta chain is followed iteratively; offsets already visited
 * are pushed on poi_stack so that, on failure, each can be retried
 * via retry_bad_packed_offset() (see the "unwind" path).
 */
static int packed_object_info(struct packed_git *p, off_t obj_offset,
			      unsigned long *sizep, int *rtype)
{
	struct pack_window *w_curs = NULL;
	unsigned long size;
	off_t curpos = obj_offset;
	enum object_type type;
	/* small on-stack buffer; spills to the heap only for deep chains */
	off_t small_poi_stack[POI_STACK_PREALLOC];
	off_t *poi_stack = small_poi_stack;
	int poi_stack_nr = 0, poi_stack_alloc = POI_STACK_PREALLOC;

	type = unpack_object_header(p, &w_curs, &curpos, &size);
	if (rtype)
		*rtype = type; /* representation type */

	if (sizep) {
		if (type == OBJ_OFS_DELTA || type == OBJ_REF_DELTA) {
			/* size of a delta is recorded in its own header */
			off_t tmp_pos = curpos;
			off_t base_offset = get_delta_base(p, &w_curs, &tmp_pos,
							   type, obj_offset);
			if (!base_offset) {
				type = OBJ_BAD;
				goto out;
			}
			*sizep = get_size_from_delta(p, &w_curs, tmp_pos);
			if (*sizep == 0) {
				type = OBJ_BAD;
				goto out;
			}
		} else {
			*sizep = size;
		}
	}

	while (type == OBJ_OFS_DELTA || type == OBJ_REF_DELTA) {
		off_t base_offset;

		/* Push the object we're going to leave behind */
		if (poi_stack_nr >= poi_stack_alloc && poi_stack == small_poi_stack) {
			/* first spill from the preallocated array to the heap */
			poi_stack_alloc = alloc_nr(poi_stack_nr);
			poi_stack = xmalloc(sizeof(off_t)*poi_stack_alloc);
			memcpy(poi_stack, small_poi_stack, sizeof(off_t)*poi_stack_nr);
		} else {
			ALLOC_GROW(poi_stack, poi_stack_nr+1, poi_stack_alloc);
		}
		poi_stack[poi_stack_nr++] = obj_offset;

		/* If parsing the base offset fails, just unwind */
		base_offset = get_delta_base(p, &w_curs, &curpos, type, obj_offset);
		if (!base_offset)
			goto unwind;
		curpos = obj_offset = base_offset;
		type = unpack_object_header(p, &w_curs, &curpos, &size);
		if (type <= OBJ_NONE) {
			/* If getting the base itself fails, we first
			 * retry the base, otherwise unwind */
			type = retry_bad_packed_offset(p, base_offset);
			if (type > OBJ_NONE)
				goto out;
			goto unwind;
		}
	}

	/* the end of the chain must be a concrete (or already-bad) type */
	switch (type) {
	case OBJ_BAD:
	case OBJ_COMMIT:
	case OBJ_TREE:
	case OBJ_BLOB:
	case OBJ_TAG:
		break;
	default:
		error("unknown object type %i at offset %"PRIuMAX" in %s",
		      type, (uintmax_t)obj_offset, p->pack_name);
		type = OBJ_BAD;
	}

out:
	if (poi_stack != small_poi_stack)
		free(poi_stack);
	unuse_pack(&w_curs);
	return type;

unwind:
	/*
	 * Walk back up the chain of offsets we saw, marking each bad and
	 * retrying it from elsewhere; succeed with the first that resolves.
	 */
	while (poi_stack_nr) {
		obj_offset = poi_stack[--poi_stack_nr];
		type = retry_bad_packed_offset(p, obj_offset);
		if (type > OBJ_NONE)
			goto out;
	}
	type = OBJ_BAD;
	goto out;
}
|
|
|
|
|
2006-08-26 16:12:27 +08:00
|
|
|
/*
 * Inflate the deflated payload found at curpos in pack p into a
 * freshly allocated, NUL-terminated buffer of exactly `size` bytes.
 *
 * Returns the buffer (caller frees), or NULL if the stream does not
 * end cleanly or produces a different amount of data than expected.
 */
static void *unpack_compressed_entry(struct packed_git *p,
				    struct pack_window **w_curs,
				    off_t curpos,
				    unsigned long size)
{
	int st;
	git_zstream stream;
	unsigned char *buffer, *in;

	buffer = xmallocz(size);
	memset(&stream, 0, sizeof(stream));
	stream.next_out = buffer;
	/*
	 * One byte more than `size`: xmallocz() provides a spare NUL
	 * byte, and letting inflate use it means avail_out hitting zero
	 * unambiguously signals a corrupt, oversized stream.
	 */
	stream.avail_out = size + 1;

	git_inflate_init(&stream);
	do {
		/* feed input window by window; use_pack() sets avail_in */
		in = use_pack(p, w_curs, curpos, &stream.avail_in);
		stream.next_in = in;
		st = git_inflate(&stream, Z_FINISH);
		if (!stream.avail_out)
			break; /* the payload is larger than it should be */
		curpos += stream.next_in - in;
	} while (st == Z_OK || st == Z_BUF_ERROR);
	git_inflate_end(&stream);
	/* require a clean end-of-stream AND the exact expected size */
	if ((st != Z_STREAM_END) || stream.total_out != size) {
		free(buffer);
		return NULL;
	}

	return buffer;
}
|
|
|
|
|
2007-03-18 03:44:06 +08:00
|
|
|
/* Number of hash slots in the delta base cache. */
#define MAX_DELTA_CACHE (256)

/* Total bytes currently held by the cache (compared against
 * delta_base_cache_limit when deciding to evict). */
static size_t delta_base_cached;

/*
 * Intrusive doubly-linked LRU list threaded through the cache entries.
 * The list head is self-linked when the cache is empty; entries are
 * appended at the tail, so the head's `next` is the least recently used.
 */
static struct delta_base_cache_lru_list {
	struct delta_base_cache_lru_list *prev;
	struct delta_base_cache_lru_list *next;
} delta_base_cache_lru = { &delta_base_cache_lru, &delta_base_cache_lru };

/*
 * Direct-mapped cache of unpacked delta base objects, keyed by
 * (pack, offset) via pack_entry_hash().  An entry is occupied iff
 * `data` is non-NULL.  NOTE: `lru` must stay the first member — the
 * eviction loops cast a lru pointer back to the enclosing entry.
 */
static struct delta_base_cache_entry {
	struct delta_base_cache_lru_list lru;
	void *data;		/* unpacked object payload (owned) */
	struct packed_git *p;	/* pack the entry came from */
	off_t base_offset;	/* offset of the object within p */
	unsigned long size;	/* size of data in bytes */
	enum object_type type;	/* real (non-delta) object type */
} delta_base_cache[MAX_DELTA_CACHE];
|
|
|
|
|
|
|
|
static unsigned long pack_entry_hash(struct packed_git *p, off_t base_offset)
|
|
|
|
{
|
|
|
|
unsigned long hash;
|
|
|
|
|
|
|
|
hash = (unsigned long)p + (unsigned long)base_offset;
|
|
|
|
hash += (hash >> 8) + (hash >> 16);
|
2007-03-20 04:28:51 +08:00
|
|
|
return hash % MAX_DELTA_CACHE;
|
2007-03-18 03:44:06 +08:00
|
|
|
}
|
|
|
|
|
2013-03-28 04:03:41 +08:00
|
|
|
static struct delta_base_cache_entry *
|
|
|
|
get_delta_base_cache_entry(struct packed_git *p, off_t base_offset)
|
2011-05-14 04:20:43 +08:00
|
|
|
{
|
|
|
|
unsigned long hash = pack_entry_hash(p, base_offset);
|
2013-03-28 04:03:41 +08:00
|
|
|
return delta_base_cache + hash;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int eq_delta_base_cache_entry(struct delta_base_cache_entry *ent,
|
|
|
|
struct packed_git *p, off_t base_offset)
|
|
|
|
{
|
2011-05-14 04:20:43 +08:00
|
|
|
return (ent->data && ent->p == p && ent->base_offset == base_offset);
|
|
|
|
}
|
|
|
|
|
2013-03-28 04:03:41 +08:00
|
|
|
/* Report whether the object at (p, base_offset) is cached. */
static int in_delta_base_cache(struct packed_git *p, off_t base_offset)
{
	struct delta_base_cache_entry *slot =
		get_delta_base_cache_entry(p, base_offset);

	return eq_delta_base_cache_entry(slot, p, base_offset);
}
|
|
|
|
|
|
|
|
static void clear_delta_base_cache_entry(struct delta_base_cache_entry *ent)
|
|
|
|
{
|
|
|
|
ent->data = NULL;
|
|
|
|
ent->lru.next->prev = ent->lru.prev;
|
|
|
|
ent->lru.prev->next = ent->lru.next;
|
|
|
|
delta_base_cached -= ent->size;
|
|
|
|
}
|
|
|
|
|
2007-03-18 03:42:15 +08:00
|
|
|
static void *cache_or_unpack_entry(struct packed_git *p, off_t base_offset,
|
2007-03-18 09:13:57 +08:00
|
|
|
unsigned long *base_size, enum object_type *type, int keep_cache)
|
2007-03-18 03:42:15 +08:00
|
|
|
{
|
2013-03-28 04:03:41 +08:00
|
|
|
struct delta_base_cache_entry *ent;
|
2007-03-18 03:44:06 +08:00
|
|
|
void *ret;
|
|
|
|
|
2013-03-28 04:03:41 +08:00
|
|
|
ent = get_delta_base_cache_entry(p, base_offset);
|
|
|
|
|
|
|
|
if (!eq_delta_base_cache_entry(ent, p, base_offset))
|
2008-10-09 08:11:24 +08:00
|
|
|
return unpack_entry(p, base_offset, type, base_size);
|
2007-03-18 03:44:06 +08:00
|
|
|
|
2013-03-28 04:03:41 +08:00
|
|
|
ret = ent->data;
|
|
|
|
|
|
|
|
if (!keep_cache)
|
|
|
|
clear_delta_base_cache_entry(ent);
|
|
|
|
else
|
2007-09-16 06:32:36 +08:00
|
|
|
ret = xmemdupz(ent->data, ent->size);
|
2007-03-18 03:44:06 +08:00
|
|
|
*type = ent->type;
|
|
|
|
*base_size = ent->size;
|
|
|
|
return ret;
|
2007-03-18 03:42:15 +08:00
|
|
|
}
|
|
|
|
|
2007-03-19 13:14:37 +08:00
|
|
|
static inline void release_delta_base_cache(struct delta_base_cache_entry *ent)
|
|
|
|
{
|
|
|
|
if (ent->data) {
|
|
|
|
free(ent->data);
|
|
|
|
ent->data = NULL;
|
2007-03-20 04:31:04 +08:00
|
|
|
ent->lru.next->prev = ent->lru.prev;
|
|
|
|
ent->lru.prev->next = ent->lru.next;
|
2007-03-19 13:14:37 +08:00
|
|
|
delta_base_cached -= ent->size;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-02-11 05:36:12 +08:00
|
|
|
void clear_delta_base_cache(void)
|
|
|
|
{
|
|
|
|
unsigned long p;
|
|
|
|
for (p = 0; p < MAX_DELTA_CACHE; p++)
|
|
|
|
release_delta_base_cache(&delta_base_cache[p]);
|
|
|
|
}
|
|
|
|
|
2007-03-18 03:42:15 +08:00
|
|
|
/*
 * Insert an unpacked object into the delta base cache, taking
 * ownership of `base`.  Whatever previously occupied the target slot
 * is evicted, and if the cache then exceeds delta_base_cache_limit,
 * least-recently-used entries are evicted — blobs first (they are the
 * least likely to be needed again as delta bases), then anything.
 */
static void add_delta_base_cache(struct packed_git *p, off_t base_offset,
	void *base, unsigned long base_size, enum object_type type)
{
	unsigned long hash = pack_entry_hash(p, base_offset);
	struct delta_base_cache_entry *ent = delta_base_cache + hash;
	struct delta_base_cache_lru_list *lru;

	/* direct-mapped: evict whatever currently occupies this slot */
	release_delta_base_cache(ent);
	delta_base_cached += base_size;

	/* first pass: evict LRU blobs until we fit under the limit */
	for (lru = delta_base_cache_lru.next;
	     delta_base_cached > delta_base_cache_limit
	     && lru != &delta_base_cache_lru;
	     lru = lru->next) {
		/* lru is the first member of the entry, so cast back */
		struct delta_base_cache_entry *f = (void *)lru;
		if (f->type == OBJ_BLOB)
			release_delta_base_cache(f);
	}
	/* second pass: still over the limit, evict LRU entries of any type
	 * (release unlinks f but leaves f->lru.next intact, so advancing
	 * via lru->next after eviction still walks the remaining list) */
	for (lru = delta_base_cache_lru.next;
	     delta_base_cached > delta_base_cache_limit
	     && lru != &delta_base_cache_lru;
	     lru = lru->next) {
		struct delta_base_cache_entry *f = (void *)lru;
		release_delta_base_cache(f);
	}

	/* populate the slot and append it at the most-recently-used end */
	ent->p = p;
	ent->base_offset = base_offset;
	ent->type = type;
	ent->data = base;
	ent->size = base_size;
	ent->lru.next = &delta_base_cache_lru;
	ent->lru.prev = delta_base_cache_lru.prev;
	delta_base_cache_lru.prev->next = &ent->lru;
	delta_base_cache_lru.prev = &ent->lru;
}
|
|
|
|
|
2009-01-13 01:42:24 +08:00
|
|
|
/* Forward declaration; defined later in this file. */
static void *read_object(const unsigned char *sha1, enum object_type *type,
			 unsigned long *size);
|
|
|
|
|
2011-07-07 10:08:55 +08:00
|
|
|
static void write_pack_access_log(struct packed_git *p, off_t obj_offset)
|
|
|
|
{
|
|
|
|
static FILE *log_file;
|
|
|
|
|
|
|
|
if (!log_file) {
|
|
|
|
log_file = fopen(log_pack_access, "w");
|
|
|
|
if (!log_file) {
|
|
|
|
error("cannot open pack access log '%s' for writing: %s",
|
|
|
|
log_pack_access, strerror(errno));
|
|
|
|
log_pack_access = NULL;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
fprintf(log_file, "%s %"PRIuMAX"\n",
|
|
|
|
p->pack_name, (uintmax_t)obj_offset);
|
|
|
|
fflush(log_file);
|
|
|
|
}
|
|
|
|
|
close another possibility for propagating pack corruption
Abstract
--------
With index v2 we have a per object CRC to allow quick and safe reuse of
pack data when repacking. This, however, doesn't currently prevent a
stealth corruption from being propagated into a new pack when _not_
reusing pack data as demonstrated by the modification to t5302 included
here.
The Context
-----------
The Git database is all checksummed with SHA1 hashes. Any kind of
corruption can be confirmed by verifying this per object hash against
corresponding data. However this can be costly to perform systematically
and therefore this check is often not performed at run time when
accessing the object database.
First, the loose object format is entirely compressed with zlib which
already provide a CRC verification of its own when inflating data. Any
disk corruption would be caught already in this case.
Then, packed objects are also compressed with zlib but only for their
actual payload. The object headers and delta base references are not
deflated for obvious performance reasons, however this leave them
vulnerable to potentially undetected disk corruptions. Object types
are often validated against the expected type when they're requested,
and deflated size must always match the size recorded in the object header,
so those cases are pretty much covered as well.
Where corruptions could go unnoticed is in the delta base reference.
Of course, in the OBJ_REF_DELTA case, the odds for a SHA1 reference to
get corrupted so it actually matches the SHA1 of another object with the
same size (the delta header stores the expected size of the base object
to apply against) are virtually zero. In the OBJ_OFS_DELTA case, the
reference is a pack offset which would have to match the start boundary
of a different base object but still with the same size, and although this
is relatively much more "probable" than in the OBJ_REF_DELTA case, the
probability is also about zero in absolute terms. Still, the possibility
exists as demonstrated in t5302 and is certainly greater than a SHA1
collision, especially in the OBJ_OFS_DELTA case which is now the default
when repacking.
Again, repacking by reusing existing pack data is OK since the per object
CRC provided by index v2 guards against any such corruptions. What t5302
failed to test is a full repack in such case.
The Solution
------------
As unlikely as this kind of stealth corruption can be in practice, it
certainly isn't acceptable to propagate it into a freshly created pack.
But, because this is so unlikely, we don't want to pay the run time cost
associated with extra validation checks all the time either. Furthermore,
consequences of such corruption in anything but repacking should be rather
visible, and even if it could be quite unpleasant, it still has far less
severe consequences than actively creating bad packs.
So the best compromize is to check packed object CRC when unpacking
objects, and only during the compression/writing phase of a repack, and
only when not streaming the result. The cost of this is minimal (less
than 1% CPU time), and visible only with a full repack.
Someone with a stats background could provide an objective evaluation of
this, but I suspect that it's bad RAM that has more potential for data
corruptions at this point, even in those cases where this extra check
is not performed. Still, it is best to prevent a known hole for
corruption when recreating object data into a new pack.
What about the streamed pack case? Well, any client receiving a pack
must always consider that pack as untrusty and perform full validation
anyway, hence no such stealth corruption could be propagated to remote
repositoryes already. It is therefore worthless doing local validation
in that case.
Signed-off-by: Nicolas Pitre <nico@cam.org>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2008-10-31 23:31:08 +08:00
|
|
|
int do_check_packed_object_crc;
|
|
|
|
|
2013-03-28 04:03:42 +08:00
|
|
|
#define UNPACK_ENTRY_STACK_PREALLOC 64
|
|
|
|
struct unpack_entry_stack_ent {
|
|
|
|
off_t obj_offset;
|
|
|
|
off_t curpos;
|
|
|
|
unsigned long size;
|
|
|
|
};
|
|
|
|
|
2007-03-07 09:44:30 +08:00
|
|
|
void *unpack_entry(struct packed_git *p, off_t obj_offset,
|
2013-03-28 04:03:42 +08:00
|
|
|
enum object_type *final_type, unsigned long *final_size)
|
2005-06-27 18:35:33 +08:00
|
|
|
{
|
2006-12-23 15:34:08 +08:00
|
|
|
struct pack_window *w_curs = NULL;
|
2007-03-07 09:44:30 +08:00
|
|
|
off_t curpos = obj_offset;
|
2013-03-28 04:03:42 +08:00
|
|
|
void *data = NULL;
|
|
|
|
unsigned long size;
|
|
|
|
enum object_type type;
|
|
|
|
struct unpack_entry_stack_ent small_delta_stack[UNPACK_ENTRY_STACK_PREALLOC];
|
|
|
|
struct unpack_entry_stack_ent *delta_stack = small_delta_stack;
|
|
|
|
int delta_stack_nr = 0, delta_stack_alloc = UNPACK_ENTRY_STACK_PREALLOC;
|
|
|
|
int base_from_cache = 0;
|
2005-06-27 18:35:33 +08:00
|
|
|
|
2011-07-07 10:08:55 +08:00
|
|
|
if (log_pack_access)
|
|
|
|
write_pack_access_log(p, obj_offset);
|
|
|
|
|
2013-03-28 04:03:42 +08:00
|
|
|
/* PHASE 1: drill down to the innermost base object */
|
|
|
|
for (;;) {
|
|
|
|
off_t base_offset;
|
|
|
|
int i;
|
|
|
|
struct delta_base_cache_entry *ent;
|
|
|
|
|
|
|
|
if (do_check_packed_object_crc && p->index_version > 1) {
|
|
|
|
struct revindex_entry *revidx = find_pack_revindex(p, obj_offset);
|
|
|
|
unsigned long len = revidx[1].offset - obj_offset;
|
|
|
|
if (check_pack_crc(p, &w_curs, obj_offset, len, revidx->nr)) {
|
|
|
|
const unsigned char *sha1 =
|
|
|
|
nth_packed_object_sha1(p, revidx->nr);
|
|
|
|
error("bad packed object CRC for %s",
|
|
|
|
sha1_to_hex(sha1));
|
|
|
|
mark_bad_packed_object(p, sha1);
|
|
|
|
unuse_pack(&w_curs);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ent = get_delta_base_cache_entry(p, curpos);
|
|
|
|
if (eq_delta_base_cache_entry(ent, p, curpos)) {
|
|
|
|
type = ent->type;
|
|
|
|
data = ent->data;
|
|
|
|
size = ent->size;
|
|
|
|
clear_delta_base_cache_entry(ent);
|
|
|
|
base_from_cache = 1;
|
|
|
|
break;
|
close another possibility for propagating pack corruption
Abstract
--------
With index v2 we have a per object CRC to allow quick and safe reuse of
pack data when repacking. This, however, doesn't currently prevent a
stealth corruption from being propagated into a new pack when _not_
reusing pack data as demonstrated by the modification to t5302 included
here.
The Context
-----------
The Git database is all checksummed with SHA1 hashes. Any kind of
corruption can be confirmed by verifying this per object hash against
corresponding data. However this can be costly to perform systematically
and therefore this check is often not performed at run time when
accessing the object database.
First, the loose object format is entirely compressed with zlib which
already provide a CRC verification of its own when inflating data. Any
disk corruption would be caught already in this case.
Then, packed objects are also compressed with zlib but only for their
actual payload. The object headers and delta base references are not
deflated for obvious performance reasons, however this leave them
vulnerable to potentially undetected disk corruptions. Object types
are often validated against the expected type when they're requested,
and deflated size must always match the size recorded in the object header,
so those cases are pretty much covered as well.
Where corruptions could go unnoticed is in the delta base reference.
Of course, in the OBJ_REF_DELTA case, the odds for a SHA1 reference to
get corrupted so it actually matches the SHA1 of another object with the
same size (the delta header stores the expected size of the base object
to apply against) are virtually zero. In the OBJ_OFS_DELTA case, the
reference is a pack offset which would have to match the start boundary
of a different base object but still with the same size, and although this
is relatively much more "probable" than in the OBJ_REF_DELTA case, the
probability is also about zero in absolute terms. Still, the possibility
exists as demonstrated in t5302 and is certainly greater than a SHA1
collision, especially in the OBJ_OFS_DELTA case which is now the default
when repacking.
Again, repacking by reusing existing pack data is OK since the per object
CRC provided by index v2 guards against any such corruptions. What t5302
failed to test is a full repack in such case.
The Solution
------------
As unlikely as this kind of stealth corruption can be in practice, it
certainly isn't acceptable to propagate it into a freshly created pack.
But, because this is so unlikely, we don't want to pay the run time cost
associated with extra validation checks all the time either. Furthermore,
consequences of such corruption in anything but repacking should be rather
visible, and even if it could be quite unpleasant, it still has far less
severe consequences than actively creating bad packs.
So the best compromize is to check packed object CRC when unpacking
objects, and only during the compression/writing phase of a repack, and
only when not streaming the result. The cost of this is minimal (less
than 1% CPU time), and visible only with a full repack.
Someone with a stats background could provide an objective evaluation of
this, but I suspect that it's bad RAM that has more potential for data
corruptions at this point, even in those cases where this extra check
is not performed. Still, it is best to prevent a known hole for
corruption when recreating object data into a new pack.
What about the streamed pack case? Well, any client receiving a pack
must always consider that pack as untrusty and perform full validation
anyway, hence no such stealth corruption could be propagated to remote
repositoryes already. It is therefore worthless doing local validation
in that case.
Signed-off-by: Nicolas Pitre <nico@cam.org>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2008-10-31 23:31:08 +08:00
|
|
|
}
|
2013-03-28 04:03:42 +08:00
|
|
|
|
|
|
|
type = unpack_object_header(p, &w_curs, &curpos, &size);
|
|
|
|
if (type != OBJ_OFS_DELTA && type != OBJ_REF_DELTA)
|
|
|
|
break;
|
|
|
|
|
|
|
|
base_offset = get_delta_base(p, &w_curs, &curpos, type, obj_offset);
|
|
|
|
if (!base_offset) {
|
|
|
|
error("failed to validate delta base reference "
|
|
|
|
"at offset %"PRIuMAX" from %s",
|
|
|
|
(uintmax_t)curpos, p->pack_name);
|
|
|
|
/* bail to phase 2, in hopes of recovery */
|
|
|
|
data = NULL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* push object, proceed to base */
|
|
|
|
if (delta_stack_nr >= delta_stack_alloc
|
|
|
|
&& delta_stack == small_delta_stack) {
|
|
|
|
delta_stack_alloc = alloc_nr(delta_stack_nr);
|
|
|
|
delta_stack = xmalloc(sizeof(*delta_stack)*delta_stack_alloc);
|
|
|
|
memcpy(delta_stack, small_delta_stack,
|
|
|
|
sizeof(*delta_stack)*delta_stack_nr);
|
|
|
|
} else {
|
|
|
|
ALLOC_GROW(delta_stack, delta_stack_nr+1, delta_stack_alloc);
|
|
|
|
}
|
|
|
|
i = delta_stack_nr++;
|
|
|
|
delta_stack[i].obj_offset = obj_offset;
|
|
|
|
delta_stack[i].curpos = curpos;
|
|
|
|
delta_stack[i].size = size;
|
|
|
|
|
|
|
|
curpos = obj_offset = base_offset;
|
close another possibility for propagating pack corruption
Abstract
--------
With index v2 we have a per object CRC to allow quick and safe reuse of
pack data when repacking. This, however, doesn't currently prevent a
stealth corruption from being propagated into a new pack when _not_
reusing pack data as demonstrated by the modification to t5302 included
here.
The Context
-----------
The Git database is all checksummed with SHA1 hashes. Any kind of
corruption can be confirmed by verifying this per object hash against
corresponding data. However this can be costly to perform systematically
and therefore this check is often not performed at run time when
accessing the object database.
First, the loose object format is entirely compressed with zlib which
already provide a CRC verification of its own when inflating data. Any
disk corruption would be caught already in this case.
Then, packed objects are also compressed with zlib but only for their
actual payload. The object headers and delta base references are not
deflated for obvious performance reasons, however this leave them
vulnerable to potentially undetected disk corruptions. Object types
are often validated against the expected type when they're requested,
and deflated size must always match the size recorded in the object header,
so those cases are pretty much covered as well.
Where corruptions could go unnoticed is in the delta base reference.
Of course, in the OBJ_REF_DELTA case, the odds for a SHA1 reference to
get corrupted so it actually matches the SHA1 of another object with the
same size (the delta header stores the expected size of the base object
to apply against) are virtually zero. In the OBJ_OFS_DELTA case, the
reference is a pack offset which would have to match the start boundary
of a different base object but still with the same size, and although this
is relatively much more "probable" than in the OBJ_REF_DELTA case, the
probability is also about zero in absolute terms. Still, the possibility
exists as demonstrated in t5302 and is certainly greater than a SHA1
collision, especially in the OBJ_OFS_DELTA case which is now the default
when repacking.
Again, repacking by reusing existing pack data is OK since the per object
CRC provided by index v2 guards against any such corruptions. What t5302
failed to test is a full repack in such case.
The Solution
------------
As unlikely as this kind of stealth corruption can be in practice, it
certainly isn't acceptable to propagate it into a freshly created pack.
But, because this is so unlikely, we don't want to pay the run time cost
associated with extra validation checks all the time either. Furthermore,
consequences of such corruption in anything but repacking should be rather
visible, and even if it could be quite unpleasant, it still has far less
severe consequences than actively creating bad packs.
So the best compromize is to check packed object CRC when unpacking
objects, and only during the compression/writing phase of a repack, and
only when not streaming the result. The cost of this is minimal (less
than 1% CPU time), and visible only with a full repack.
Someone with a stats background could provide an objective evaluation of
this, but I suspect that it's bad RAM that has more potential for data
corruptions at this point, even in those cases where this extra check
is not performed. Still, it is best to prevent a known hole for
corruption when recreating object data into a new pack.
What about the streamed pack case? Well, any client receiving a pack
must always consider that pack as untrusty and perform full validation
anyway, hence no such stealth corruption could be propagated to remote
repositoryes already. It is therefore worthless doing local validation
in that case.
Signed-off-by: Nicolas Pitre <nico@cam.org>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2008-10-31 23:31:08 +08:00
|
|
|
}
|
|
|
|
|
2013-03-28 04:03:42 +08:00
|
|
|
/* PHASE 2: handle the base */
|
|
|
|
switch (type) {
|
2006-09-21 12:06:49 +08:00
|
|
|
case OBJ_OFS_DELTA:
|
|
|
|
case OBJ_REF_DELTA:
|
2013-03-28 04:03:42 +08:00
|
|
|
if (data)
|
|
|
|
die("BUG in unpack_entry: left loop at a valid delta");
|
2006-12-23 15:33:25 +08:00
|
|
|
break;
|
2005-06-29 05:21:02 +08:00
|
|
|
case OBJ_COMMIT:
|
|
|
|
case OBJ_TREE:
|
|
|
|
case OBJ_BLOB:
|
|
|
|
case OBJ_TAG:
|
2013-03-28 04:03:42 +08:00
|
|
|
if (!base_from_cache)
|
|
|
|
data = unpack_compressed_entry(p, &w_curs, curpos, size);
|
2006-12-23 15:33:25 +08:00
|
|
|
break;
|
2005-06-27 18:35:33 +08:00
|
|
|
default:
|
2008-06-24 09:23:39 +08:00
|
|
|
data = NULL;
|
|
|
|
error("unknown object type %i at offset %"PRIuMAX" in %s",
|
2013-03-28 04:03:42 +08:00
|
|
|
type, (uintmax_t)obj_offset, p->pack_name);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* PHASE 3: apply deltas in order */
|
|
|
|
|
|
|
|
/* invariants:
|
|
|
|
* 'data' holds the base data, or NULL if there was corruption
|
|
|
|
*/
|
|
|
|
while (delta_stack_nr) {
|
|
|
|
void *delta_data;
|
|
|
|
void *base = data;
|
|
|
|
unsigned long delta_size, base_size = size;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
data = NULL;
|
|
|
|
|
|
|
|
if (base)
|
|
|
|
add_delta_base_cache(p, obj_offset, base, base_size, type);
|
|
|
|
|
|
|
|
if (!base) {
|
|
|
|
/*
|
|
|
|
* We're probably in deep shit, but let's try to fetch
|
|
|
|
* the required base anyway from another pack or loose.
|
|
|
|
* This is costly but should happen only in the presence
|
|
|
|
* of a corrupted pack, and is better than failing outright.
|
|
|
|
*/
|
|
|
|
struct revindex_entry *revidx;
|
|
|
|
const unsigned char *base_sha1;
|
|
|
|
revidx = find_pack_revindex(p, obj_offset);
|
|
|
|
if (revidx) {
|
|
|
|
base_sha1 = nth_packed_object_sha1(p, revidx->nr);
|
|
|
|
error("failed to read delta base object %s"
|
|
|
|
" at offset %"PRIuMAX" from %s",
|
|
|
|
sha1_to_hex(base_sha1), (uintmax_t)obj_offset,
|
|
|
|
p->pack_name);
|
|
|
|
mark_bad_packed_object(p, base_sha1);
|
|
|
|
base = read_object(base_sha1, &type, &base_size);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
i = --delta_stack_nr;
|
|
|
|
obj_offset = delta_stack[i].obj_offset;
|
|
|
|
curpos = delta_stack[i].curpos;
|
|
|
|
delta_size = delta_stack[i].size;
|
|
|
|
|
|
|
|
if (!base)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
delta_data = unpack_compressed_entry(p, &w_curs, curpos, delta_size);
|
|
|
|
|
|
|
|
if (!delta_data) {
|
|
|
|
error("failed to unpack compressed delta "
|
|
|
|
"at offset %"PRIuMAX" from %s",
|
|
|
|
(uintmax_t)curpos, p->pack_name);
|
|
|
|
data = NULL;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
data = patch_delta(base, base_size,
|
|
|
|
delta_data, delta_size,
|
|
|
|
&size);
|
|
|
|
if (!data)
|
|
|
|
die("failed to apply delta");
|
|
|
|
|
2013-05-30 21:56:21 +08:00
|
|
|
free(delta_data);
|
2005-06-27 18:35:33 +08:00
|
|
|
}
|
2013-03-28 04:03:42 +08:00
|
|
|
|
|
|
|
*final_type = type;
|
|
|
|
*final_size = size;
|
|
|
|
|
2006-12-23 15:34:08 +08:00
|
|
|
unuse_pack(&w_curs);
|
2007-02-27 03:55:59 +08:00
|
|
|
return data;
|
2005-06-27 18:35:33 +08:00
|
|
|
}
|
|
|
|
|
2007-05-26 13:24:19 +08:00
|
|
|
const unsigned char *nth_packed_object_sha1(struct packed_git *p,
|
2007-04-05 04:49:04 +08:00
|
|
|
uint32_t n)
|
2005-06-29 05:56:57 +08:00
|
|
|
{
|
2007-03-17 04:42:50 +08:00
|
|
|
const unsigned char *index = p->index_data;
|
2007-05-26 13:24:19 +08:00
|
|
|
if (!index) {
|
|
|
|
if (open_pack_index(p))
|
|
|
|
return NULL;
|
|
|
|
index = p->index_data;
|
|
|
|
}
|
2007-04-09 13:06:28 +08:00
|
|
|
if (n >= p->num_objects)
|
2007-04-05 04:49:04 +08:00
|
|
|
return NULL;
|
2007-04-09 13:06:35 +08:00
|
|
|
index += 4 * 256;
|
|
|
|
if (p->index_version == 1) {
|
|
|
|
return index + 24 * n + 4;
|
|
|
|
} else {
|
|
|
|
index += 8;
|
|
|
|
return index + 20 * n;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-06-25 11:17:12 +08:00
|
|
|
off_t nth_packed_object_offset(const struct packed_git *p, uint32_t n)
|
2007-04-09 13:06:35 +08:00
|
|
|
{
|
|
|
|
const unsigned char *index = p->index_data;
|
|
|
|
index += 4 * 256;
|
|
|
|
if (p->index_version == 1) {
|
|
|
|
return ntohl(*((uint32_t *)(index + 24 * n)));
|
|
|
|
} else {
|
|
|
|
uint32_t off;
|
|
|
|
index += 8 + p->num_objects * (20 + 4);
|
|
|
|
off = ntohl(*((uint32_t *)(index + 4 * n)));
|
|
|
|
if (!(off & 0x80000000))
|
|
|
|
return off;
|
|
|
|
index += p->num_objects * 4 + (off & 0x7fffffff) * 8;
|
|
|
|
return (((uint64_t)ntohl(*((uint32_t *)(index + 0)))) << 32) |
|
|
|
|
ntohl(*((uint32_t *)(index + 4)));
|
|
|
|
}
|
2005-06-29 05:56:57 +08:00
|
|
|
}
|
|
|
|
|
2007-03-07 09:44:30 +08:00
|
|
|
/*
 * Look up "sha1" in the index of pack "p" and return the offset of the
 * matching object inside the packfile, or 0 when it is not present
 * (0 is never a valid object offset, as the pack header comes first).
 *
 * The 256-entry fan-out table narrows the search range by the first
 * byte of the SHA-1; the range is then searched either with a plain
 * binary search or, when GIT_USE_LOOKUP is set, with the experimental
 * interpolating search in sha1_entry_pos().
 */
off_t find_pack_entry_one(const unsigned char *sha1,
			  struct packed_git *p)
{
	const uint32_t *level1_ofs = p->index_data;
	const unsigned char *index = p->index_data;
	unsigned hi, lo, stride;
	static int use_lookup = -1;	/* -1: environment not consulted yet */
	static int debug_lookup = -1;

	if (debug_lookup < 0)
		debug_lookup = !!getenv("GIT_DEBUG_LOOKUP");

	if (!index) {
		/* Index not mapped yet; open it on demand. */
		if (open_pack_index(p))
			return 0;
		level1_ofs = p->index_data;
		index = p->index_data;
	}
	if (p->index_version > 1) {
		/* v2 indexes carry an 8-byte header before the fan-out. */
		level1_ofs += 2;
		index += 8;
	}
	/* Step past the fan-out table to the sorted entry table. */
	index += 4 * 256;
	/* fan-out[b] counts objects whose first SHA-1 byte is <= b */
	hi = ntohl(level1_ofs[*sha1]);
	lo = ((*sha1 == 0x0) ? 0 : ntohl(level1_ofs[*sha1 - 1]));
	if (p->index_version > 1) {
		stride = 20;	/* v2: bare 20-byte SHA-1 per entry */
	} else {
		stride = 24;	/* v1: 4-byte offset + 20-byte SHA-1 */
		index += 4;	/* point at the SHA-1 within the first entry */
	}

	if (debug_lookup)
		printf("%02x%02x%02x... lo %u hi %u nr %"PRIu32"\n",
		       sha1[0], sha1[1], sha1[2], lo, hi, p->num_objects);

	if (use_lookup < 0)
		use_lookup = !!getenv("GIT_USE_LOOKUP");
	if (use_lookup) {
		int pos = sha1_entry_pos(index, stride, 0,
					 lo, hi, p->num_objects, sha1);
		if (pos < 0)
			return 0;
		return nth_packed_object_offset(p, pos);
	}

	/* Conventional binary search over the half-open range [lo, hi). */
	do {
		unsigned mi = (lo + hi) / 2;
		int cmp = hashcmp(index + mi * stride, sha1);

		if (debug_lookup)
			printf("lo %u hi %u rg %u mi %u\n",
			       lo, hi, hi - lo, mi);
		if (!cmp)
			return nth_packed_object_offset(p, mi);
		if (cmp > 0)
			hi = mi;
		else
			lo = mi+1;
	} while (lo < hi);
	return 0;
}
|
|
|
|
|
pack-objects: protect against disappearing packs
It's possible that while pack-objects is running, a
simultaneously running prune process might delete a pack
that we are interested in. Because we load the pack indices
early on, we know that the pack contains our item, but by
the time we try to open and map it, it is gone.
Since c715f78, we already protect against this in the normal
object access code path, but pack-objects accesses the packs
at a lower level. In the normal access path, we call
find_pack_entry, which will call find_pack_entry_one on each
pack index, which does the actual lookup. If it gets a hit,
we will actually open and verify the validity of the
matching packfile (using c715f78's is_pack_valid). If we
can't open it, we'll issue a warning and pretend that we
didn't find it, causing us to go on to the next pack (or on
to loose objects).
Furthermore, we will cache the descriptor to the opened
packfile. Which means that later, when we actually try to
access the object, we are likely to still have that packfile
opened, and won't care if it has been unlinked from the
filesystem.
Notice the "likely" above. If there is another pack access
in the interim, and we run out of descriptors, we could
close the pack. And then a later attempt to access the
closed pack could fail (we'll try to re-open it, of course,
but it may have been deleted). In practice, this doesn't
happen because we tend to look up items and then access them
immediately.
Pack-objects does not follow this code path. Instead, it
accesses the packs at a much lower level, using
find_pack_entry_one directly. This means we skip the
is_pack_valid check, and may end up with the name of a
packfile, but no open descriptor.
We can add the same is_pack_valid check here. Unfortunately,
the access patterns of pack-objects are not quite as nice
for keeping lookup and object access together. We look up
each object as we find out about it, and the only later when
writing the packfile do we necessarily access it. Which
means that the opened packfile may be closed in the interim.
In practice, however, adding this check still has value, for
three reasons.
1. If you have a reasonable number of packs and/or a
reasonable file descriptor limit, you can keep all of
your packs open simultaneously. If this is the case,
then the race is impossible to trigger.
2. Even if you can't keep all packs open at once, you
may end up keeping the deleted one open (i.e., you may
get lucky).
3. The race window is shortened. You may notice early that
the pack is gone, and not try to access it. Triggering
the problem without this check means deleting the pack
any time after we read the list of index files, but
before we access the looked-up objects. Triggering it
with this check means deleting the pack means deleting
the pack after we do a lookup (and successfully access
the packfile), but before we access the object. Which
is a smaller window.
Acked-by: Nicolas Pitre <nico@fluxnic.net>
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2011-10-15 02:03:48 +08:00
|
|
|
int is_pack_valid(struct packed_git *p)
|
2011-03-03 02:01:54 +08:00
|
|
|
{
|
|
|
|
/* An already open pack is known to be valid. */
|
|
|
|
if (p->pack_fd != -1)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
/* If the pack has one window completely covering the
|
|
|
|
* file size, the pack is known to be valid even if
|
|
|
|
* the descriptor is not currently open.
|
|
|
|
*/
|
|
|
|
if (p->windows) {
|
|
|
|
struct pack_window *w = p->windows;
|
|
|
|
|
|
|
|
if (!w->offset && w->len == p->pack_size)
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Force the pack to open to prove its valid. */
|
|
|
|
return !open_packed_git(p);
|
|
|
|
}
|
|
|
|
|
2012-02-01 21:48:54 +08:00
|
|
|
/*
 * Look up "sha1" in the single pack "p"; on success fill "e" with its
 * location and return 1.  Returns 0 when the object is absent from
 * this pack, previously marked bad in it, or the packfile itself can
 * no longer be accessed.
 */
static int fill_pack_entry(const unsigned char *sha1,
			   struct pack_entry *e,
			   struct packed_git *p)
{
	off_t offset;

	/* Skip objects this pack is known to serve corrupted. */
	if (p->num_bad_objects) {
		unsigned i;
		for (i = 0; i < p->num_bad_objects; i++)
			if (!hashcmp(sha1, p->bad_object_sha1 + 20 * i))
				return 0;
	}

	offset = find_pack_entry_one(sha1, p);
	if (!offset)
		return 0;

	/*
	 * We are about to tell the caller where they can locate the
	 * requested object.  We better make sure the packfile is
	 * still here and can be accessed before supplying that
	 * answer, as it may have been deleted since the index was
	 * loaded!
	 */
	if (!is_pack_valid(p)) {
		warning("packfile %s cannot be accessed", p->pack_name);
		return 0;
	}

	e->offset = offset;
	e->p = p;
	hashcpy(e->sha1, sha1);
	return 1;
}
|
|
|
|
|
2009-03-20 11:47:54 +08:00
|
|
|
static int find_pack_entry(const unsigned char *sha1, struct pack_entry *e)
|
2005-06-27 18:35:33 +08:00
|
|
|
{
|
|
|
|
struct packed_git *p;
|
2006-09-21 12:05:37 +08:00
|
|
|
|
2005-06-27 18:35:33 +08:00
|
|
|
prepare_packed_git();
|
2007-05-31 10:48:13 +08:00
|
|
|
if (!packed_git)
|
|
|
|
return 0;
|
2005-06-27 18:35:33 +08:00
|
|
|
|
2012-02-01 21:48:55 +08:00
|
|
|
if (last_found_pack && fill_pack_entry(sha1, e, last_found_pack))
|
|
|
|
return 1;
|
2008-06-24 09:23:39 +08:00
|
|
|
|
2012-02-01 21:48:55 +08:00
|
|
|
for (p = packed_git; p; p = p->next) {
|
|
|
|
if (p == last_found_pack || !fill_pack_entry(sha1, e, p))
|
|
|
|
continue;
|
2007-05-31 10:48:13 +08:00
|
|
|
|
2012-02-01 21:48:55 +08:00
|
|
|
last_found_pack = p;
|
|
|
|
return 1;
|
|
|
|
}
|
2005-06-27 18:35:33 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2007-06-07 15:04:01 +08:00
|
|
|
struct packed_git *find_sha1_pack(const unsigned char *sha1,
|
2005-08-01 08:53:44 +08:00
|
|
|
struct packed_git *packs)
|
|
|
|
{
|
|
|
|
struct packed_git *p;
|
|
|
|
|
|
|
|
for (p = packs; p; p = p->next) {
|
2006-09-21 12:05:37 +08:00
|
|
|
if (find_pack_entry_one(sha1, p))
|
2005-08-01 08:53:44 +08:00
|
|
|
return p;
|
|
|
|
}
|
|
|
|
return NULL;
|
2007-02-27 03:55:59 +08:00
|
|
|
|
2005-08-01 08:53:44 +08:00
|
|
|
}
|
|
|
|
|
2007-02-27 03:55:59 +08:00
|
|
|
/*
 * Read the header of the loose object "sha1" and report its type
 * (as the return value, negative on error); optionally store the
 * object's inflated size in *sizep.
 */
static int sha1_loose_object_info(const unsigned char *sha1, unsigned long *sizep)
{
	int status;
	unsigned long mapsize, size;
	void *map;
	git_zstream stream;
	char hdr[32];

	map = map_sha1_file(sha1, &mapsize);
	if (!map)
		return error("unable to find %s", sha1_to_hex(sha1));
	/* Only the header needs inflating to learn type and size. */
	if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0)
		status = error("unable to unpack %s header",
			       sha1_to_hex(sha1));
	else if ((status = parse_sha1_header(hdr, &size)) < 0)
		status = error("unable to parse %s header", sha1_to_hex(sha1));
	else if (sizep)
		*sizep = size;
	/* unpack_sha1_header initialized the stream; always release it. */
	git_inflate_end(&stream);
	munmap(map, mapsize);
	return status;
}
|
|
|
|
|
2011-05-13 06:51:38 +08:00
|
|
|
/* returns enum object_type or negative */
int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi)
{
	struct cached_object *co;
	struct pack_entry e;
	int status, rtype;

	/* "Pretend" objects registered via pretend_sha1_file() win first. */
	co = find_cached_object(sha1);
	if (co) {
		if (oi->sizep)
			*(oi->sizep) = co->size;
		oi->whence = OI_CACHED;
		return co->type;
	}

	if (!find_pack_entry(sha1, &e)) {
		/* Most likely it's a loose object. */
		status = sha1_loose_object_info(sha1, oi->sizep);
		if (status >= 0) {
			oi->whence = OI_LOOSE;
			return status;
		}

		/* Not a loose object; someone else may have just packed it. */
		reprepare_packed_git();
		if (!find_pack_entry(sha1, &e))
			return status;
	}

	status = packed_object_info(e.p, e.offset, oi->sizep, &rtype);
	if (status < 0) {
		/* Pack entry unusable: mark it bad and retry the lookup. */
		mark_bad_packed_object(e.p, sha1);
		status = sha1_object_info_extended(sha1, oi);
	} else if (in_delta_base_cache(e.p, e.offset)) {
		oi->whence = OI_DBCACHED;
	} else {
		oi->whence = OI_PACKED;
		oi->u.packed.offset = e.offset;
		oi->u.packed.pack = e.p;
		oi->u.packed.is_delta = (rtype == OBJ_REF_DELTA ||
					 rtype == OBJ_OFS_DELTA);
	}

	return status;
}
|
|
|
|
|
2011-05-13 06:51:38 +08:00
|
|
|
int sha1_object_info(const unsigned char *sha1, unsigned long *sizep)
|
|
|
|
{
|
|
|
|
struct object_info oi;
|
|
|
|
|
|
|
|
oi.sizep = sizep;
|
|
|
|
return sha1_object_info_extended(sha1, &oi);
|
|
|
|
}
|
|
|
|
|
2007-02-27 03:55:59 +08:00
|
|
|
/*
 * Read and inflate object "sha1" from whichever pack contains it,
 * returning a newly allocated buffer (NULL when not packed).  Type
 * and size are reported through *type and *size.
 */
static void *read_packed_sha1(const unsigned char *sha1,
			      enum object_type *type, unsigned long *size)
{
	struct pack_entry e;
	void *data;

	if (!find_pack_entry(sha1, &e))
		return NULL;
	data = cache_or_unpack_entry(e.p, e.offset, size, type, 1);
	if (!data) {
		/*
		 * We're probably in deep shit, but let's try to fetch
		 * the required object anyway from another pack or loose.
		 * This should happen only in the presence of a corrupted
		 * pack, and is better than failing outright.
		 */
		error("failed to read object %s at offset %"PRIuMAX" from %s",
		      sha1_to_hex(sha1), (uintmax_t)e.offset, e.p->pack_name);
		mark_bad_packed_object(e.p, sha1);
		data = read_object(sha1, type, size);
	}
	return data;
}
|
|
|
|
|
2007-02-27 03:55:59 +08:00
|
|
|
/*
 * Register an in-memory "pretend" object: compute its object name into
 * "sha1" and, unless the object already exists (on disk or already
 * cached), remember a private copy of the buffer so later reads of
 * that name succeed without touching the object database.
 * Always returns 0.
 */
int pretend_sha1_file(void *buf, unsigned long len, enum object_type type,
		      unsigned char *sha1)
{
	struct cached_object *co;

	hash_sha1_file(buf, len, typename(type), sha1);
	if (has_sha1_file(sha1) || find_cached_object(sha1))
		return 0;
	/* Grow the cached_objects array geometrically when full. */
	if (cached_object_alloc <= cached_object_nr) {
		cached_object_alloc = alloc_nr(cached_object_alloc);
		cached_objects = xrealloc(cached_objects,
					  sizeof(*cached_objects) *
					  cached_object_alloc);
	}
	co = &cached_objects[cached_object_nr++];
	co->size = len;
	co->type = type;
	/* Keep our own copy; the caller retains ownership of "buf". */
	co->buf = xmalloc(len);
	memcpy(co->buf, buf, len);
	hashcpy(co->sha1, sha1);
	return 0;
}
|
|
|
|
|
2009-01-13 01:42:24 +08:00
|
|
|
/*
 * Core object reader: try the pretend-object cache, then packs, then
 * loose storage, then packs once more after re-scanning (in case the
 * object was packed concurrently).  Returns a newly allocated buffer,
 * or NULL if the object cannot be found.
 */
static void *read_object(const unsigned char *sha1, enum object_type *type,
			 unsigned long *size)
{
	unsigned long mapsize;
	void *map, *buf;
	struct cached_object *co;

	/* In-memory objects from pretend_sha1_file() take precedence. */
	co = find_cached_object(sha1);
	if (co) {
		*type = co->type;
		*size = co->size;
		return xmemdupz(co->buf, co->size);
	}

	buf = read_packed_sha1(sha1, type, size);
	if (buf)
		return buf;
	map = map_sha1_file(sha1, &mapsize);
	if (map) {
		buf = unpack_sha1_file(map, mapsize, type, size, sha1);
		munmap(map, mapsize);
		return buf;
	}
	/* Not loose either; someone may have just packed it. */
	reprepare_packed_git();
	return read_packed_sha1(sha1, type, size);
}
|
|
|
|
|
2010-10-29 02:13:06 +08:00
|
|
|
/*
 * This function dies on corrupt objects; the callers who want to
 * deal with them should arrange to call read_object() and give error
 * messages themselves.
 */
void *read_sha1_file_extended(const unsigned char *sha1,
			      enum object_type *type,
			      unsigned long *size,
			      unsigned flag)
{
	void *data;
	char *path;
	const struct packed_git *p;
	/* Optionally redirect through the object-replacement mechanism. */
	const unsigned char *repl = (flag & READ_SHA1_FILE_REPLACE)
		? lookup_replace_object(sha1) : sha1;

	/* Clear errno so a later non-ENOENT value is known to be ours. */
	errno = 0;
	data = read_object(repl, type, size);
	if (data)
		return data;

	if (errno && errno != ENOENT)
		die_errno("failed to read object %s", sha1_to_hex(sha1));

	/* die if we replaced an object with one that does not exist */
	if (repl != sha1)
		die("replacement %s not found for %s",
		    sha1_to_hex(repl), sha1_to_hex(sha1));

	/* A loose file exists but could not be read: it is corrupt. */
	if (has_loose_object(repl)) {
		path = sha1_file_name(sha1);
		die("loose object %s (stored in %s) is corrupt",
		    sha1_to_hex(repl), path);
	}

	if ((p = has_packed_and_bad(repl)) != NULL)
		die("packed object %s (stored in %s) is corrupt",
		    sha1_to_hex(repl), p->pack_name);

	return NULL;
}
|
|
|
|
|
2005-04-29 07:42:27 +08:00
|
|
|
/*
 * Read object "sha1", peeling references (commit -> its tree,
 * tag -> its object) until an object of "required_type_name" is
 * reached.  Returns the object's buffer (caller frees) with *size
 * set, and optionally reports the peeled object name through
 * actual_sha1_return; returns NULL when peeling cannot reach the
 * required type or a read fails.
 */
void *read_object_with_reference(const unsigned char *sha1,
				 const char *required_type_name,
				 unsigned long *size,
				 unsigned char *actual_sha1_return)
{
	enum object_type type, required_type;
	void *buffer;
	unsigned long isize;
	unsigned char actual_sha1[20];

	required_type = type_from_string(required_type_name);
	hashcpy(actual_sha1, sha1);
	while (1) {
		int ref_length = -1;
		const char *ref_type = NULL;

		buffer = read_sha1_file(actual_sha1, &type, &isize);
		if (!buffer)
			return NULL;
		if (type == required_type) {
			*size = isize;
			if (actual_sha1_return)
				hashcpy(actual_sha1_return, actual_sha1);
			return buffer;
		}
		/* Handle references */
		else if (type == OBJ_COMMIT)
			ref_type = "tree ";
		else if (type == OBJ_TAG)
			ref_type = "object ";
		else {
			/* Neither the wanted type nor peelable: give up. */
			free(buffer);
			return NULL;
		}
		ref_length = strlen(ref_type);

		/* Expect "<ref_type><40-hex-sha1>" at the buffer start. */
		if (ref_length + 40 > isize ||
		    memcmp(buffer, ref_type, ref_length) ||
		    get_sha1_hex((char *) buffer + ref_length, actual_sha1)) {
			free(buffer);
			return NULL;
		}
		free(buffer);
		/* Now we have the ID of the referred-to object in
		 * actual_sha1.  Check again. */
	}
}
|
|
|
|
|
2007-03-21 04:02:09 +08:00
|
|
|
/*
 * Build the object header ("<type> <len>\0") into hdr/hdrlen and
 * compute the object name of header + payload into "sha1".
 */
static void write_sha1_file_prepare(const void *buf, unsigned long len,
				    const char *type, unsigned char *sha1,
				    char *hdr, int *hdrlen)
{
	git_SHA_CTX c;

	/* Generate the header; the trailing NUL is part of the object. */
	*hdrlen = sprintf(hdr, "%s %lu", type, len)+1;

	/* Sha1.. */
	git_SHA1_Init(&c);
	git_SHA1_Update(&c, hdr, *hdrlen);
	git_SHA1_Update(&c, buf, len);
	git_SHA1_Final(sha1, &c);
}
|
|
|
|
|
Create object subdirectories on demand
This makes it possible to have a "sparse" git object subdirectory
structure, something that has become much more attractive now that people
use pack-files all the time.
As a result of pack-files, a git object directory doesn't necessarily have
any individual objects lying around, and in that case it's just wasting
space to keep the empty first-level object directories around: on many
filesystems the 256 empty directories will be aboue 1MB of diskspace.
Even more importantly, after you re-pack a project that _used_ to be
unpacked, you could be left with huge directories that no longer contain
anything, but that waste space and take time to look through.
With this change, "git prune-packed" can just do an rmdir() on the
directories, and they'll get removed if empty, and re-created on demand.
This patch also tries to fix up "write_sha1_from_fd()" to use the new
common infrastructure for creating the object files, closing a hole where
we might otherwise leave half-written objects in the object database.
[jc: I unoptimized the part that really removes the fan-out directories
to ease transition. init-db still wastes 1MB of diskspace to hold 256
empty fan-outs, and prune-packed rmdir()'s the grown but empty directories,
but runs mkdir() immediately after that -- reducing the saving from 150KB
to 146KB. These parts will be re-introduced when everybody has the
on-demand capability.]
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Signed-off-by: Junio C Hamano <junkio@cox.net>
2005-10-09 06:54:01 +08:00
|
|
|
/*
 * Move the just written object into its final resting place.
 * NEEDSWORK: this should be renamed to finalize_temp_file() as
 * "moving" is only a part of what it does, when no patch between
 * master to pu changes the call sites of this function.
 *
 * Tries link()+unlink() first (which detects collisions), falling
 * back to rename() where links don't work or when configured to use
 * renames.  Returns 0 on success, negative on error.
 */
int move_temp_to_file(const char *tmpfile, const char *filename)
{
	int ret = 0;

	if (object_creation_mode == OBJECT_CREATION_USES_RENAMES)
		goto try_rename;
	else if (link(tmpfile, filename))
		ret = errno;

	/*
	 * Coda hack - coda doesn't like cross-directory links,
	 * so we fall back to a rename, which will mean that it
	 * won't be able to check collisions, but that's not a
	 * big deal.
	 *
	 * The same holds for FAT formatted media.
	 *
	 * When this succeeds, we just return.  We have nothing
	 * left to unlink.
	 */
	if (ret && ret != EEXIST) {
	try_rename:
		if (!rename(tmpfile, filename))
			goto out;
		ret = errno;
	}
	/* link() succeeded (or failed with EEXIST): drop the temp file. */
	unlink_or_warn(tmpfile);
	if (ret) {
		if (ret != EEXIST) {
			return error("unable to write sha1 filename %s: %s", filename, strerror(ret));
		}
		/* FIXME!!! Collision check here ? */
	}

out:
	if (adjust_shared_perm(filename))
		return error("unable to set permission to '%s'", filename);
	return 0;
}
|
|
|
|
|
2006-05-24 23:30:54 +08:00
|
|
|
/*
 * Write "len" bytes of "buf" to "fd", reporting (not dying on)
 * failure.  Returns 0 on success, negative on error.
 */
static int write_buffer(int fd, const void *buf, size_t len)
{
	if (write_in_full(fd, buf, len) < 0)
		return error("file write error (%s)", strerror(errno));
	return 0;
}
|
|
|
|
|
2007-03-21 04:02:09 +08:00
|
|
|
/*
 * Compute the object name of a buffer of the given type into "sha1"
 * without writing anything to the object database.  Always returns 0.
 */
int hash_sha1_file(const void *buf, unsigned long len, const char *type,
		   unsigned char *sha1)
{
	char hdr[32];
	int hdrlen;
	write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen);
	return 0;
}
|
|
|
|
|
2008-06-11 09:47:18 +08:00
|
|
|
/* Finalize a file on disk, and close it. */
static void close_sha1_file(int fd)
{
	/* Honor core.fsyncobjectfiles before the descriptor goes away. */
	if (fsync_object_files)
		fsync_or_die(fd, "sha1 file");
	/* close() can report deferred write errors; treat them as fatal. */
	if (close(fd) != 0)
		die_errno("error when closing sha1 file");
}
|
|
|
|
|
2008-06-15 01:50:12 +08:00
|
|
|
/* Size of directory component, including the ending '/' */
static inline int directory_size(const char *filename)
{
	const char *last_slash = strrchr(filename, '/');

	/* No slash at all means no directory component. */
	return last_slash ? (int)(last_slash - filename + 1) : 0;
}
|
|
|
|
|
|
|
|
/*
 * This creates a temporary file in the same directory as the final
 * 'filename'
 *
 * We want to avoid cross-directory filename renames, because those
 * can have problems on various filesystems (FAT, NFS, Coda).
 *
 * Returns an open descriptor for the temp file (its path is written
 * into "buffer"), or -1 with errno set on failure.
 */
static int create_tmpfile(char *buffer, size_t bufsiz, const char *filename)
{
	int fd, dirlen = directory_size(filename);

	/* Need room for the directory plus "tmp_obj_XXXXXX" and NUL. */
	if (dirlen + 20 > bufsiz) {
		errno = ENAMETOOLONG;
		return -1;
	}
	memcpy(buffer, filename, dirlen);
	strcpy(buffer + dirlen, "tmp_obj_XXXXXX");
	fd = git_mkstemp_mode(buffer, 0444);
	/*
	 * ENOENT here most likely means the fan-out directory does not
	 * exist yet (fan-outs are created on demand); create it and
	 * retry once.
	 */
	if (fd < 0 && dirlen && errno == ENOENT) {
		/* Make sure the directory exists */
		memcpy(buffer, filename, dirlen);
		buffer[dirlen-1] = 0;
		if (mkdir(buffer, 0777) || adjust_shared_perm(buffer))
			return -1;

		/* Try again */
		strcpy(buffer + dirlen - 1, "/tmp_obj_XXXXXX");
		fd = git_mkstemp_mode(buffer, 0444);
	}
	return fd;
}
|
|
|
|
|
2008-05-14 13:32:48 +08:00
|
|
|
/*
 * Deflate header+body into a temporary file, verify that the content
 * still hashes to "sha1", and move the result into place as a loose
 * object.  If "mtime" is non-zero, the temp file's times are set to it
 * before the final rename (used to back-date recreated objects).
 *
 * Returns the result of move_temp_to_file() on success, or -1 (via
 * error()) when the temporary file cannot be created; die()s on
 * deflate failure or if the data hashes differently than "sha1".
 */
static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen,
			      const void *buf, unsigned long len, time_t mtime)
{
	int fd, ret;
	unsigned char compressed[4096];
	git_zstream stream;
	git_SHA_CTX c;
	unsigned char parano_sha1[20];
	char *filename;
	static char tmp_file[PATH_MAX];

	filename = sha1_file_name(sha1);
	fd = create_tmpfile(tmp_file, sizeof(tmp_file), filename);
	if (fd < 0) {
		if (errno == EACCES)
			return error("insufficient permission for adding an object to repository database %s", get_object_directory());
		else
			return error("unable to create temporary file: %s", strerror(errno));
	}

	/* Set it up */
	memset(&stream, 0, sizeof(stream));
	git_deflate_init(&stream, zlib_compression_level);
	stream.next_out = compressed;
	stream.avail_out = sizeof(compressed);
	git_SHA1_Init(&c);

	/* First header.. */
	stream.next_in = (unsigned char *)hdr;
	stream.avail_in = hdrlen;
	/* the header is tiny, so it always fits in the output buffer;
	 * Z_OK here only ever means "call me again" */
	while (git_deflate(&stream, 0) == Z_OK)
		; /* nothing */
	git_SHA1_Update(&c, hdr, hdrlen);

	/* Then the data itself.. */
	stream.next_in = (void *)buf;
	stream.avail_in = len;
	do {
		/* hash exactly the bytes deflate consumed this round */
		unsigned char *in0 = stream.next_in;
		ret = git_deflate(&stream, Z_FINISH);
		git_SHA1_Update(&c, in0, stream.next_in - in0);
		if (write_buffer(fd, compressed, stream.next_out - compressed) < 0)
			die("unable to write sha1 file");
		stream.next_out = compressed;
		stream.avail_out = sizeof(compressed);
	} while (ret == Z_OK);

	/* Z_STREAM_END is the only valid terminal state for Z_FINISH;
	 * anything else is an internal zlib error */
	if (ret != Z_STREAM_END)
		die("unable to deflate new object %s (%d)", sha1_to_hex(sha1), ret);
	ret = git_deflate_end_gently(&stream);
	if (ret != Z_OK)
		die("deflateEnd on object %s failed (%d)", sha1_to_hex(sha1), ret);
	/* paranoia: recompute the hash while deflating and make sure the
	 * source data did not change under us */
	git_SHA1_Final(parano_sha1, &c);
	if (hashcmp(sha1, parano_sha1) != 0)
		die("confused by unstable object source data for %s", sha1_to_hex(sha1));

	close_sha1_file(fd);

	if (mtime) {
		struct utimbuf utb;
		utb.actime = mtime;
		utb.modtime = mtime;
		/* best-effort: a failed utime() is only worth a warning */
		if (utime(tmp_file, &utb) < 0)
			warning("failed utime() on %s: %s",
				tmp_file, strerror(errno));
	}

	return move_temp_to_file(tmp_file, filename);
}
|
2005-04-24 09:47:23 +08:00
|
|
|
|
2010-04-02 08:03:18 +08:00
|
|
|
/*
 * Hash the given content as an object of the given type and, unless
 * the object already exists in the repository, write it out as a
 * loose object.  The computed object name is copied to "returnsha1"
 * when the caller asks for it.
 */
int write_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *returnsha1)
{
	unsigned char obj_sha1[20];
	char header[32];
	int header_len;

	/*
	 * Compute the name first: if we already have the object
	 * (typically in a pack), there is no point writing another
	 * .git/objects/??/?{38} copy of it.
	 */
	write_sha1_file_prepare(buf, len, type, obj_sha1, header, &header_len);
	if (returnsha1)
		hashcpy(returnsha1, obj_sha1);

	if (has_sha1_file(obj_sha1))
		return 0;

	return write_loose_object(obj_sha1, header, header_len, buf, len, 0);
}
|
|
|
|
|
|
|
|
/*
 * Make sure the object exists in loose form: if it is only available
 * packed, read it out of the pack and write it as a loose object with
 * the given mtime (used e.g. when unpacking unreachable objects so
 * they can age out via their timestamp).
 *
 * Returns 0 on success (including the already-loose case), or the
 * error() result on failure.
 */
int force_object_loose(const unsigned char *sha1, time_t mtime)
{
	void *buf;
	unsigned long len;
	enum object_type type;
	char hdr[32];
	int hdrlen;
	int ret;

	if (has_loose_object(sha1))
		return 0;
	buf = read_packed_sha1(sha1, &type, &len);
	if (!buf)
		return error("cannot read sha1_file for %s", sha1_to_hex(sha1));
	/*
	 * Bound the header write: "<type> <len>" always fits in 32
	 * bytes today, but snprintf protects us if that ever changes
	 * (the original unbounded sprintf would silently overflow).
	 */
	hdrlen = snprintf(hdr, sizeof(hdr), "%s %lu", typename(type), len) + 1;
	if (hdrlen > (int)sizeof(hdr))
		die("object header for %s overflows buffer", sha1_to_hex(sha1));
	ret = write_loose_object(sha1, hdr, hdrlen, buf, len, mtime);
	free(buf);

	return ret;
}
|
|
|
|
|
2005-08-01 08:53:44 +08:00
|
|
|
int has_pack_index(const unsigned char *sha1)
|
|
|
|
{
|
|
|
|
struct stat st;
|
|
|
|
if (stat(sha1_pack_index_name(sha1), &st))
|
|
|
|
return 0;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2009-02-28 15:15:53 +08:00
|
|
|
int has_sha1_pack(const unsigned char *sha1)
|
|
|
|
{
|
|
|
|
struct pack_entry e;
|
|
|
|
return find_pack_entry(sha1, &e);
|
|
|
|
}
|
|
|
|
|
2005-04-24 09:47:23 +08:00
|
|
|
int has_sha1_file(const unsigned char *sha1)
|
|
|
|
{
|
2005-06-27 18:35:33 +08:00
|
|
|
struct pack_entry e;
|
|
|
|
|
2009-02-28 15:15:53 +08:00
|
|
|
if (find_pack_entry(sha1, &e))
|
2005-06-27 18:35:33 +08:00
|
|
|
return 1;
|
2008-06-15 02:43:01 +08:00
|
|
|
return has_loose_object(sha1);
|
2005-04-24 09:47:23 +08:00
|
|
|
}
|
2005-05-02 14:45:49 +08:00
|
|
|
|
2011-02-05 18:52:21 +08:00
|
|
|
/*
 * Validate a tree object's raw buffer.  Walking it is the check:
 * tree_entry() die()s as soon as it hits a malformed entry.
 */
static void check_tree(const void *buf, size_t size)
{
	struct tree_desc desc;
	struct name_entry entry;

	init_tree_desc(&desc, buf, size);
	while (tree_entry(&desc, &entry))
		; /* nothing to do per entry */
}
|
|
|
|
|
|
|
|
/* Validate a commit object's raw buffer; die()s when unparsable. */
static void check_commit(const void *buf, size_t size)
{
	struct commit commit;

	memset(&commit, 0, sizeof(commit));
	if (parse_commit_buffer(&commit, buf, size))
		die("corrupt commit");
}
|
|
|
|
|
|
|
|
/* Validate a tag object's raw buffer; die()s when unparsable. */
static void check_tag(const void *buf, size_t size)
{
	struct tag tag;

	memset(&tag, 0, sizeof(tag));
	if (parse_tag_buffer(&tag, buf, size))
		die("corrupt tag");
}
|
|
|
|
|
2008-08-03 12:39:16 +08:00
|
|
|
/*
 * Hash (and optionally write as an object, when HASH_WRITE_OBJECT is
 * set in flags) an in-core buffer.  Blobs with a known working-tree
 * path go through the usual convert-to-git massaging first, and
 * HASH_FORMAT_CHECK makes trees/commits/tags die() on malformed
 * content.  Returns the write/hash result.
 */
static int index_mem(unsigned char *sha1, void *buf, size_t size,
		     enum object_type type,
		     const char *path, unsigned flags)
{
	int ret;
	int reallocated = 0;
	int write_object = flags & HASH_WRITE_OBJECT;

	if (!type)
		type = OBJ_BLOB;

	/*
	 * Convert blobs to git internal format; when we are only
	 * hashing (not writing), the safe-crlf check is disabled as
	 * nothing in the work tree can be hurt.
	 */
	if (type == OBJ_BLOB && path) {
		struct strbuf converted = STRBUF_INIT;
		if (convert_to_git(path, buf, size, &converted,
				   write_object ? safe_crlf : SAFE_CRLF_FALSE)) {
			buf = strbuf_detach(&converted, &size);
			reallocated = 1;
		}
	}

	if (flags & HASH_FORMAT_CHECK) {
		switch (type) {
		case OBJ_TREE:
			check_tree(buf, size);
			break;
		case OBJ_COMMIT:
			check_commit(buf, size);
			break;
		case OBJ_TAG:
			check_tag(buf, size);
			break;
		default:
			break; /* blobs need no format check */
		}
	}

	ret = write_object ?
		write_sha1_file(buf, size, typename(type), sha1) :
		hash_sha1_file(buf, size, typename(type), sha1);

	/* convert_to_git may have handed us a fresh allocation */
	if (reallocated)
		free(buf);
	return ret;
}
|
|
|
|
|
2011-05-08 16:47:34 +08:00
|
|
|
static int index_pipe(unsigned char *sha1, int fd, enum object_type type,
|
|
|
|
const char *path, unsigned flags)
|
|
|
|
{
|
|
|
|
struct strbuf sbuf = STRBUF_INIT;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (strbuf_read(&sbuf, fd, 4096) >= 0)
|
|
|
|
ret = index_mem(sha1, sbuf.buf, sbuf.len, type, path, flags);
|
|
|
|
else
|
|
|
|
ret = -1;
|
|
|
|
strbuf_release(&sbuf);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2010-02-21 14:32:19 +08:00
|
|
|
#define SMALL_FILE_SIZE (32*1024)
|
|
|
|
|
2011-05-08 16:47:34 +08:00
|
|
|
/*
 * Hash a regular file of known size through the in-core path.
 * Empty files skip I/O entirely, small ones are read into a heap
 * buffer, and everything else is mmapped read-only.
 */
static int index_core(unsigned char *sha1, int fd, size_t size,
		      enum object_type type, const char *path,
		      unsigned flags)
{
	int ret;

	if (!size)
		return index_mem(sha1, NULL, size, type, path, flags);

	if (size <= SMALL_FILE_SIZE) {
		/* small enough: a plain read beats the mmap setup cost */
		char *data = xmalloc(size);
		if (size == read_in_full(fd, data, size))
			ret = index_mem(sha1, data, size, type, path, flags);
		else
			ret = error("short read %s", strerror(errno));
		free(data);
	} else {
		/* larger files are mapped to avoid the extra copy */
		void *data = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
		ret = index_mem(sha1, data, size, type, path, flags);
		munmap(data, size);
	}
	return ret;
}
|
|
|
|
|
2011-05-08 16:47:35 +08:00
|
|
|
/*
|
2011-10-29 05:48:40 +08:00
|
|
|
* This creates one packfile per large blob unless bulk-checkin
|
|
|
|
* machinery is "plugged".
|
2011-05-08 16:47:35 +08:00
|
|
|
*
|
|
|
|
* This also bypasses the usual "convert-to-git" dance, and that is on
|
|
|
|
* purpose. We could write a streaming version of the converting
|
|
|
|
* functions and insert that before feeding the data to fast-import
|
do not stream large files to pack when filters are in use
Because git's object format requires us to specify the
number of bytes in the object in its header, we must know
the size before streaming a blob into the object database.
This is not a problem when adding a regular file, as we can
get the size from stat(). However, when filters are in use
(such as autocrlf, or the ident, filter, or eol
gitattributes), we have no idea what the ultimate size will
be.
The current code just punts on the whole issue and ignores
filter configuration entirely for files larger than
core.bigfilethreshold. This can generate confusing results
if you use filters for large binary files, as the filter
will suddenly stop working as the file goes over a certain
size. Rather than try to handle unknown input sizes with
streaming, this patch just turns off the streaming
optimization when filters are in use.
This has a slight performance regression in a very specific
case: if you have autocrlf on, but no gitattributes, a large
binary file will avoid the streaming code path because we
don't know beforehand whether it will need conversion or
not. But if you are handling large binary files, you should
be marking them as such via attributes (or at least not
using autocrlf, and instead marking your text files as
such). And the flip side is that if you have a large
_non_-binary file, there is a correctness improvement;
before we did not apply the conversion at all.
The first half of the new t1051 script covers these failures
on input. The second half tests the matching output code
paths. These already work correctly, and do not need any
adjustment.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2012-02-25 06:10:17 +08:00
|
|
|
* (or equivalent in-core API described above). However, that is
|
|
|
|
* somewhat complicated, as we do not know the size of the filter
|
|
|
|
* result, which we need to know beforehand when writing a git object.
|
|
|
|
* Since the primary motivation for trying to stream from the working
|
|
|
|
* tree file and to avoid mmaping it in core is to deal with large
|
|
|
|
* binary blobs, they generally do not want to get any conversion, and
|
|
|
|
* callers should avoid this code path when filters are requested.
|
2011-05-08 16:47:35 +08:00
|
|
|
*/
|
|
|
|
static int index_stream(unsigned char *sha1, int fd, size_t size,
|
|
|
|
enum object_type type, const char *path,
|
|
|
|
unsigned flags)
|
|
|
|
{
|
2011-10-29 05:48:40 +08:00
|
|
|
return index_bulk_checkin(sha1, fd, size, type, path, flags);
|
2011-05-08 16:47:35 +08:00
|
|
|
}
|
|
|
|
|
2011-05-08 16:47:34 +08:00
|
|
|
int index_fd(unsigned char *sha1, int fd, struct stat *st,
|
|
|
|
enum object_type type, const char *path, unsigned flags)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
size_t size = xsize_t(st->st_size);
|
|
|
|
|
|
|
|
if (!S_ISREG(st->st_mode))
|
|
|
|
ret = index_pipe(sha1, fd, type, path, flags);
|
do not stream large files to pack when filters are in use
Because git's object format requires us to specify the
number of bytes in the object in its header, we must know
the size before streaming a blob into the object database.
This is not a problem when adding a regular file, as we can
get the size from stat(). However, when filters are in use
(such as autocrlf, or the ident, filter, or eol
gitattributes), we have no idea what the ultimate size will
be.
The current code just punts on the whole issue and ignores
filter configuration entirely for files larger than
core.bigfilethreshold. This can generate confusing results
if you use filters for large binary files, as the filter
will suddenly stop working as the file goes over a certain
size. Rather than try to handle unknown input sizes with
streaming, this patch just turns off the streaming
optimization when filters are in use.
This has a slight performance regression in a very specific
case: if you have autocrlf on, but no gitattributes, a large
binary file will avoid the streaming code path because we
don't know beforehand whether it will need conversion or
not. But if you are handling large binary files, you should
be marking them as such via attributes (or at least not
using autocrlf, and instead marking your text files as
such). And the flip side is that if you have a large
_non_-binary file, there is a correctness improvement;
before we did not apply the conversion at all.
The first half of the new t1051 script covers these failures
on input. The second half tests the matching output code
paths. These already work correctly, and do not need any
adjustment.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2012-02-25 06:10:17 +08:00
|
|
|
else if (size <= big_file_threshold || type != OBJ_BLOB ||
|
|
|
|
(path && would_convert_to_git(path, NULL, 0, 0)))
|
2011-05-08 16:47:34 +08:00
|
|
|
ret = index_core(sha1, fd, size, type, path, flags);
|
2011-05-08 16:47:35 +08:00
|
|
|
else
|
|
|
|
ret = index_stream(sha1, fd, size, type, path, flags);
|
2008-08-03 12:39:16 +08:00
|
|
|
close(fd);
|
2005-05-04 02:46:16 +08:00
|
|
|
return ret;
|
2005-05-02 14:45:49 +08:00
|
|
|
}
|
2005-10-07 18:42:00 +08:00
|
|
|
|
2011-05-08 16:47:33 +08:00
|
|
|
int index_path(unsigned char *sha1, const char *path, struct stat *st, unsigned flags)
|
2005-10-07 18:42:00 +08:00
|
|
|
{
|
|
|
|
int fd;
|
2008-12-18 01:51:53 +08:00
|
|
|
struct strbuf sb = STRBUF_INIT;
|
2005-10-07 18:42:00 +08:00
|
|
|
|
|
|
|
switch (st->st_mode & S_IFMT) {
|
|
|
|
case S_IFREG:
|
|
|
|
fd = open(path, O_RDONLY);
|
|
|
|
if (fd < 0)
|
|
|
|
return error("open(\"%s\"): %s", path,
|
|
|
|
strerror(errno));
|
2011-05-08 16:47:33 +08:00
|
|
|
if (index_fd(sha1, fd, st, OBJ_BLOB, path, flags) < 0)
|
2005-10-07 18:42:00 +08:00
|
|
|
return error("%s: failed to insert into database",
|
|
|
|
path);
|
|
|
|
break;
|
|
|
|
case S_IFLNK:
|
2008-12-18 01:51:53 +08:00
|
|
|
if (strbuf_readlink(&sb, path, st->st_size)) {
|
2005-10-07 18:42:00 +08:00
|
|
|
char *errstr = strerror(errno);
|
|
|
|
return error("readlink(\"%s\"): %s", path,
|
|
|
|
errstr);
|
|
|
|
}
|
2011-05-08 16:47:33 +08:00
|
|
|
if (!(flags & HASH_WRITE_OBJECT))
|
2008-12-18 01:51:53 +08:00
|
|
|
hash_sha1_file(sb.buf, sb.len, blob_type, sha1);
|
|
|
|
else if (write_sha1_file(sb.buf, sb.len, blob_type, sha1))
|
2005-10-07 18:42:00 +08:00
|
|
|
return error("%s: failed to insert into database",
|
|
|
|
path);
|
2008-12-18 01:51:53 +08:00
|
|
|
strbuf_release(&sb);
|
2005-10-07 18:42:00 +08:00
|
|
|
break;
|
2007-04-10 12:20:29 +08:00
|
|
|
case S_IFDIR:
|
|
|
|
return resolve_gitlink_ref(path, "HEAD", sha1);
|
2005-10-07 18:42:00 +08:00
|
|
|
default:
|
|
|
|
return error("%s: unsupported file type", path);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
2007-01-23 13:55:18 +08:00
|
|
|
|
|
|
|
int read_pack_header(int fd, struct pack_header *header)
|
|
|
|
{
|
2008-05-03 21:27:26 +08:00
|
|
|
if (read_in_full(fd, header, sizeof(*header)) < sizeof(*header))
|
|
|
|
/* "eof before pack header was fully read" */
|
|
|
|
return PH_ERROR_EOF;
|
|
|
|
|
2007-01-23 13:55:18 +08:00
|
|
|
if (header->hdr_signature != htonl(PACK_SIGNATURE))
|
|
|
|
/* "protocol error (pack signature mismatch detected)" */
|
|
|
|
return PH_ERROR_PACK_SIGNATURE;
|
|
|
|
if (!pack_version_ok(header->hdr_version))
|
|
|
|
/* "protocol error (pack version unsupported)" */
|
|
|
|
return PH_ERROR_PROTOCOL;
|
|
|
|
return 0;
|
|
|
|
}
|
make commit_tree a library function
Until now, this has been part of the commit-tree builtin.
However, it is already used by other builtins (like commit,
merge, and notes), and it would be useful to access it from
library code.
The check_valid helper has to come along, too, but is given
a more library-ish name of "assert_sha1_type".
Otherwise, the code is unchanged. There are still a few
rough edges for a library function, like printing the utf8
warning to stderr, but we can address those if and when they
come up as inappropriate.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2010-04-02 08:05:23 +08:00
|
|
|
|
|
|
|
void assert_sha1_type(const unsigned char *sha1, enum object_type expect)
|
|
|
|
{
|
|
|
|
enum object_type type = sha1_object_info(sha1, NULL);
|
|
|
|
if (type < 0)
|
|
|
|
die("%s is not a valid object", sha1_to_hex(sha1));
|
|
|
|
if (type != expect)
|
|
|
|
die("%s is not a valid '%s' object", sha1_to_hex(sha1),
|
|
|
|
typename(expect));
|
|
|
|
}
|