fsck: use streaming interface for large blobs in pack

For blobs, we want to make sure the on-disk data is not corrupted
(i.e. it can be inflated and produces the expected SHA-1). Blob content
is opaque; there is nothing else inside to check.

For really large blobs, we may want to avoid unpacking the entire blob
in memory just to check whether it produces the same SHA-1. On 32-bit
systems, we may not have enough virtual address space for such an
allocation. And even on 64-bit systems, where address space is not a
problem, allocating that much memory can push other parts of the system
into swap, generating lots of I/O and slowing everything down.

For this particular operation it is sufficient to skip unpacking the
blob and let check_sha1_signature(), which supports a streaming
interface, do the job. check_sha1_signature() is not shown in the diff,
unfortunately, but it will be called when "data_valid && !data" is
false.
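
For illustration, here is a minimal sketch of what such a streaming
check looks like in principle. This is not the body of
check_sha1_signature() and is not part of this commit; it merely assumes
git's internal streaming API (open_istream()/read_istream()/
close_istream() from streaming.h) and the usual "<type> <size>" + NUL
header hashing:

	#include "cache.h"
	#include "object.h"
	#include "streaming.h"

	/*
	 * Sketch only: hash the object header, then feed the inflated
	 * content through in fixed-size chunks and compare the result
	 * with the expected SHA-1.
	 */
	static int stream_and_verify(const unsigned char *expected_sha1)
	{
		enum object_type type;
		unsigned long size;
		struct git_istream *st;
		git_SHA_CTX c;
		unsigned char real_sha1[20];
		char hdr[32];
		int hdrlen;

		st = open_istream(expected_sha1, &type, &size, NULL);
		if (!st)
			return -1;

		hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu",
				   typename(type), size) + 1;
		git_SHA1_Init(&c);
		git_SHA1_Update(&c, hdr, hdrlen);

		for (;;) {
			char buf[16384];
			ssize_t n = read_istream(st, buf, sizeof(buf));

			if (n < 0) {
				close_istream(st);
				return -1;	/* e.g. inflate error */
			}
			if (!n)
				break;
			git_SHA1_Update(&c, buf, n);
		}
		close_istream(st);
		git_SHA1_Final(real_sha1, &c);

		return hashcmp(expected_sha1, real_sha1) ? -1 : 0;
	}

Only one fixed-size buffer is live at any moment, so memory use stays
constant no matter how large the blob is.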

We will call the callback function "fn" with NULL as "data". The only
callback currently passed in is fsck_obj_buffer(), which does not touch
"data" at all when the object is a blob.

Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
Nguyễn Thái Ngọc Duy authored on 2016-07-13 17:44:04 +02:00; committed by Junio C Hamano
commit ec9d224903, parent af92a645d3
4 changed files with 29 additions and 6 deletions

builtin/fsck.c

@@ -356,6 +356,10 @@ static int fsck_sha1(const unsigned char *sha1)
 static int fsck_obj_buffer(const unsigned char *sha1, enum object_type type,
 			   unsigned long size, void *buffer, int *eaten)
 {
+	/*
+	 * Note, buffer may be NULL if type is OBJ_BLOB. See
+	 * verify_packfile(), data_valid variable for details.
+	 */
 	struct object *obj;
 	obj = parse_object_buffer(sha1, type, size, buffer, eaten);
 	if (!obj) {

pack-check.c

@@ -105,6 +105,8 @@ static int verify_packfile(struct packed_git *p,
 		void *data;
 		enum object_type type;
 		unsigned long size;
+		off_t curpos;
+		int data_valid;
 
 		if (p->index_version > 1) {
 			off_t offset = entries[i].offset;
@@ -116,8 +118,25 @@ static int verify_packfile(struct packed_git *p,
 				    sha1_to_hex(entries[i].sha1),
 				    p->pack_name, (uintmax_t)offset);
 		}
-		data = unpack_entry(p, entries[i].offset, &type, &size);
-		if (!data)
+
+		curpos = entries[i].offset;
+		type = unpack_object_header(p, w_curs, &curpos, &size);
+		unuse_pack(w_curs);
+
+		if (type == OBJ_BLOB && big_file_threshold <= size) {
+			/*
+			 * Let check_sha1_signature() check it with
+			 * the streaming interface; no point slurping
+			 * the data in-core only to discard.
+			 */
+			data = NULL;
+			data_valid = 0;
+		} else {
+			data = unpack_entry(p, entries[i].offset, &type, &size);
+			data_valid = 1;
+		}
+
+		if (data_valid && !data)
 			err = error("cannot unpack %s from %s at offset %"PRIuMAX"",
 				    sha1_to_hex(entries[i].sha1), p->pack_name,
 				    (uintmax_t)entries[i].offset);

pack.h

@@ -74,6 +74,7 @@ struct pack_idx_entry {
 
 struct progress;
 
+/* Note, the data argument could be NULL if object type is blob */
 typedef int (*verify_fn)(const unsigned char*, enum object_type, unsigned long, void*, int*);
 
 extern const char *write_idx_file(const char *index_name, struct pack_idx_entry **objects, int nr_objects, const struct pack_idx_option *, const unsigned char *sha1);

t/t1050-large.sh

@@ -177,10 +177,9 @@ test_expect_success 'zip achiving, deflate' '
 	git archive --format=zip HEAD >/dev/null
 '
 
-test_expect_success 'fsck' '
-	test_must_fail git fsck 2>err &&
-	n=$(grep "error: attempting to allocate .* over limit" err | wc -l) &&
-	test "$n" -gt 1
+test_expect_success 'fsck large blobs' '
+	git fsck 2>err &&
+	test_must_be_empty err
 '
 
 test_done
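
Note on the test change: before this commit, t1050 deliberately expected
"git fsck" to fail on its large blobs with "attempting to allocate ...
over limit" errors; with the streaming check the same repository must
now fsck cleanly. The script lowers core.bigfilethreshold to a small
value during setup, so even modest test blobs take the large-blob code
path exercised here.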