/*
Copyright 2020 Google LLC
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file or at
https://developers.google.com/open-source/licenses/bsd
*/
#ifndef BLOCK_H
#define BLOCK_H
#include "basics.h"
#include "record.h"
#include "reftable-blocksource.h"
/*
* Writes reftable blocks. The block_writer is reused across blocks to minimize
* allocation overhead.
*/
struct block_writer {
z_stream *zstream;
unsigned char *compressed;
size_t compressed_cap;
uint8_t *buf;
uint32_t block_size;
/* Offset of the global header. Nonzero in the first block only. */
uint32_t header_off;
/* How often to restart keys. */
uint16_t restart_interval;
int hash_size;
/* Offset of next uint8_t to write. */
uint32_t next;
uint32_t *restarts;
uint32_t restart_len;
uint32_t restart_cap;
struct strbuf last_key;
int entries;
};
/*
 * initializes the block_writer to write `typ` entries, using `buf` as temporary
* storage. `buf` is not owned by the block_writer. */
void block_writer_init(struct block_writer *bw, uint8_t typ, uint8_t *buf,
uint32_t block_size, uint32_t header_off, int hash_size);
/* returns the block type (eg. 'r' for ref records). */
uint8_t block_writer_type(struct block_writer *bw);
/* appends the record; returns -1 if it doesn't fit. */
int block_writer_add(struct block_writer *w, struct reftable_record *rec);
/* appends the key restarts, and compresses the block if necessary. */
int block_writer_finish(struct block_writer *w);
/* clears out internally allocated block_writer members. */
void block_writer_release(struct block_writer *bw);
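
/*
 * Illustrative sketch only (not part of the API above): a typical write
 * cycle using the functions declared in this header. The block type 'r',
 * the buffer size and the 20-byte (SHA-1) hash size are assumptions picked
 * for the example, and next_record() is a hypothetical helper yielding the
 * records to be written.
 *
 *	struct block_writer bw;
 *	uint8_t buf[4096];
 *	struct reftable_record rec;
 *	int err = 0;
 *
 *	block_writer_init(&bw, 'r', buf, sizeof(buf), 0, 20);
 *	while (next_record(&rec) > 0) {
 *		err = block_writer_add(&bw, &rec);
 *		if (err < 0)
 *			break;	(block is full: finish it and start a new one)
 *	}
 *	if (!err)
 *		err = block_writer_finish(&bw);
 *	block_writer_release(&bw);
 */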
struct z_stream;
/* Read a block. */
struct block_reader {
/* offset of the block header; nonzero for the first block in a
* reftable. */
uint32_t header_off;
/* the memory block */
struct reftable_block block;
int hash_size;
/* Uncompressed data for log entries. */
z_stream *zstream;
unsigned char *uncompressed_data;
size_t uncompressed_cap;
/* size of the data, excluding restart data. */
uint32_t block_len;
uint8_t *restart_bytes;
uint16_t restart_count;
/* size of the data in the file. For log blocks, this is the compressed
* size. */
uint32_t full_block_size;
};
/* initializes a block reader. */
int block_reader_init(struct block_reader *br, struct reftable_block *bl,
uint32_t header_off, uint32_t table_block_size,
int hash_size);
void block_reader_release(struct block_reader *br);
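
/*
 * Illustrative sketch only, assuming `block` already holds the raw bytes of
 * a single block read from a block source; `table_block_size` and
 * `hash_size` stand in for the values recorded in the table's header.
 *
 *	struct block_reader br = { 0 };
 *	int err;
 *
 *	err = block_reader_init(&br, &block, 0, table_block_size, hash_size);
 *	if (err < 0)
 *		return err;
 *	(... read records via a block_iter, see the sketch further below ...)
 *	block_reader_release(&br);
 */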
/* Returns the block type (eg. 'r' for refs) */
uint8_t block_reader_type(const struct block_reader *r);
/* Decodes the first key in the block */
int block_reader_first_key(const struct block_reader *br, struct strbuf *key);
/* Iterate over entries in a block */
struct block_iter {
/* offset within the block of the next entry to read. */
uint32_t next_off;
const unsigned char *block;
size_t block_len;
int hash_size;
/* key for last entry we read. */
struct strbuf last_key;
struct strbuf scratch;
};
#define BLOCK_ITER_INIT { \
.last_key = STRBUF_INIT, \
.scratch = STRBUF_INIT, \
}
/* Position `it` at start of the block */
void block_iter_seek_start(struct block_iter *it, const struct block_reader *br);
/* Position `it` to the `want` key in the block */
int block_iter_seek_key(struct block_iter *it, const struct block_reader *br,
struct strbuf *want);
/* return < 0 for error, 0 for OK, > 0 for EOF. */
int block_iter_next(struct block_iter *it, struct reftable_record *rec);
/* Reset the block iterator to pristine state without releasing its memory. */
void block_iter_reset(struct block_iter *it);
/* deallocates memory for `it`. The block reader and its block are left intact. */
void block_iter_close(struct block_iter *it);
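
/*
 * Illustrative sketch only: draining every record of a block through the
 * iterator API above. `br` is a block_reader set up via block_reader_init(),
 * `rec` is a reftable_record of the block's type, and handle_record() is a
 * hypothetical callback.
 *
 *	struct block_iter it = BLOCK_ITER_INIT;
 *	int err;
 *
 *	block_iter_seek_start(&it, br);
 *	while ((err = block_iter_next(&it, &rec)) == 0)
 *		handle_record(&rec);
 *	block_iter_close(&it);
 *	return err < 0 ? err : 0;	(err > 0 only signals end of block)
 */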
/* size of file header, depending on format version */
int header_size(int version);
/* size of file footer, depending on format version */
int footer_size(int version);
/* returns a block to its source. */
void reftable_block_done(struct reftable_block *ret);
#endif