2011-05-21 03:59:01 +08:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2011, Google Inc.
|
|
|
|
*/
|
|
|
|
#ifndef CONVERT_H
|
|
|
|
#define CONVERT_H
|
|
|
|
|
2024-06-14 14:50:32 +08:00
|
|
|
#include "hash.h"
|
2017-07-01 04:41:28 +08:00
|
|
|
#include "string-list.h"
|
|
|
|
|
2017-06-13 06:13:52 +08:00
|
|
|
struct index_state;
|
2018-08-16 01:54:05 +08:00
|
|
|
struct strbuf;
|
2017-06-13 06:13:52 +08:00
|
|
|
|
2018-01-14 06:49:31 +08:00
|
|
|
#define CONV_EOL_RNDTRP_DIE (1<<0) /* Die if CRLF to LF to CRLF is different */
|
|
|
|
#define CONV_EOL_RNDTRP_WARN (1<<1) /* Warn if CRLF to LF to CRLF is different */
|
|
|
|
#define CONV_EOL_RENORMALIZE (1<<2) /* Convert CRLF to LF */
|
|
|
|
#define CONV_EOL_KEEP_CRLF (1<<3) /* Keep CRLF line endings as is */
|
2018-04-16 02:16:07 +08:00
|
|
|
#define CONV_WRITE_OBJECT (1<<4) /* Content is written to the index */
|
2011-05-21 03:59:01 +08:00
|
|
|
|
2018-01-14 06:49:31 +08:00
|
|
|
extern int global_conv_flags_eol;
|
2011-05-21 03:59:01 +08:00
|
|
|
|
|
|
|
/*
 * Tri-state setting for automatic CRLF conversion (the core.autocrlf
 * configuration).  Note that "input" is a distinct third state (-1),
 * not a boolean.
 */
enum auto_crlf {
	AUTO_CRLF_FALSE = 0,	/* no automatic end-of-line conversion */
	AUTO_CRLF_TRUE = 1,	/* convert to CRLF in the working tree */
	AUTO_CRLF_INPUT = -1	/* convert to LF on input only */
};
|
|
|
|
|
|
|
|
extern enum auto_crlf auto_crlf;
|
|
|
|
|
|
|
|
/*
 * Line-ending style to use in the working tree (the core.eol setting).
 * EOL_NATIVE resolves at compile time to the platform's convention:
 * CRLF when NATIVE_CRLF is defined, LF otherwise.
 */
enum eol {
	EOL_UNSET,
	EOL_CRLF,
	EOL_LF,
#ifdef NATIVE_CRLF
	EOL_NATIVE = EOL_CRLF
#else
	EOL_NATIVE = EOL_LF
#endif
};
|
|
|
|
|
2017-07-01 04:41:28 +08:00
|
|
|
/*
 * Per-cache-entry state used when a long-running filter may delay
 * producing checkout content (see struct delayed_checkout below).
 */
enum ce_delay_state {
	CE_NO_DELAY = 0,	/* the entry must be filtered immediately */
	CE_CAN_DELAY = 1,	/* the filter may delay this entry */
	CE_RETRY = 2		/* the entry was already requested once before */
};
|
|
|
|
|
|
|
|
/*
 * Bookkeeping for checkout entries whose smudging is delegated to
 * long-running filter processes that may deliver the content later
 * (delayed blobs).
 */
struct delayed_checkout {
	/*
	 * State of the currently processed cache entry. If the state is
	 * CE_CAN_DELAY, then the filter can delay the current cache entry.
	 * If the state is CE_RETRY, then this signals the filter that the
	 * cache entry was requested before.
	 */
	enum ce_delay_state state;
	/* List of filter drivers that signaled delayed blobs. */
	struct string_list filters;
	/*
	 * List of delayed blobs identified by their path. The `util` member
	 * holds a counter pointer which must be incremented when/if the
	 * associated blob gets checked out.
	 */
	struct string_list paths;
};
|
|
|
|
|
convert: permit passing additional metadata to filter processes
There are a variety of situations where a filter process can make use of
some additional metadata. For example, some people find the ident
filter too limiting and would like to include the commit or the branch
in their smudged files. This information isn't available during
checkout as HEAD hasn't been updated at that point, and it wouldn't be
available in archives either.
Let's add a way to pass this metadata down to the filter. We pass the
blob we're operating on, the treeish (preferring the commit over the
tree if one exists), and the ref we're operating on. Note that we won't
pass this information in all cases, such as when renormalizing or when
we're performing diffs, since it doesn't make sense in those cases.
The data we currently get from the filter process looks like the
following:
command=smudge
pathname=git.c
0000
With this change, we'll get data more like this:
command=smudge
pathname=git.c
refname=refs/tags/v2.25.1
treeish=c522f061d551c9bb8684a7c3859b2ece4499b56b
blob=7be7ad34bd053884ec48923706e70c81719a8660
0000
There are a couple things to note about this approach. For operations
like checkout, treeish will always be a commit, since we cannot check
out individual trees, but for other operations, like archive, we can end
up operating on only a particular tree, so we'll provide only a tree as
the treeish. Similar comments apply for refname, since there are a
variety of cases in which we won't have a ref.
This commit wires up the code to print this information, but doesn't
pass any of it at this point. In a future commit, we'll have various
code paths pass the actual useful data down.
Signed-off-by: brian m. carlson <bk2204@github.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-03-17 02:05:02 +08:00
|
|
|
/*
 * Metadata about the operation being performed (e.g. a checkout or an
 * archive extraction), passed down to filter processes so they can act
 * on it.  Filled in via init_checkout_metadata(); any member may be
 * left unset when not applicable (for instance when renormalizing or
 * diffing, no metadata is passed).
 */
struct checkout_metadata {
	/* Ref being operated on; NOT copied, must outlive this struct. */
	const char *refname;
	/* A commit if one is available, otherwise a tree; copied. */
	struct object_id treeish;
	/* The blob being filtered; copied. */
	struct object_id blob;
};
|
|
|
|
|
2020-12-16 22:50:30 +08:00
|
|
|
/*
 * How CRLF<->LF conversion applies to a path, derived from its
 * attributes and, when no attribute decides, from core.autocrlf
 * (see the attr_action/crlf_action pair in struct conv_attrs).
 */
enum convert_crlf_action {
	CRLF_UNDEFINED,		/* nothing decided yet */
	CRLF_BINARY,		/* never convert */
	CRLF_TEXT,		/* path is declared text */
	CRLF_TEXT_INPUT,	/* text, LF in the working tree */
	CRLF_TEXT_CRLF,		/* text, CRLF in the working tree */
	CRLF_AUTO,		/* decide by content */
	CRLF_AUTO_INPUT,	/* auto-detected, LF in the working tree */
	CRLF_AUTO_CRLF		/* auto-detected, CRLF in the working tree */
};
|
|
|
|
|
|
|
|
struct convert_driver;
|
|
|
|
|
|
|
|
/*
 * The conversion attributes in effect for a single path, as computed
 * by convert_attrs().
 */
struct conv_attrs {
	/* Filter driver to use, or NULL when no driver applies. */
	struct convert_driver *drv;
	enum convert_crlf_action attr_action; /* What attr says */
	enum convert_crlf_action crlf_action; /* When no attr is set, use core.autocrlf */
	/* Nonzero when ident substitution is enabled for this path. */
	int ident;
	const char *working_tree_encoding; /* Supported encoding or default encoding if NULL */
};
|
|
|
|
|
2021-04-30 12:50:26 +08:00
|
|
|
void convert_attrs(struct index_state *istate,
|
2020-12-16 22:50:30 +08:00
|
|
|
struct conv_attrs *ca, const char *path);
|
|
|
|
|
2011-05-21 03:59:01 +08:00
|
|
|
extern enum eol core_eol;
|
2024-05-27 19:46:25 +08:00
|
|
|
extern char *check_roundtrip_encoding;
|
2021-04-01 09:49:39 +08:00
|
|
|
const char *get_cached_convert_stats_ascii(struct index_state *istate,
|
2018-06-30 17:20:24 +08:00
|
|
|
const char *path);
|
|
|
|
const char *get_wt_convert_stats_ascii(const char *path);
|
2021-04-01 09:49:39 +08:00
|
|
|
const char *get_convert_attr_ascii(struct index_state *istate,
|
2018-08-14 00:14:21 +08:00
|
|
|
const char *path);
|
2011-05-21 03:59:01 +08:00
|
|
|
|
|
|
|
/* returns 1 if *dst was used */
|
2021-04-01 09:49:39 +08:00
|
|
|
int convert_to_git(struct index_state *istate,
|
2018-06-30 17:20:24 +08:00
|
|
|
const char *path, const char *src, size_t len,
|
|
|
|
struct strbuf *dst, int conv_flags);
|
convert: add [async_]convert_to_working_tree_ca() variants
Separate the attribute gathering from the actual conversion by adding
_ca() variants of the conversion functions. These variants receive a
precomputed 'struct conv_attrs', not relying, thus, on an index state.
They will be used in a future patch adding parallel checkout support,
for two reasons:
- We will already load the conversion attributes in checkout_entry(),
before conversion, to decide whether a path is eligible for parallel
checkout. Therefore, it would be wasteful to load them again later,
for the actual conversion.
- The parallel workers will be responsible for reading, converting and
writing blobs to the working tree. They won't have access to the main
process' index state, so they cannot load the attributes. Instead,
they will receive the preloaded ones and call the _ca() variant of
the conversion functions. Furthermore, the attributes machinery is
optimized to handle paths in sequential order, so it's better to leave
it for the main process, anyway.
Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
Signed-off-by: Matheus Tavares <matheus.bernardino@usp.br>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-12-16 22:50:31 +08:00
|
|
|
int convert_to_working_tree_ca(const struct conv_attrs *ca,
|
|
|
|
const char *path, const char *src,
|
|
|
|
size_t len, struct strbuf *dst,
|
|
|
|
const struct checkout_metadata *meta);
|
|
|
|
int async_convert_to_working_tree_ca(const struct conv_attrs *ca,
|
|
|
|
const char *path, const char *src,
|
|
|
|
size_t len, struct strbuf *dst,
|
|
|
|
const struct checkout_metadata *meta,
|
|
|
|
void *dco);
|
2021-04-30 12:50:26 +08:00
|
|
|
static inline int convert_to_working_tree(struct index_state *istate,
|
convert: add [async_]convert_to_working_tree_ca() variants
Separate the attribute gathering from the actual conversion by adding
_ca() variants of the conversion functions. These variants receive a
precomputed 'struct conv_attrs', not relying, thus, on an index state.
They will be used in a future patch adding parallel checkout support,
for two reasons:
- We will already load the conversion attributes in checkout_entry(),
before conversion, to decide whether a path is eligible for parallel
checkout. Therefore, it would be wasteful to load them again later,
for the actual conversion.
- The parallel workers will be responsible for reading, converting and
writing blobs to the working tree. They won't have access to the main
process' index state, so they cannot load the attributes. Instead,
they will receive the preloaded ones and call the _ca() variant of
the conversion functions. Furthermore, the attributes machinery is
optimized to handle paths in sequential order, so it's better to leave
it for the main process, anyway.
Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
Signed-off-by: Matheus Tavares <matheus.bernardino@usp.br>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-12-16 22:50:31 +08:00
|
|
|
const char *path, const char *src,
|
|
|
|
size_t len, struct strbuf *dst,
|
|
|
|
const struct checkout_metadata *meta)
|
|
|
|
{
|
|
|
|
struct conv_attrs ca;
|
|
|
|
convert_attrs(istate, &ca, path);
|
|
|
|
return convert_to_working_tree_ca(&ca, path, src, len, dst, meta);
|
|
|
|
}
|
2021-04-30 12:50:26 +08:00
|
|
|
static inline int async_convert_to_working_tree(struct index_state *istate,
|
convert: add [async_]convert_to_working_tree_ca() variants
Separate the attribute gathering from the actual conversion by adding
_ca() variants of the conversion functions. These variants receive a
precomputed 'struct conv_attrs', not relying, thus, on an index state.
They will be used in a future patch adding parallel checkout support,
for two reasons:
- We will already load the conversion attributes in checkout_entry(),
before conversion, to decide whether a path is eligible for parallel
checkout. Therefore, it would be wasteful to load them again later,
for the actual conversion.
- The parallel workers will be responsible for reading, converting and
writing blobs to the working tree. They won't have access to the main
process' index state, so they cannot load the attributes. Instead,
they will receive the preloaded ones and call the _ca() variant of
the conversion functions. Furthermore, the attributes machinery is
optimized to handle paths in sequential order, so it's better to leave
it for the main process, anyway.
Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
Signed-off-by: Matheus Tavares <matheus.bernardino@usp.br>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-12-16 22:50:31 +08:00
|
|
|
const char *path, const char *src,
|
|
|
|
size_t len, struct strbuf *dst,
|
|
|
|
const struct checkout_metadata *meta,
|
|
|
|
void *dco)
|
|
|
|
{
|
|
|
|
struct conv_attrs ca;
|
|
|
|
convert_attrs(istate, &ca, path);
|
|
|
|
return async_convert_to_working_tree_ca(&ca, path, src, len, dst, meta, dco);
|
|
|
|
}
|
2018-06-30 17:20:24 +08:00
|
|
|
int async_query_available_blobs(const char *cmd,
|
|
|
|
struct string_list *available_paths);
|
2021-04-01 09:49:39 +08:00
|
|
|
int renormalize_buffer(struct index_state *istate,
|
2018-06-30 17:20:24 +08:00
|
|
|
const char *path, const char *src, size_t len,
|
|
|
|
struct strbuf *dst);
|
2021-04-01 09:49:39 +08:00
|
|
|
/*
 * Report whether checking in `path` would convert its contents,
 * without performing the conversion: the NULL src/dst buffers make
 * convert_to_git() act as a dry-run probe (presumably short-circuiting
 * before any content is touched — see convert_to_git() for details).
 */
static inline int would_convert_to_git(struct index_state *istate,
				       const char *path)
{
	return convert_to_git(istate, path, NULL, 0, NULL, 0);
}
|
2014-08-26 23:23:25 +08:00
|
|
|
/* Precondition: would_convert_to_git_filter_fd(path) == true */
|
2021-04-01 09:49:39 +08:00
|
|
|
void convert_to_git_filter_fd(struct index_state *istate,
|
2018-06-30 17:20:24 +08:00
|
|
|
const char *path, int fd,
|
|
|
|
struct strbuf *dst,
|
|
|
|
int conv_flags);
|
2021-04-01 09:49:39 +08:00
|
|
|
int would_convert_to_git_filter_fd(struct index_state *istate,
|
2018-08-14 00:14:21 +08:00
|
|
|
const char *path);
|
2011-05-21 05:33:31 +08:00
|
|
|
|
2020-03-17 02:05:03 +08:00
|
|
|
/*
|
|
|
|
* Initialize the checkout metadata with the given values. Any argument may be
|
|
|
|
* NULL if it is not applicable. The treeish should be a commit if that is
|
|
|
|
* available, and a tree otherwise.
|
|
|
|
*
|
|
|
|
* The refname is not copied and must be valid for the lifetime of the struct.
|
|
|
|
 * The object IDs are copied.
|
|
|
|
*/
|
|
|
|
void init_checkout_metadata(struct checkout_metadata *meta, const char *refname,
|
|
|
|
const struct object_id *treeish,
|
|
|
|
const struct object_id *blob);
|
|
|
|
|
|
|
|
/* Copy the metadata from src to dst, updating the blob. */
|
|
|
|
void clone_checkout_metadata(struct checkout_metadata *dst,
|
|
|
|
const struct checkout_metadata *src,
|
|
|
|
const struct object_id *blob);
|
|
|
|
|
2019-09-03 06:39:44 +08:00
|
|
|
/*
|
|
|
|
* Reset the internal list of attributes used by convert_to_git and
|
|
|
|
* convert_to_working_tree.
|
|
|
|
*/
|
|
|
|
void reset_parsed_attributes(void);
|
|
|
|
|
2011-05-21 05:33:31 +08:00
|
|
|
/*****************************************************************
|
|
|
|
*
|
2013-07-23 05:02:23 +08:00
|
|
|
* Streaming conversion support
|
2011-05-21 05:33:31 +08:00
|
|
|
*
|
|
|
|
*****************************************************************/
|
|
|
|
|
|
|
|
struct stream_filter; /* opaque */
|
|
|
|
|
2021-04-01 09:49:39 +08:00
|
|
|
struct stream_filter *get_stream_filter(struct index_state *istate,
|
2018-08-14 00:14:21 +08:00
|
|
|
const char *path,
|
2018-06-30 17:20:24 +08:00
|
|
|
const struct object_id *);
|
2020-12-16 22:50:32 +08:00
|
|
|
struct stream_filter *get_stream_filter_ca(const struct conv_attrs *ca,
|
|
|
|
const struct object_id *oid);
|
2018-06-30 17:20:24 +08:00
|
|
|
void free_stream_filter(struct stream_filter *);
|
|
|
|
int is_null_stream_filter(struct stream_filter *);
|
2011-05-21 05:33:31 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Use as much input up to *isize_p and fill output up to *osize_p;
|
|
|
|
* update isize_p and osize_p to indicate how much buffer space was
|
|
|
|
* consumed and filled. Return 0 on success, non-zero on error.
|
2011-05-22 05:05:51 +08:00
|
|
|
*
|
|
|
|
* Some filters may need to buffer the input and look-ahead inside it
|
|
|
|
* to decide what to output, and they may consume more than zero bytes
|
|
|
|
* of input and still not produce any output. After feeding all the
|
|
|
|
* input, pass NULL as input and keep calling this function, to let
|
|
|
|
* such filters know there is no more input coming and it is time for
|
|
|
|
* them to produce the remaining output based on the buffered input.
|
2011-05-21 05:33:31 +08:00
|
|
|
*/
|
2018-06-30 17:20:24 +08:00
|
|
|
int stream_filter(struct stream_filter *,
|
|
|
|
const char *input, size_t *isize_p,
|
|
|
|
char *output, size_t *osize_p);
|
2011-05-21 05:33:31 +08:00
|
|
|
|
2020-12-16 22:50:33 +08:00
|
|
|
/*
 * Classification of a path's conversion attributes, telling callers
 * how the blob can be smudged at checkout — in particular whether it
 * must be buffered in core or can be streamed.  Computed by
 * classify_conv_attrs().
 */
enum conv_attrs_classification {
	/*
	 * The blob must be loaded into a buffer before it can be
	 * smudged. All smudging is done in-proc.
	 */
	CA_CLASS_INCORE,

	/*
	 * The blob must be loaded into a buffer, but uses a
	 * single-file driver filter, such as rot13.
	 */
	CA_CLASS_INCORE_FILTER,

	/*
	 * The blob must be loaded into a buffer, but uses a
	 * long-running driver process, such as LFS. This might or
	 * might not use delayed operations. (The important thing is
	 * that there is a single subordinate long-running process
	 * handling all associated blobs and in case of delayed
	 * operations, may hold per-blob state.)
	 */
	CA_CLASS_INCORE_PROCESS,

	/*
	 * The blob can be streamed and smudged without needing to
	 * completely read it into a buffer.
	 */
	CA_CLASS_STREAMABLE,
};
|
|
|
|
|
|
|
|
enum conv_attrs_classification classify_conv_attrs(
|
|
|
|
const struct conv_attrs *ca);
|
|
|
|
|
2011-05-21 03:59:01 +08:00
|
|
|
#endif /* CONVERT_H */
|