/**
 * compress.c - Compressed attribute handling code. Originated from the Linux-NTFS
 *		project.
 *
 * Copyright (c) 2004-2005 Anton Altaparmakov
 * Copyright (c) 2004-2006 Szabolcs Szakacsits
 * Copyright (c) 2005 Yura Pakhuchiy
 * Copyright (c) 2009-2014 Jean-Pierre Andre
 * Copyright (c) 2014 Eric Biggers
 *
 * This program/include file is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program/include file is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (in the main directory of the NTFS-3G
 * distribution in the file COPYING); if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifdef HAVE_STDIO_H
#include <stdio.h>
#endif
#ifdef HAVE_STRING_H
#include <string.h>
#endif
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
#ifdef HAVE_ERRNO_H
#include <errno.h>
#endif

#include "attrib.h"
#include "debug.h"
#include "volume.h"
#include "types.h"
#include "layout.h"
#include "runlist.h"
#include "compress.h"
#include "lcnalloc.h"
#include "logging.h"
#include "misc.h"

#undef le16_to_cpup
/* the standard le16_to_cpup() crashes for unaligned data on some processors */
#define le16_to_cpup(p) (*(u8*)(p) + (((u8*)(p))[1] << 8))
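/*
 * A small worked example of the byte-wise load above: applied to the byte
 * pair { 0x34, 0x12 } the macro computes 0x34 + (0x12 << 8) = 0x1234,
 * decoding the on-disk little-endian value one byte at a time, so no
 * unaligned 16-bit load is ever issued.
 */
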
/**
 * enum ntfs_compression_constants - constants used in the compression code
 */
typedef enum {
        /* Token types and access mask. */
        NTFS_SYMBOL_TOKEN       = 0,
        NTFS_PHRASE_TOKEN       = 1,
        NTFS_TOKEN_MASK         = 1,

        /* Compression sub-block constants. */
        NTFS_SB_SIZE_MASK       = 0x0fff,
        NTFS_SB_SIZE            = 0x1000,
        NTFS_SB_IS_COMPRESSED   = 0x8000,
} ntfs_compression_constants;

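/*
 * Layout of the 16-bit sub-block header implied by these constants (and by
 * the writer/reader below): bits 0-11 hold the sub-block size minus 3, bit
 * 15 is NTFS_SB_IS_COMPRESSED, and the remaining bits are not examined by
 * the decompressor here. For example, an uncompressed sub-block carries the
 * header 0x3fff: (0x3fff & NTFS_SB_SIZE_MASK) + 3 = 0x1002 bytes, i.e. the
 * 2-byte header followed by NTFS_SB_SIZE (4096) bytes of raw data, with
 * bit 15 clear.
 */
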
/* Match length at or above which ntfs_best_match() will stop searching for
 * longer matches. */
#define NICE_MATCH_LEN 18

/* Maximum number of potential matches that ntfs_best_match() will consider at
 * each position. */
#define MAX_SEARCH_DEPTH 24

/* log base 2 of the number of entries in the hash table for match-finding. */
#define HASH_SHIFT 14

/* Constant for the multiplicative hash function. */
#define HASH_MULTIPLIER 0x1E35A7BD

struct COMPRESS_CONTEXT {
        const unsigned char *inbuf;     /* uncompressed input data */
        int bufsize;                    /* size of inbuf in bytes */
        int size;                       /* length of the best match found */
        int rel;                        /* offset of the best match (negative) */
        int mxsz;                       /* maximum codable match length */
        s16 head[1 << HASH_SHIFT];      /* most recent position per hash code */
        s16 prev[NTFS_SB_SIZE];         /* previous position on the same chain */
};

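/*
 * head[] and prev[] implement a classic hash-chain match-finder: head[h]
 * holds the most recent input position whose next 3 bytes hashed to h, and
 * prev[i] holds the previous position on the same chain, so walking
 * head[h], prev[head[h]], ... visits candidate matches from newest to
 * oldest. The value -1 (chains are initialized to -1 in
 * ntfs_compress_block()) terminates a chain.
 */
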
/*
 * Hash the next 3-byte sequence in the input buffer
 */
static inline unsigned int ntfs_hash(const u8 *p)
{
        u32 str;
        u32 hash;

#if defined(__i386__) || defined(__x86_64__)
        /* Unaligned access allowed, and little endian CPU.
         * Callers ensure that at least 4 (not 3) bytes are remaining. */
        str = *(const u32 *)p & 0xFFFFFF;
#else
        str = ((u32)p[0] << 0) | ((u32)p[1] << 8) | ((u32)p[2] << 16);
#endif

        hash = str * HASH_MULTIPLIER;

        /* High bits are more random than the low bits. */
        return hash >> (32 - HASH_SHIFT);
}

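/*
 * A quick range check of the above: str is at most 0xffffff, the 32-bit
 * product wraps modulo 2^32, and the shift by 32 - HASH_SHIFT = 18 keeps
 * the top 14 bits, so the returned index always lies in
 * [0, (1 << HASH_SHIFT) - 1], matching the size of the head[] table.
 */
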
/*
 * Search for the longest sequence matching current position
 *
 * A hash table, each entry of which points to a chain of sequence
 * positions sharing the corresponding hash code, is maintained to speed up
 * searching for matches. To maintain the hash table, either
 * ntfs_best_match() or ntfs_skip_position() has to be called for each
 * consecutive position.
 *
 * This function is heavily used; it has to be optimized carefully.
 *
 * This function sets pctx->size and pctx->rel to the length and offset,
 * respectively, of the longest match found.
 *
 * The minimum match length is assumed to be 3, and the maximum match
 * length is assumed to be pctx->mxsz. If this function produces
 * pctx->size < 3, then no match was found.
 *
 * Note: for the following reasons, this function is not guaranteed to find
 * *the* longest match up to pctx->mxsz:
 *
 * (1) If this function finds a match of NICE_MATCH_LEN bytes or greater,
 * it ends early because a match this long is good enough and it's not
 * worth spending more time searching.
 *
 * (2) If this function considers MAX_SEARCH_DEPTH matches with a single
 * position, it ends early and returns the longest match found so far.
 * This saves a lot of time on degenerate inputs.
 */
static void ntfs_best_match(struct COMPRESS_CONTEXT *pctx, const int i,
                            int best_len)
{
        const u8 * const inbuf = pctx->inbuf;
        const u8 * const strptr = &inbuf[i]; /* String we're matching against */
        s16 * const prev = pctx->prev;
        const int max_len = min(pctx->bufsize - i, pctx->mxsz);
        const int nice_len = min(NICE_MATCH_LEN, max_len);
        int depth_remaining = MAX_SEARCH_DEPTH;
        const u8 *best_matchptr = strptr;
        unsigned int hash;
        s16 cur_match;
        const u8 *matchptr;
        int len;

        if (max_len < 4)
                goto out;

        /* Insert the current sequence into the appropriate hash chain. */
        hash = ntfs_hash(strptr);
        cur_match = pctx->head[hash];
        prev[i] = cur_match;
        pctx->head[hash] = i;

        if (best_len >= max_len) {
                /* Lazy match is being attempted, but there aren't enough length
                 * bits remaining to code a longer match. */
                goto out;
        }

        /* Search the appropriate hash chain for matches. */

        for (; cur_match >= 0 && depth_remaining--;
                        cur_match = prev[cur_match])
        {
                matchptr = &inbuf[cur_match];

                /* Considering the potential match at 'matchptr': is it longer
                 * than 'best_len'?
                 *
                 * The bytes at index 'best_len' are the most likely to differ,
                 * so check them first.
                 *
                 * The bytes at indices 'best_len - 1' and '0' are less
                 * important to check separately. But doing so still gives a
                 * slight performance improvement, at least on x86_64, probably
                 * because they create separate branches for the CPU to predict
                 * independently of the branches in the main comparison loops.
                 */
                if (matchptr[best_len] != strptr[best_len] ||
                    matchptr[best_len - 1] != strptr[best_len - 1] ||
                    matchptr[0] != strptr[0])
                        goto next_match;

                for (len = 1; len < best_len - 1; len++)
                        if (matchptr[len] != strptr[len])
                                goto next_match;

                /* The match is the longest found so far ---
                 * at least 'best_len' + 1 bytes. Continue extending it. */

                best_matchptr = matchptr;

                do {
                        if (++best_len >= nice_len) {
                                /* 'nice_len' reached; don't waste time
                                 * searching for longer matches. Extend the
                                 * match as far as possible and terminate the
                                 * search. */
                                while (best_len < max_len &&
                                       (best_matchptr[best_len] ==
                                        strptr[best_len]))
                                {
                                        best_len++;
                                }
                                goto out;
                        }
                } while (best_matchptr[best_len] == strptr[best_len]);

                /* Found a longer match, but 'nice_len' not yet reached. */

next_match:
                /* Continue to next match in the chain. */
                ;
        }

        /* Reached end of chain, or ended early due to reaching the maximum
         * search depth. */

out:
        /* Return the longest match we were able to find. */
        pctx->size = best_len;
        pctx->rel = best_matchptr - strptr; /* given as a negative number! */
}

/*
 * Advance the match-finder, but don't search for matches.
 */
static void ntfs_skip_position(struct COMPRESS_CONTEXT *pctx, const int i)
{
        unsigned int hash;

        if (pctx->bufsize - i < 4)
                return;

        /* Insert the current sequence into the appropriate hash chain. */
        hash = ntfs_hash(pctx->inbuf + i);
        pctx->prev[i] = pctx->head[hash];
        pctx->head[hash] = i;
}

/*
 * Compress a 4096-byte block
 *
 * Returns a header of two bytes followed by the compressed data.
 * If compression is not effective, the header and an uncompressed
 * block are returned.
 *
 * Note : two bytes may be output before output buffer overflow
 * is detected, so a 4100-byte output buffer must be reserved.
 *
 * Returns the size of the compressed block, including the
 * header (minimal size is 2, maximum size is 4098),
 * or 0 if an error occurred.
 */

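/*
 * Sketch of the phrase-token packing used below (mirrored by the decoder in
 * ntfs_decompress()): a match of length len ending offs bytes before the
 * current position (offs = pctx->rel, negative) is stored little-endian as
 * q = (~offs << (16 - bp)) + (len - 3). While bp is still 4, a 5-byte match
 * starting 2 bytes back yields q = (1 << 12) + 2 = 0x1002, i.e. the bytes
 * 0x02 0x10 in the output stream.
 */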
static unsigned int ntfs_compress_block(const char *inbuf, const int bufsize,
                                char *outbuf)
{
        struct COMPRESS_CONTEXT *pctx;
        int i; /* current position */
        int j; /* end of best match from current position */
        int k; /* end of best match from next position */
        int offs; /* offset to best match */
        int bp; /* bits to store offset */
        int bp_cur; /* saved bits to store offset at current position */
        int mxoff; /* max match offset : 1 << bp */
        unsigned int xout;
        unsigned int q; /* aggregated offset and size */
        int have_match; /* do we have a match at the current position? */
        char *ptag; /* location reserved for a tag */
        int tag; /* current value of tag */
        int ntag; /* count of bits still undefined in tag */

        pctx = ntfs_malloc(sizeof(struct COMPRESS_CONTEXT));
        if (!pctx) {
                errno = ENOMEM;
                return 0;
        }

        /* All hash chains start as empty. The special value '-1' indicates the
         * end of each hash chain. */
        memset(pctx->head, 0xFF, sizeof(pctx->head));

        pctx->inbuf = (const unsigned char*)inbuf;
        pctx->bufsize = bufsize;
        xout = 2;
        i = 0;
        bp = 4;
        mxoff = 1 << bp;
        pctx->mxsz = (1 << (16 - bp)) + 2;
        have_match = 0;
        tag = 0;
        ntag = 8;
        ptag = &outbuf[xout++];

        while ((i < bufsize) && (xout < (NTFS_SB_SIZE + 2))) {

                /* This implementation uses "lazy" parsing: it always chooses
                 * the longest match, unless the match at the next position is
                 * longer. This is the same strategy used by the high
                 * compression modes of zlib. */

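                /* Concretely: suppose a 3-byte match is found at position i
                 * but a 4-byte match starts at i + 1. Then k = i + 5 exceeds
                 * j + 1 = i + 4, so the byte at i goes out as a literal and
                 * the longer match is taken on the next iteration. */
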
                if (!have_match) {
                        /* Find the longest match at the current position. But
                         * first adjust the maximum match length if needed.
                         * (This loop might need to run more than one time in
                         * the case that we just output a long match.) */
                        while (mxoff < i) {
                                bp++;
                                mxoff <<= 1;
                                pctx->mxsz = (pctx->mxsz + 2) >> 1;
                        }
                        ntfs_best_match(pctx, i, 2);
                }

                if (pctx->size >= 3) {

                        /* Found a match at the current position. */

                        j = i + pctx->size;
                        bp_cur = bp;
                        offs = pctx->rel;

                        if (pctx->size >= NICE_MATCH_LEN) {

                                /* Choose long matches immediately. */

                                q = (~offs << (16 - bp_cur)) + (j - i - 3);
                                outbuf[xout++] = q & 255;
                                outbuf[xout++] = (q >> 8) & 255;
                                tag |= (1 << (8 - ntag));

                                if (j == bufsize) {
                                        /* Shortcut if the match extends to the
                                         * end of the buffer. */
                                        i = j;
                                        --ntag;
                                        break;
                                }
                                i += 1;
                                do {
                                        ntfs_skip_position(pctx, i);
                                } while (++i != j);
                                have_match = 0;
                        } else {
                                /* Check for a longer match at the next
                                 * position. */

                                /* Doesn't need to be while() since we just
                                 * adjusted the maximum match length at the
                                 * previous position. */
                                if (mxoff < i + 1) {
                                        bp++;
                                        mxoff <<= 1;
                                        pctx->mxsz = (pctx->mxsz + 2) >> 1;
                                }
                                ntfs_best_match(pctx, i + 1, pctx->size);
                                k = i + 1 + pctx->size;

                                if (k > (j + 1)) {
                                        /* Next match is longer.
                                         * Output a literal. */
                                        outbuf[xout++] = inbuf[i++];
                                        have_match = 1;
                                } else {
                                        /* Next match isn't longer.
                                         * Output the current match. */
                                        q = (~offs << (16 - bp_cur)) +
                                                        (j - i - 3);
                                        outbuf[xout++] = q & 255;
                                        outbuf[xout++] = (q >> 8) & 255;
                                        tag |= (1 << (8 - ntag));

                                        /* The minimum match length is 3, and
                                         * we've run two bytes through the
                                         * matchfinder already. So the minimum
                                         * number of positions we need to skip
                                         * is 1. */
                                        i += 2;
                                        do {
                                                ntfs_skip_position(pctx, i);
                                        } while (++i != j);
                                        have_match = 0;
                                }
                        }
                } else {
                        /* No match at current position. Output a literal. */
                        outbuf[xout++] = inbuf[i++];
                        have_match = 0;
                }

                /* Store the tag if fully used. */
                if (!--ntag) {
                        *ptag = tag;
                        ntag = 8;
                        ptag = &outbuf[xout++];
                        tag = 0;
                }
        }

        /* Store the last tag if partially used. */
        if (ntag == 8)
                xout--;
        else
                *ptag = tag;

        /* Determine whether to store the data compressed or uncompressed. */

        if ((i >= bufsize) && (xout < (NTFS_SB_SIZE + 2))) {
                /* Compressed. */
                outbuf[0] = (xout - 3) & 255;
                outbuf[1] = 0xb0 + (((xout - 3) >> 8) & 15);
        } else {
                /* Uncompressed. */
                memcpy(&outbuf[2], inbuf, bufsize);
                if (bufsize < NTFS_SB_SIZE)
                        memset(&outbuf[bufsize + 2], 0, NTFS_SB_SIZE - bufsize);
                outbuf[0] = 0xff;
                outbuf[1] = 0x3f;
                xout = NTFS_SB_SIZE + 2;
        }

        /* Free the compression context and return the total number of bytes
         * written to 'outbuf'. */
        free(pctx);
        return (xout);
}

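/*
 * A minimal usage sketch (hypothetical caller, not part of this file): the
 * output buffer must hold NTFS_SB_SIZE + 4 = 4100 bytes, since two header
 * bytes may be written before overflow is detected.
 *
 *      char out[NTFS_SB_SIZE + 4];
 *      unsigned int sz = ntfs_compress_block(in, NTFS_SB_SIZE, out);
 *      if (!sz)
 *              ... allocation failed, errno is ENOMEM ...
 *      ... otherwise out[0..sz-1] is the complete sub-block ...
 */
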
/**
 * ntfs_decompress - decompress a compression block into an array of pages
 * @dest:       buffer to which to write the decompressed data
 * @dest_size:  size of buffer @dest in bytes
 * @cb_start:   compression block to decompress
 * @cb_size:    size of compression block @cb_start in bytes
 *
 * This decompresses the compression block @cb_start into the destination
 * buffer @dest.
 *
 * @cb_start is a pointer to the compression block which needs decompressing
 * and @cb_size is the size of @cb_start in bytes (8-64kiB).
 *
 * Return 0 on success or -1 with errno set to EOVERFLOW on error in the
 * compressed stream.
 */
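/*
 * Worked example of the phrase-token split implemented below: with the
 * destination 100 bytes into the sub-block, lg becomes 3 (i = 99 is halved
 * three times before dropping below 0x10), so a token pt decodes as
 * back-distance p = (pt >> 9) + 1 (at most 128, enough to reach back to the
 * start of the sub-block from offset 100) and length l = (pt & 0x1ff) + 3.
 */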
static int ntfs_decompress(u8 *dest, const u32 dest_size,
                u8 *const cb_start, const u32 cb_size)
{
        /*
         * Pointers into the compressed data, i.e. the compression block (cb),
         * and the therein contained sub-blocks (sb).
         */
        u8 *cb_end = cb_start + cb_size; /* End of cb. */
        u8 *cb = cb_start;      /* Current position in cb. */
        u8 *cb_sb_start = cb;   /* Beginning of the current sb in the cb. */
        u8 *cb_sb_end;          /* End of current sb / beginning of next sb. */
        /* Variables for uncompressed data / destination. */
        u8 *dest_end = dest + dest_size;        /* End of dest buffer. */
        u8 *dest_sb_start;      /* Start of current sub-block in dest. */
        u8 *dest_sb_end;        /* End of current sb in dest. */
        /* Variables for tag and token parsing. */
        u8 tag;                 /* Current tag. */
        int token;              /* Loop counter for the eight tokens in tag. */

        ntfs_log_trace("Entering, cb_size = 0x%x.\n", (unsigned)cb_size);
do_next_sb:
        ntfs_log_debug("Beginning sub-block at offset = %d in the cb.\n",
                        (int)(cb - cb_start));
        /*
         * Have we reached the end of the compression block or the end of the
         * decompressed data?  The latter can happen for example if the current
         * position in the compression block is one byte before its end so the
         * first two checks do not detect it.
         */
        if (cb == cb_end || !le16_to_cpup((le16*)cb) || dest == dest_end) {
                ntfs_log_debug("Completed. Returning success (0).\n");
                return 0;
        }
        /* Setup offset for the current sub-block destination. */
        dest_sb_start = dest;
        dest_sb_end = dest + NTFS_SB_SIZE;
        /* Check that we are still within allowed boundaries. */
        if (dest_sb_end > dest_end)
                goto return_overflow;
        /* Does the minimum size of a compressed sb overflow valid range? */
        if (cb + 6 > cb_end)
                goto return_overflow;
        /* Setup the current sub-block source pointers and validate range. */
        cb_sb_start = cb;
        cb_sb_end = cb_sb_start + (le16_to_cpup((le16*)cb) & NTFS_SB_SIZE_MASK)
                        + 3;
        if (cb_sb_end > cb_end)
                goto return_overflow;
        /* Now, we are ready to process the current sub-block (sb). */
        if (!(le16_to_cpup((le16*)cb) & NTFS_SB_IS_COMPRESSED)) {
                ntfs_log_debug("Found uncompressed sub-block.\n");
                /* This sb is not compressed, just copy it into destination. */
                /* Advance source position to first data byte. */
                cb += 2;
                /* An uncompressed sb must be full size. */
                if (cb_sb_end - cb != NTFS_SB_SIZE)
                        goto return_overflow;
                /* Copy the block and advance the source position. */
                memcpy(dest, cb, NTFS_SB_SIZE);
                cb += NTFS_SB_SIZE;
                /* Advance destination position to next sub-block. */
                dest += NTFS_SB_SIZE;
                goto do_next_sb;
        }
        ntfs_log_debug("Found compressed sub-block.\n");
        /* This sb is compressed, decompress it into destination. */
        /* Forward to the first tag in the sub-block. */
        cb += 2;
do_next_tag:
        if (cb == cb_sb_end) {
                /* Check if the decompressed sub-block was not full-length. */
                if (dest < dest_sb_end) {
                        int nr_bytes = dest_sb_end - dest;

                        ntfs_log_debug("Filling incomplete sub-block with zeroes.\n");
                        /* Zero remainder and update destination position. */
                        memset(dest, 0, nr_bytes);
                        dest += nr_bytes;
                }
                /* We have finished the current sub-block. */
                goto do_next_sb;
        }
        /* Check we are still in range. */
        if (cb > cb_sb_end || dest > dest_sb_end)
                goto return_overflow;
        /* Get the next tag and advance to first token. */
        tag = *cb++;
        /* Parse the eight tokens described by the tag. */
        for (token = 0; token < 8; token++, tag >>= 1) {
                u16 lg, pt, length, max_non_overlap;
                register u16 i;
                u8 *dest_back_addr;

                /* Check if we are done / still in range. */
                if (cb >= cb_sb_end || dest > dest_sb_end)
                        break;
                /* Determine token type and parse appropriately. */
                if ((tag & NTFS_TOKEN_MASK) == NTFS_SYMBOL_TOKEN) {
                        /*
                         * We have a symbol token, copy the symbol across, and
                         * advance the source and destination positions.
                         */
                        *dest++ = *cb++;
                        /* Continue with the next token. */
                        continue;
                }
                /*
                 * We have a phrase token. Make sure it is not the first tag in
                 * the sb as this is illegal and would confuse the code below.
                 */
                if (dest == dest_sb_start)
                        goto return_overflow;
                /*
                 * Determine the number of bytes to go back (p) and the number
                 * of bytes to copy (l). We use an optimized algorithm in which
                 * we first calculate log2(current destination position in sb),
                 * which allows determination of l and p in O(1) rather than
                 * O(n). We just need an arch-optimized log2() function now.
                 */
                lg = 0;
                for (i = dest - dest_sb_start - 1; i >= 0x10; i >>= 1)
                        lg++;
                /* Get the phrase token into i. */
                pt = le16_to_cpup((le16*)cb);
                /*
                 * Calculate starting position of the byte sequence in
                 * the destination using the fact that p = (pt >> (12 - lg)) + 1
                 * and make sure we don't go too far back.
                 */
                dest_back_addr = dest - (pt >> (12 - lg)) - 1;
                if (dest_back_addr < dest_sb_start)
                        goto return_overflow;
                /* Now calculate the length of the byte sequence. */
                length = (pt & (0xfff >> lg)) + 3;
                /* Verify destination is in range. */
                if (dest + length > dest_sb_end)
                        goto return_overflow;
                /* The number of non-overlapping bytes. */
                max_non_overlap = dest - dest_back_addr;
                if (length <= max_non_overlap) {
                        /* The byte sequence doesn't overlap, just copy it. */
                        memcpy(dest, dest_back_addr, length);
                        /* Advance destination pointer. */
                        dest += length;
                } else {
                        /*
                         * The byte sequence does overlap, copy non-overlapping
                         * part and then do a slow byte by byte copy for the
                         * overlapping part. Also, advance the destination
                         * pointer.
                         */
                        memcpy(dest, dest_back_addr, max_non_overlap);
                        dest += max_non_overlap;
                        dest_back_addr += max_non_overlap;
                        length -= max_non_overlap;
                        while (length--)
                                *dest++ = *dest_back_addr++;
                }
                /* Advance source position and continue with the next token. */
                cb += 2;
        }
        /* No tokens left in the current tag. Continue with the next tag. */
        goto do_next_tag;
return_overflow:
        errno = EOVERFLOW;
        ntfs_log_perror("Failed to decompress file");
        return -1;
}

/**
 * ntfs_is_cb_compressed - internal function, do not use
 *
 * This is a very specialised function determining if a cb is compressed or
 * uncompressed. It is assumed that checking for a sparse cb has already been
 * performed and that the cb is not sparse. It makes all sorts of other
 * assumptions as well and hence it is not useful anywhere other than where it
 * is used at the moment. Please, do not make this function available for use
 * outside of compress.c as it is bound to confuse people and not do what they
 * want.
 *
 * Return TRUE on errors so that the error will be detected later on in the
 * code. Might be a bit confusing to debug but there really should never be
 * errors coming from here.
 */
static BOOL ntfs_is_cb_compressed(ntfs_attr *na, runlist_element *rl,
                        VCN cb_start_vcn, int cb_clusters)
{
        /*
         * The simplest case: the run starting at @cb_start_vcn contains
         * @cb_clusters clusters which are all not sparse, thus the cb is not
         * compressed.
         */
restart:
        cb_clusters -= rl->length - (cb_start_vcn - rl->vcn);
        while (cb_clusters > 0) {
                /* Go to the next run. */
                rl++;
                /* Map the next runlist fragment if it is not mapped. */
                if (rl->lcn < LCN_HOLE || !rl->length) {
                        cb_start_vcn = rl->vcn;
                        rl = ntfs_attr_find_vcn(na, rl->vcn);
                        if (!rl || rl->lcn < LCN_HOLE || !rl->length)
                                return TRUE;
                        /*
                         * If the runs were merged need to deal with the
                         * resulting partial run so simply restart.
                         */
                        if (rl->vcn < cb_start_vcn)
                                goto restart;
                }
                /* If the current run is sparse, the cb is compressed. */
                if (rl->lcn == LCN_HOLE)
                        return TRUE;
                /* If the whole cb is not sparse, it is not compressed. */
                if (rl->length >= cb_clusters)
                        return FALSE;
                cb_clusters -= rl->length;
        }
        /* All cb_clusters were not sparse thus the cb is not compressed. */
        return FALSE;
}

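/*
 * Example of the rule implemented above: for a 16-cluster compression block
 * whose mapping shows 10 allocated clusters followed by a hole, the hole is
 * met before the 16 clusters are exhausted, so the cb is compressed; a
 * first run covering all 16 clusters means the cb is stored uncompressed.
 */
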
/**
 * ntfs_compressed_attr_pread - read from a compressed attribute
 * @na:         ntfs attribute to read from
 * @pos:        byte position in the attribute to begin reading from
 * @count:      number of bytes to read
 * @b:          output data buffer
 *
 * NOTE: You probably want to be using attrib.c::ntfs_attr_pread() instead.
 *
 * This function will read @count bytes starting at offset @pos from the
 * compressed ntfs attribute @na into the data buffer @b.
 *
 * On success, return the number of successfully read bytes. If this number
 * is lower than @count, the read reached the end of file or an error was
 * encountered during the read, so that the read is partial. 0 means end of
 * file or nothing was read (also return 0 when @count is 0).
 *
 * On error, if nothing has been read, return -1 with errno set appropriately
 * to the return code of ntfs_pread(), or to EINVAL in case of invalid
 * arguments.
 */
s64 ntfs_compressed_attr_pread(ntfs_attr *na, s64 pos, s64 count, void *b)
{
        s64 br, to_read, ofs, total, total2;
        u64 cb_size_mask;
        VCN start_vcn, vcn, end_vcn;
        ntfs_volume *vol;
        runlist_element *rl;
        u8 *dest, *cb, *cb_pos, *cb_end;
        u32 cb_size;
        int err;
        ATTR_FLAGS data_flags;
        FILE_ATTR_FLAGS compression;
        unsigned int nr_cbs, cb_clusters;

        /* Validate the arguments before dereferencing na. */
        if (!na || !na->ni || !na->ni->vol || !b
                        || ((na->data_flags & ATTR_COMPRESSION_MASK)
                                != ATTR_IS_COMPRESSED)
                        || pos < 0 || count < 0) {
                errno = EINVAL;
                return -1;
        }
        ntfs_log_trace("Entering for inode 0x%llx, attr 0x%x, pos 0x%llx, count 0x%llx.\n",
                        (unsigned long long)na->ni->mft_no, le32_to_cpu(na->type),
                        (long long)pos, (long long)count);
        data_flags = na->data_flags;
        compression = na->ni->flags & FILE_ATTR_COMPRESSED;
        /*
         * Encrypted attributes are not supported. We return access denied,
         * which is what Windows NT4 does, too.
         */
        if (NAttrEncrypted(na)) {
                errno = EACCES;
                return -1;
        }
        if (!count)
                return 0;
        /* Truncate reads beyond end of attribute. */
        if (pos + count > na->data_size) {
                if (pos >= na->data_size) {
                        return 0;
                }
                count = na->data_size - pos;
        }
        /* If it is a resident attribute, simply use ntfs_attr_pread(). */
        if (!NAttrNonResident(na))
                return ntfs_attr_pread(na, pos, count, b);
        total = total2 = 0;
        /* Zero out reads beyond initialized size. */
        if (pos + count > na->initialized_size) {
                if (pos >= na->initialized_size) {
                        memset(b, 0, count);
                        return count;
                }
                total2 = pos + count - na->initialized_size;
                count -= total2;
                memset((u8*)b + count, 0, total2);
        }
        vol = na->ni->vol;
        cb_size = na->compression_block_size;
        cb_size_mask = cb_size - 1UL;
        cb_clusters = na->compression_block_clusters;

        /* Need a temporary buffer for each loaded compression block. */
        cb = (u8*)ntfs_malloc(cb_size);
        if (!cb)
                return -1;

        /* Need a temporary buffer for each uncompressed block. */
        dest = (u8*)ntfs_malloc(cb_size);
        if (!dest) {
                free(cb);
                return -1;
        }
        /*
         * The first vcn in the first compression block (cb) which we need to
         * decompress.
         */
        start_vcn = (pos & ~cb_size_mask) >> vol->cluster_size_bits;
        /* Offset in the uncompressed cb at which to start reading data. */
        ofs = pos & cb_size_mask;
        /*
         * The first vcn in the cb after the last cb which we need to
         * decompress.
         */
        end_vcn = ((pos + count + cb_size - 1) & ~cb_size_mask) >>
                        vol->cluster_size_bits;
        /* Number of compression blocks (cbs) in the wanted vcn range. */
        nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits >>
                        na->compression_block_size_bits;
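        /*
         * Example with 4096-byte clusters and a 64kiB compression block
         * (cb_size_mask = 0xffff): pos = 70000 and count = 10000 give
         * start_vcn = 16, ofs = 4464, end_vcn = 32 and nr_cbs = 1, i.e. the
         * whole request lies in the second compression block.
         */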
        cb_end = cb + cb_size;
do_next_cb:
        nr_cbs--;
        cb_pos = cb;
        vcn = start_vcn;
        start_vcn += cb_clusters;

        /* Check whether the compression block is sparse. */
        rl = ntfs_attr_find_vcn(na, vcn);
        if (!rl || rl->lcn < LCN_HOLE) {
                free(cb);
                free(dest);
                if (total)
                        return total;
                /* FIXME: Do we want EIO or the error code? (AIA) */
                errno = EIO;
                return -1;
        }
        if (rl->lcn == LCN_HOLE) {
                /* Sparse cb, zero out destination range overlapping the cb. */
                ntfs_log_debug("Found sparse compression block.\n");
                to_read = min(count, cb_size - ofs);
                memset(b, 0, to_read);
                ofs = 0;
                total += to_read;
                count -= to_read;
                b = (u8*)b + to_read;
        } else if (!ntfs_is_cb_compressed(na, rl, vcn, cb_clusters)) {
                s64 tdata_size, tinitialized_size;
                /*
                 * Uncompressed cb, read it straight into the destination range
                 * overlapping the cb.
                 */
                ntfs_log_debug("Found uncompressed compression block.\n");
                /*
                 * Read the uncompressed data into the destination buffer.
                 * NOTE: We cheat a little bit here by marking the attribute as
                 * not compressed in the ntfs_attr structure so that we can
                 * read the data by simply using ntfs_attr_pread().  (-8
                 * NOTE: we have to modify data_size and initialized_size
                 * temporarily as well...
                 */
                to_read = min(count, cb_size - ofs);
                ofs += vcn << vol->cluster_size_bits;
                NAttrClearCompressed(na);
                na->data_flags &= ~ATTR_COMPRESSION_MASK;
                tdata_size = na->data_size;
                tinitialized_size = na->initialized_size;
                na->data_size = na->initialized_size = na->allocated_size;
                do {
                        br = ntfs_attr_pread(na, ofs, to_read, b);
                        if (br <= 0) {
                                if (!br) {
                                        ntfs_log_error("Failed to read an"
                                                " uncompressed cluster,"
                                                " inode %lld offs 0x%llx\n",
                                                (long long)na->ni->mft_no,
                                                (long long)ofs);
                                        errno = EIO;
                                }
                                err = errno;
                                na->data_size = tdata_size;
                                na->initialized_size = tinitialized_size;
                                na->ni->flags |= compression;
                                na->data_flags = data_flags;
                                free(cb);
                                free(dest);
                                if (total)
                                        return total;
                                errno = err;
                                return br;
                        }
                        total += br;
                        count -= br;
                        b = (u8*)b + br;
                        to_read -= br;
                        ofs += br;
                } while (to_read > 0);
                na->data_size = tdata_size;
                na->initialized_size = tinitialized_size;
                na->ni->flags |= compression;
                na->data_flags = data_flags;
                ofs = 0;
        } else {
                s64 tdata_size, tinitialized_size;
                u32 decompsz;

                /*
                 * Compressed cb, decompress it into the temporary buffer, then
                 * copy the data to the destination range overlapping the cb.
                 */
                ntfs_log_debug("Found compressed compression block.\n");
                /*
                 * Read the compressed data into the temporary buffer.
                 * NOTE: We cheat a little bit here by marking the attribute as
                 * not compressed in the ntfs_attr structure so that we can
                 * read the raw, compressed data by simply using
                 * ntfs_attr_pread().  (-8
                 * NOTE: We have to modify data_size and initialized_size
                 * temporarily as well...
                 */
                to_read = cb_size;
                NAttrClearCompressed(na);
                na->data_flags &= ~ATTR_COMPRESSION_MASK;
                tdata_size = na->data_size;
                tinitialized_size = na->initialized_size;
                na->data_size = na->initialized_size = na->allocated_size;
                do {
                        br = ntfs_attr_pread(na,
                                        (vcn << vol->cluster_size_bits) +
                                        (cb_pos - cb), to_read, cb_pos);
                        if (br <= 0) {
                                if (!br) {
                                        ntfs_log_error("Failed to read a"
                                                " compressed cluster,"
                                                " inode %lld offs 0x%llx\n",
                                                (long long)na->ni->mft_no,
                                                (long long)(vcn << vol->cluster_size_bits));
                                        errno = EIO;
                                }
                                err = errno;
                                na->data_size = tdata_size;
                                na->initialized_size = tinitialized_size;
                                na->ni->flags |= compression;
                                na->data_flags = data_flags;
                                free(cb);
                                free(dest);
                                if (total)
                                        return total;
                                errno = err;
                                return br;
                        }
                        cb_pos += br;
                        to_read -= br;
                } while (to_read > 0);
                na->data_size = tdata_size;
                na->initialized_size = tinitialized_size;
                na->ni->flags |= compression;
                na->data_flags = data_flags;
                /* Just a precaution. */
                if (cb_pos + 2 <= cb_end)
                        *(u16*)cb_pos = 0;
                ntfs_log_debug("Successfully read the compression block.\n");
                /* Do not decompress beyond the requested block */
                to_read = min(count, cb_size - ofs);
                decompsz = ((ofs + to_read - 1) | (NTFS_SB_SIZE - 1)) + 1;
                if (ntfs_decompress(dest, decompsz, cb, cb_size) < 0) {
                        err = errno;
                        free(cb);
                        free(dest);
                        if (total)
                                return total;
                        errno = err;
                        return -1;
                }
                memcpy(b, dest + ofs, to_read);
                total += to_read;
                count -= to_read;
                b = (u8*)b + to_read;
                ofs = 0;
        }
        /* Do we have more work to do? */
        if (nr_cbs)
                goto do_next_cb;
        /* We no longer need the buffers. */
        free(cb);
        free(dest);
        /* Return number of bytes read. */
        return total + total2;
}

/*
 * Read data from a set of clusters
 *
 * Returns the amount of data read
 */

static u32 read_clusters(ntfs_volume *vol, const runlist_element *rl,
                        s64 offs, u32 to_read, char *inbuf)
{
        u32 count;
        int xgot;
        u32 got;
        s64 xpos;
        BOOL first;
        char *xinbuf;
        const runlist_element *xrl;

        got = 0;
        xrl = rl;
        xinbuf = inbuf;
        first = TRUE;
        do {
                count = xrl->length << vol->cluster_size_bits;
                xpos = xrl->lcn << vol->cluster_size_bits;
                if (first) {
                        count -= offs;
                        xpos += offs;
                }
                if ((to_read - got) < count)
                        count = to_read - got;
                xgot = ntfs_pread(vol->dev, xpos, count, xinbuf);
                if (xgot == (int)count) {
                        got += count;
                        xpos += count;
                        xinbuf += count;
                        xrl++;
                }
                first = FALSE;
        } while ((xgot == (int)count) && (got < to_read));
        return (got);
}

/*
 * Write data to a set of clusters
 *
 * Returns the amount of data written
 */

static s32 write_clusters(ntfs_volume *vol, const runlist_element *rl,
                        s64 offs, s32 to_write, const char *outbuf)
{
        s32 count;
        s32 put, xput;
        s64 xpos;
        BOOL first;
        const char *xoutbuf;
        const runlist_element *xrl;

        put = 0;
        xrl = rl;
        xoutbuf = outbuf;
        first = TRUE;
        do {
                count = xrl->length << vol->cluster_size_bits;
                xpos = xrl->lcn << vol->cluster_size_bits;
                if (first) {
                        count -= offs;
                        xpos += offs;
                }
                if ((to_write - put) < count)
                        count = to_write - put;
                xput = ntfs_pwrite(vol->dev, xpos, count, xoutbuf);
                if (xput == count) {
                        put += count;
                        xpos += count;
                        xoutbuf += count;
                        xrl++;
                }
                first = FALSE;
        } while ((xput == count) && (put < to_write));
        return (put);
}

/*
 * Compress and write a set of blocks
 *
 * returns the size actually written (rounded to a full cluster)
 * or 0 if all zeroes (nothing is written)
 * or -1 if it could not compress (nothing is written)
 * or -2 if there was an irrecoverable error (errno set)
 */

static s32 ntfs_comp_set(ntfs_attr *na, runlist_element *rl,
                        s64 offs, u32 insz, const char *inbuf)
{
        ntfs_volume *vol;
        char *outbuf;
        char *pbuf;
        u32 compsz;
        s32 written;
        s32 rounded;
        unsigned int clsz;
        u32 p;
        unsigned int sz;
        unsigned int bsz;
        BOOL fail;
        BOOL allzeroes;
        /* a single compressed zero */
        static char onezero[] = { 0x01, 0xb0, 0x00, 0x00 };
        /* a couple of compressed zeroes */
        static char twozeroes[] = { 0x02, 0xb0, 0x00, 0x00, 0x00 };
        /* more compressed zeroes, to be followed by some count */
        static char morezeroes[] = { 0x03, 0xb0, 0x02, 0x00 };

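        /*
         * How these patterns arise: onezero is what ntfs_compress_block()
         * emits for a single zero byte -- header 0xb001 (compressed, total
         * size 1 + 3 = 4 bytes), tag 0x00, then the literal 0x00. twozeroes
         * is the same for two zero bytes, and morezeroes matches the first
         * four bytes (header, tag 0x02, literal 0x00) of the encoding of a
         * longer run of zeroes, whose trailing two-byte phrase token (the
         * repeat count) is not compared.
         */
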
        vol = na->ni->vol;
        written = -1; /* default return */
        clsz = 1 << vol->cluster_size_bits;
        /* may need 2 extra bytes per block and 2 more bytes */
        outbuf = (char*)ntfs_malloc(na->compression_block_size
                        + 2*(na->compression_block_size/NTFS_SB_SIZE)
                        + 2);
        if (outbuf) {
                fail = FALSE;
                compsz = 0;
                allzeroes = TRUE;
                for (p=0; (p<insz) && !fail; p+=NTFS_SB_SIZE) {
                        if ((p + NTFS_SB_SIZE) < insz)
                                bsz = NTFS_SB_SIZE;
                        else
                                bsz = insz - p;
                        pbuf = &outbuf[compsz];
                        sz = ntfs_compress_block(&inbuf[p],bsz,pbuf);
                        /* fail if all the clusters (or more) are needed */
                        if (!sz || ((compsz + sz + clsz + 2)
                                         > na->compression_block_size))
                                fail = TRUE;
                        else {
                                if (allzeroes) {
                                        /* check whether this is all zeroes */
                                        switch (sz) {
                                        case 4 :
                                                allzeroes = !memcmp(
                                                        pbuf,onezero,4);
                                                break;
                                        case 5 :
                                                allzeroes = !memcmp(
                                                        pbuf,twozeroes,5);
                                                break;
                                        case 6 :
                                                allzeroes = !memcmp(
                                                        pbuf,morezeroes,4);
                                                break;
                                        default :
                                                allzeroes = FALSE;
                                                break;
                                        }
                                }
                                compsz += sz;
                        }
                }
                if (!fail && !allzeroes) {
                        /* add a couple of null bytes, space has been checked */
                        outbuf[compsz++] = 0;
                        outbuf[compsz++] = 0;
                        /* write a full cluster, to avoid partial reading */
                        rounded = ((compsz - 1) | (clsz - 1)) + 1;
                        memset(&outbuf[compsz], 0, rounded - compsz);
                        written = write_clusters(vol, rl, offs, rounded, outbuf);
                        if (written != rounded) {
                                /*
                                 * TODO : previously written text has been
                                 * spoilt, should return a specific error
                                 */
                                ntfs_log_error("error writing compressed data\n");
                                errno = EIO;
                                written = -2;
                        }
                } else
                        if (!fail)
                                written = 0;
                free(outbuf);
        }
        return (written);
}

/*
 * Check the validity of a compressed runlist
 * The check starts at the beginning of the current run and ends
 * at the end of the runlist
 * errno is set if the runlist is not valid
 */

static BOOL valid_compressed_run(ntfs_attr *na, runlist_element *rl,
                        BOOL fullcheck, const char *text)
{
        runlist_element *xrl;
        const char *err;
        BOOL ok = TRUE;

        xrl = rl;
        while (xrl->vcn & (na->compression_block_clusters - 1))
                xrl--;
        err = (const char*)NULL;
        while (xrl->length) {
                if ((xrl->vcn + xrl->length) != xrl[1].vcn)
                        err = "Runs not adjacent";
                if (xrl->lcn == LCN_HOLE) {
                        if ((xrl->vcn + xrl->length)
                            & (na->compression_block_clusters - 1)) {
                                err = "Invalid hole";
                        }
                        if (fullcheck && (xrl[1].lcn == LCN_HOLE)) {
                                err = "Adjacent holes";
                        }
                }
                if (err) {
                        ntfs_log_error("%s at %s index %ld inode %lld\n",
                                err, text, (long)(xrl - na->rl),
                                (long long)na->ni->mft_no);
                        errno = EIO;
                        ok = FALSE;
                        err = (const char*)NULL;
                }
                xrl++;
        }
        return (ok);
}

/*
 * Free unneeded clusters after overwriting compressed data
 *
 * This generally requires one or two empty slots at the end of runlist,
 * but we do not want to reallocate the runlist here because
 * there are many pointers to it.
 * So the empty slots have to be reserved beforehand
 *
 * Returns zero unless some error occurred (described by errno)
 *
 *      +======= start of block =====+
 *   0  |A chunk may overflow        | <-- rl        usedcnt : A + B
 *      |A on previous block         |                       then B
 *      |A                           |
 *      +-- end of allocated chunk --+               freelength : C
 *      |B                           |               (incl overflow)
 *      +== end of compressed data ==+
 *      |C                           | <-- freerl    freecnt : C + D
 *      |C chunk may overflow        |
 *      |C on next block             |
 *      +-- end of allocated chunk --+
 *      |D                           |
 *      |D chunk may overflow        |
 *  15  |D on next block             |
 *      +======== end of block ======+
 *
 */

static int ntfs_compress_overwr_free(ntfs_attr *na, runlist_element *rl,
                        s32 usedcnt, s32 freecnt, VCN *update_from)
{
        BOOL beginhole;
        BOOL mergeholes;
        s32 oldlength;
        s32 freelength;
        s64 freelcn;
        s64 freevcn;
        runlist_element *freerl;
        ntfs_volume *vol;
        s32 carry;
        int res;

        vol = na->ni->vol;
        res = 0;
        freelcn = rl->lcn + usedcnt;
        freevcn = rl->vcn + usedcnt;
        freelength = rl->length - usedcnt;
        beginhole = !usedcnt && !rl->vcn;
        /* can merge with hole before ? */
        mergeholes = !usedcnt
                        && rl[0].vcn
                        && (rl[-1].lcn == LCN_HOLE);
        /* truncate current run, carry to subsequent hole */
        carry = freelength;
        oldlength = rl->length;
        if (mergeholes) {
                /* merging with a hole before */
                freerl = rl;
        } else {
                rl->length -= freelength; /* warning : can be zero */
                freerl = ++rl;
        }
        if (!mergeholes && (usedcnt || beginhole)) {
                s32 freed;
                runlist_element *frl;
                runlist_element *erl;
                int holes = 0;
                BOOL threeparts;

                /* free the unneeded clusters from initial run, then freerl */
                threeparts = (freelength > freecnt);
                freed = 0;
                frl = freerl;
                if (freelength) {
                        res = ntfs_cluster_free_basic(vol,freelcn,
                                (threeparts ? freecnt : freelength));
                        if (!res)
                                freed += (threeparts ? freecnt : freelength);
                        if (!usedcnt) {
                                holes++;
                                freerl--;
                                freerl->length += (threeparts
                                                ? freecnt : freelength);
                                if (freerl->vcn < *update_from)
                                        *update_from = freerl->vcn;
                        }
                }
                while (!res && frl->length && (freed < freecnt)) {
                        if (frl->length <= (freecnt - freed)) {
                                res = ntfs_cluster_free_basic(vol, frl->lcn,
                                                frl->length);
                                if (!res) {
                                        freed += frl->length;
                                        frl->lcn = LCN_HOLE;
                                        frl->length += carry;
                                        carry = 0;
                                        holes++;
                                }
                        } else {
                                res = ntfs_cluster_free_basic(vol, frl->lcn,
                                                freecnt - freed);
                                if (!res) {
                                        frl->lcn += freecnt - freed;
                                        frl->vcn += freecnt - freed;
                                        frl->length -= freecnt - freed;
                                        freed = freecnt;
                                }
                        }
                        frl++;
                }
                na->compressed_size -= freed << vol->cluster_size_bits;
                switch (holes) {
                case 0 :
                        /* there is no hole, must insert one */
                        /* space for hole has been prereserved */
                        if (freerl->lcn == LCN_HOLE) {
                                if (threeparts) {
                                        erl = freerl;
                                        while (erl->length)
                                                erl++;
                                        do {
                                                erl[2] = *erl;
                                        } while (erl-- != freerl);

                                        freerl[1].length = freelength - freecnt;
                                        freerl->length = freecnt;
                                        freerl[1].lcn = freelcn + freecnt;
                                        freerl[1].vcn = freevcn + freecnt;
                                        freerl[2].lcn = LCN_HOLE;
                                        freerl[2].vcn = freerl[1].vcn
                                                        + freerl[1].length;
                                        freerl->vcn = freevcn;
                                } else {
                                        freerl->vcn = freevcn;
                                        freerl->length += freelength;
                                }
                        } else {
                                erl = freerl;
                                while (erl->length)
                                        erl++;
                                if (threeparts) {
                                        do {
                                                erl[2] = *erl;
                                        } while (erl-- != freerl);
                                        freerl[1].lcn = freelcn + freecnt;
                                        freerl[1].vcn = freevcn + freecnt;
                                        freerl[1].length = oldlength - usedcnt - freecnt;
                                } else {
                                        do {
                                                erl[1] = *erl;
                                        } while (erl-- != freerl);
                                }
                                freerl->lcn = LCN_HOLE;
                                freerl->vcn = freevcn;
                                freerl->length = freecnt;
                        }
                        break;
                case 1 :
                        /* there is a single hole, may have to merge */
                        freerl->vcn = freevcn;
                        freerl->length = freecnt;
                        if (freerl[1].lcn == LCN_HOLE) {
                                freerl->length += freerl[1].length;
                                erl = freerl;
                                do {
                                        erl++;
                                        *erl = erl[1];
                                } while (erl->length);
                        }
                        break;
                default :
                        /* there were several holes, must merge them */
                        freerl->lcn = LCN_HOLE;
                        freerl->vcn = freevcn;
                        freerl->length = freecnt;
                        if (freerl[holes].lcn == LCN_HOLE) {
                                freerl->length += freerl[holes].length;
                                holes++;
                        }
                        erl = freerl;
                        do {
                                erl++;
                                *erl = erl[holes - 1];
                        } while (erl->length);
                        break;
                }
        } else {
                s32 freed;
                runlist_element *frl;
                runlist_element *xrl;

                freed = 0;
                frl = freerl--;
                if (freerl->vcn < *update_from)
                        *update_from = freerl->vcn;
                while (!res && frl->length && (freed < freecnt)) {
                        if (frl->length <= (freecnt - freed)) {
                                freerl->length += frl->length;
                                freed += frl->length;
                                res = ntfs_cluster_free_basic(vol, frl->lcn,
                                                frl->length);
                                frl++;
                        } else {
                                freerl->length += freecnt - freed;
                                res = ntfs_cluster_free_basic(vol, frl->lcn,
                                                freecnt - freed);
                                frl->lcn += freecnt - freed;
                                frl->vcn += freecnt - freed;
                                frl->length -= freecnt - freed;
                                freed = freecnt;
                        }
                }
                /* remove unneeded runlist entries */
                xrl = freerl;
                /* group with next run if also a hole */
                if (frl->length && (frl->lcn == LCN_HOLE)) {
                        xrl->length += frl->length;
                        frl++;
                }
                while (frl->length) {
                        *++xrl = *frl++;
                }
                *++xrl = *frl; /* terminator */
                na->compressed_size -= freed << vol->cluster_size_bits;
        }
        return (res);
}

/*
 * Free unneeded clusters after compression
 *
 * This generally requires one or two empty slots at the end of runlist,
 * but we do not want to reallocate the runlist here because
 * there are many pointers to it.
 * So the empty slots have to be reserved beforehand
 *
 * Returns zero unless some error occurred (described by errno)
 */

static int ntfs_compress_free(ntfs_attr *na, runlist_element *rl,
|
2010-05-25 16:25:31 +08:00
|
|
|
s64 used, s64 reserved, BOOL appending,
|
|
|
|
VCN *update_from)
|
2009-04-03 21:33:24 +08:00
|
|
|
{
|
2010-05-25 16:25:31 +08:00
|
|
|
s32 freecnt;
|
|
|
|
s32 usedcnt;
|
2009-04-03 21:33:24 +08:00
|
|
|
int res;
|
|
|
|
s64 freelcn;
|
|
|
|
s64 freevcn;
|
2010-05-25 16:25:31 +08:00
|
|
|
s32 freelength;
|
2009-08-11 16:02:59 +08:00
|
|
|
BOOL mergeholes;
|
|
|
|
BOOL beginhole;
|
2009-04-03 21:33:24 +08:00
|
|
|
ntfs_volume *vol;
|
|
|
|
runlist_element *freerl;
|
|
|
|
|
|
|
|
res = -1; /* default return */
|
|
|
|
vol = na->ni->vol;
|
|
|
|
freecnt = (reserved - used) >> vol->cluster_size_bits;
|
|
|
|
usedcnt = (reserved >> vol->cluster_size_bits) - freecnt;
|
2010-05-25 16:25:31 +08:00
|
|
|
if (rl->vcn < *update_from)
|
|
|
|
*update_from = rl->vcn;
|
2009-04-03 21:33:24 +08:00
|
|
|
/* skip entries fully used, if any */
|
|
|
|
while (rl->length && (rl->length < usedcnt)) {
|
2010-05-25 16:25:31 +08:00
|
|
|
usedcnt -= rl->length; /* must be > 0 */
|
2009-04-03 21:33:24 +08:00
|
|
|
rl++;
|
|
|
|
}
|
|
|
|
if (rl->length) {
|
|
|
|
/*
|
|
|
|
* Splitting the current allocation block requires
|
|
|
|
* an extra runlist element to create the hole.
|
|
|
|
* The required entry has been prereserved when
|
|
|
|
* mapping the runlist.
|
|
|
|
*/
		/* get the free part in the initial run */
		freelcn = rl->lcn + usedcnt;
		freevcn = rl->vcn + usedcnt;
		/* new count of allocated clusters */
		if (!((freevcn + freecnt)
			& (na->compression_block_clusters - 1))) {
			if (!appending)
				res = ntfs_compress_overwr_free(na,rl,
						usedcnt,freecnt,update_from);
			else {
				freelength = rl->length - usedcnt;
				beginhole = !usedcnt && !rl->vcn;
				mergeholes = !usedcnt
						&& rl[0].vcn
						&& (rl[-1].lcn == LCN_HOLE);
				if (mergeholes) {
					s32 carry;

					/* shorten the runs which have free space */
					carry = freecnt;
					freerl = rl;
					while (freerl->length < carry) {
						carry -= freerl->length;
						freerl++;
					}
					freerl->length = carry;
					freerl = rl;
				} else {
					rl->length = usedcnt; /* can be zero ? */
					freerl = ++rl;
				}
				if ((freelength > 0)
				    && !mergeholes
				    && (usedcnt || beginhole)) {
					/*
					 * Move the unused part to the end. Doing so,
					 * the vcn will be out of order. This does
					 * no harm: the vcn are meaningless now, and
					 * only the lcn are meaningful for freeing.
					 */
					/* locate the current end */
					while (rl->length)
						rl++;
					/* new terminator relocated */
					rl[1].vcn = rl->vcn;
					rl[1].lcn = LCN_ENOENT;
					rl[1].length = 0;
					/* hole, currently allocated */
					rl->vcn = freevcn;
					rl->lcn = freelcn;
					rl->length = freelength;
				} else {
					/* why is this different from the begin hole case ? */
					if ((freelength > 0)
					    && !mergeholes
					    && !usedcnt) {
						freerl--;
						freerl->length = freelength;
						if (freerl->vcn < *update_from)
							*update_from
								= freerl->vcn;
					}
				}
				/* free the hole */
				res = ntfs_cluster_free_from_rl(vol,freerl);
				if (!res) {
					na->compressed_size -= freecnt
						<< vol->cluster_size_bits;
					if (mergeholes) {
						/* merge with the adjacent hole */
						freerl--;
						freerl->length += freecnt;
					} else {
						if (beginhole)
							freerl--;
						/* mark the hole as free */
						freerl->lcn = LCN_HOLE;
						freerl->vcn = freevcn;
						freerl->length = freecnt;
					}
					if (freerl->vcn < *update_from)
						*update_from = freerl->vcn;
					/* and set up the new end */
					freerl[1].lcn = LCN_ENOENT;
					freerl[1].vcn = freevcn + freecnt;
					freerl[1].length = 0;
				}
			}
		} else {
			ntfs_log_error("Bad end of a compression block set\n");
			errno = EIO;
		}
	} else {
		ntfs_log_error("No cluster to free after compression\n");
		errno = EIO;
	}
	NAttrSetRunlistDirty(na);
	return (res);
}
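
/*
 * A minimal sketch of the split computed above, guarded by the
 * hypothetical NTFS_COMPRESS_EXAMPLES macro (never defined in a real
 * build). A single run is cut after usedcnt clusters and the tail
 * becomes the hole to be freed: a 16-cluster run whose data compressed
 * down to 5 clusters yields an 11-cluster hole at vcn + 5.
 */
#ifdef NTFS_COMPRESS_EXAMPLES
static void example_split_run(void)
{
	runlist_element run;
	s32 usedcnt = 5;	/* clusters still needed after compression */
	s32 freecnt;
	s64 freevcn;
	s64 freelcn;

	run.vcn = 32;
	run.lcn = 1000;
	run.length = 16;
	freecnt = (s32)run.length - usedcnt;	/* 11 */
	freevcn = run.vcn + usedcnt;		/* 37 */
	freelcn = run.lcn + usedcnt;		/* 1005 */
	ntfs_log_debug("hole: vcn %lld lcn %lld length %d\n",
			(long long)freevcn, (long long)freelcn, (int)freecnt);
}
#endif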

/*
 * Read existing data, decompress, and append the buffer.
 * Do nothing if anything fails.
 */

static int ntfs_read_append(ntfs_attr *na, const runlist_element *rl,
			s64 offs, u32 compsz, s32 pos, BOOL appending,
			char *outbuf, s64 to_write, const void *b)
{
	int fail = 1;
	char *compbuf;
	u32 decompsz;
	u32 got;

	if (compsz == na->compression_block_size) {
		/* if the full block was requested, it was a hole */
		memset(outbuf,0,compsz);
		memcpy(&outbuf[pos],b,to_write);
		fail = 0;
	} else {
		compbuf = (char*)ntfs_malloc(compsz);
		if (compbuf) {
			/* must align to a full sub-block for decompression */
			if (appending)
				decompsz = ((pos - 1) | (NTFS_SB_SIZE - 1)) + 1;
			else
				decompsz = na->compression_block_size;
			got = read_clusters(na->ni->vol, rl, offs,
					compsz, compbuf);
			if ((got == compsz)
			    && !ntfs_decompress((u8*)outbuf,decompsz,
					(u8*)compbuf,compsz)) {
				memcpy(&outbuf[pos],b,to_write);
				fail = 0;
			}
			free(compbuf);
		}
	}
	return (fail);
}
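
/*
 * A minimal sketch (same hypothetical NTFS_COMPRESS_EXAMPLES guard) of
 * the rounding idiom used for decompsz above: ((x - 1) | (m - 1)) + 1
 * rounds x up to the next multiple of m when m is a power of two.
 * Decompression works on NTFS_SB_SIZE (4096-byte) sub-blocks, so
 * appending at pos == 5000 requires decompressing 8192 bytes.
 */
#ifdef NTFS_COMPRESS_EXAMPLES
static void example_subblock_roundup(void)
{
	s32 pos = 5000;
	u32 decompsz = ((pos - 1) | (NTFS_SB_SIZE - 1)) + 1;

	/* prints 8192 */
	ntfs_log_debug("decompress %u bytes for pos %d\n",
			(unsigned int)decompsz, (int)pos);
}
#endif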

/*
 * Flush a full compression block
 *
 * Returns the size actually written (rounded to a full cluster),
 *	or 0 if it could not compress (the data is then written uncompressed),
 *	or -1 if there was an irrecoverable error (errno set)
 */

static s32 ntfs_flush(ntfs_attr *na, runlist_element *rl, s64 offs,
			const char *outbuf, s32 count, BOOL compress,
			BOOL appending, VCN *update_from)
{
	s32 rounded;
	s32 written;
	int clsz;

	if (compress) {
		written = ntfs_comp_set(na, rl, offs, count, outbuf);
		if (written == -1)
			compress = FALSE;
		if ((written >= 0)
		    && ntfs_compress_free(na,rl,offs + written,
				offs + na->compression_block_size, appending,
				update_from))
			written = -1;
	} else
		written = 0;
	if (!compress) {
		clsz = 1 << na->ni->vol->cluster_size_bits;
		rounded = ((count - 1) | (clsz - 1)) + 1;
		written = write_clusters(na->ni->vol, rl,
				offs, rounded, outbuf);
		if (written != rounded)
			written = -1;
	}
	return (written);
}
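
/*
 * A small sketch, under the same hypothetical NTFS_COMPRESS_EXAMPLES
 * guard, of the round-down idiom the functions below rely on: since
 * compression_block_clusters is a power of two (usually 16),
 * "vcn & -compression_length" clears the low bits and yields the
 * first vcn of the compression block set containing vcn.
 */
#ifdef NTFS_COMPRESS_EXAMPLES
static void example_block_set_start(void)
{
	int compression_length = 16;	/* clusters per block set */
	s64 vcn = 53;
	s64 start_vcn = vcn & -compression_length;	/* 48 */

	ntfs_log_debug("vcn %lld belongs to the set starting at %lld\n",
			(long long)vcn, (long long)start_vcn);
}
#endif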

/*
 * Write some data to be compressed.
 * Compression only occurs when a few clusters (usually 16) are
 * full. When this occurs, an extra runlist slot may be needed, so
 * it has to be reserved beforehand.
 *
 * Returns the size of uncompressed data written,
 *	or negative if an error occurred.
 * When the returned size is less than requested, new clusters have
 * to be allocated before the function is called again.
 */

s64 ntfs_compressed_pwrite(ntfs_attr *na, runlist_element *wrl, s64 wpos,
			s64 offs, s64 to_write, s64 rounded,
			const void *b, int compressed_part,
			VCN *update_from)
{
	ntfs_volume *vol;
	runlist_element *brl; /* entry containing the beginning of block */
	int compression_length;
	s64 written;
	s64 to_read;
	s64 to_flush;
	s64 roffs;
	s64 got;
	s64 start_vcn;
	s64 nextblock;
	s64 endwrite;
	u32 compsz;
	char *inbuf;
	char *outbuf;
	BOOL fail;
	BOOL done;
	BOOL compress;
	BOOL appending;

	if (!valid_compressed_run(na,wrl,FALSE,"begin compressed write")) {
		return (-1);
	}
	if ((*update_from < 0)
	    || (compressed_part < 0)
	    || (compressed_part > (int)na->compression_block_clusters)) {
		ntfs_log_error("Bad update vcn or compressed_part %d for compressed write\n",
			compressed_part);
		errno = EIO;
		return (-1);
	}
	/* make sure there are two unused entries in the runlist */
	if (na->unused_runs < 2) {
		ntfs_log_error("No unused runs for compressed write\n");
		errno = EIO;
		return (-1);
	}
	if (wrl->vcn < *update_from)
		*update_from = wrl->vcn;
	written = -1; /* default return */
	vol = na->ni->vol;
	compression_length = na->compression_block_clusters;
	compress = FALSE;
	done = FALSE;
	/*
	 * Cannot accept writing beyond the current compression set,
	 * because when compression occurs, clusters are freed
	 * and have to be reallocated.
	 * (This cannot happen with standard fuse 4K buffers.)
	 * The caller has to avoid this situation, or face the consequences.
	 */
	nextblock = ((offs + (wrl->vcn << vol->cluster_size_bits))
			| (na->compression_block_size - 1)) + 1;
	/* determine whether we are appending to the file */
	endwrite = offs + to_write + (wrl->vcn << vol->cluster_size_bits);
	appending = endwrite >= na->initialized_size;
	if (endwrite >= nextblock) {
		/* it is time to compress */
		compress = TRUE;
		/* only process what we can */
		to_write = rounded = nextblock
			- (offs + (wrl->vcn << vol->cluster_size_bits));
	}
	start_vcn = 0;
	fail = FALSE;
	brl = wrl;
	roffs = 0;
	/*
	 * If we are about to compress or we need to decompress
	 * existing data, we have to process a full set of blocks,
	 * so relocate the parameters to the beginning of the
	 * allocation containing the first byte of the set of blocks.
	 */
	if (compress || compressed_part) {
		/* find the beginning of the block */
		start_vcn = (wrl->vcn + (offs >> vol->cluster_size_bits))
				& -compression_length;
		if (start_vcn < *update_from)
			*update_from = start_vcn;
		while (brl->vcn && (brl->vcn > start_vcn)) {
			/* jumping back over a hole means big trouble */
			if (brl->lcn == (LCN)LCN_HOLE) {
				ntfs_log_error("jump back over a hole when appending\n");
				fail = TRUE;
				errno = EIO;
			}
			brl--;
			offs += brl->length << vol->cluster_size_bits;
		}
		roffs = (start_vcn - brl->vcn) << vol->cluster_size_bits;
	}
	if (compressed_part && !fail) {
		/*
		 * The set of compression blocks contains compressed data
		 * (we are reopening an existing file to append to it).
		 * Decompress the data and append.
		 */
		compsz = (s32)compressed_part << vol->cluster_size_bits;
		outbuf = (char*)ntfs_malloc(na->compression_block_size);
		if (outbuf) {
			if (appending) {
				to_read = offs - roffs;
				to_flush = to_read + to_write;
			} else {
				to_read = na->data_size
					- (brl->vcn << vol->cluster_size_bits);
				if (to_read > na->compression_block_size)
					to_read = na->compression_block_size;
				to_flush = to_read;
			}
			if (!ntfs_read_append(na, brl, roffs, compsz,
					(s32)(offs - roffs), appending,
					outbuf, to_write, b)) {
				written = ntfs_flush(na, brl, roffs,
					outbuf, to_flush, compress, appending,
					update_from);
				if (written >= 0) {
					written = to_write;
					done = TRUE;
				}
			}
			free(outbuf);
		}
	} else {
		if (compress && !fail) {
			/*
			 * We are filling up a block: read the full set
			 * of blocks and compress it.
			 */
			inbuf = (char*)ntfs_malloc(na->compression_block_size);
			if (inbuf) {
				to_read = offs - roffs;
				if (to_read)
					got = read_clusters(vol, brl, roffs,
							to_read, inbuf);
				else
					got = 0;
				if (got == to_read) {
					memcpy(&inbuf[to_read],b,to_write);
					written = ntfs_comp_set(na, brl, roffs,
						to_read + to_write, inbuf);
					/*
					 * If compression was not successful,
					 * only write the part which was requested.
					 */
					if ((written >= 0)
						/* free the unused clusters */
					    && !ntfs_compress_free(na,brl,
							written + roffs,
							na->compression_block_size
								+ roffs,
							appending, update_from)) {
						done = TRUE;
						written = to_write;
					}
				}
				free(inbuf);
			}
		}
		if (!done) {
			/*
			 * If the compression block is not full, or
			 * if compression failed for whatever reason,
			 * write uncompressed.
			 */
			/* check we are not overflowing the current allocation */
			if ((wpos + rounded)
			    > ((wrl->lcn + wrl->length)
					<< vol->cluster_size_bits)) {
				ntfs_log_error("writing on unallocated clusters\n");
				errno = EIO;
			} else {
				written = ntfs_pwrite(vol->dev, wpos,
					rounded, b);
				if (written == rounded)
					written = to_write;
			}
		}
	}
	if ((written >= 0)
	    && !valid_compressed_run(na,wrl,TRUE,"end compressed write"))
		written = -1;
	return (written);
}
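
/*
 * A self-contained sketch (same hypothetical guard) of the clamping
 * rule enforced above: a write may not cross the end of the current
 * compression block set, so to_write is clipped to the distance from
 * the write position to the next set boundary, and the caller must
 * allocate clusters and call again for the remainder. The 65536-byte
 * set size assumes 4K clusters and 16 clusters per set.
 */
#ifdef NTFS_COMPRESS_EXAMPLES
static void example_clamp_to_block_set(void)
{
	s64 cb_size = 65536;	/* compression block (set) size */
	s64 pos = 61440;	/* byte position within the attribute */
	s64 to_write = 8192;	/* would end at 69632, past the boundary */
	s64 nextblock = (pos | (cb_size - 1)) + 1;	/* 65536 */

	if (pos + to_write >= nextblock)
		to_write = nextblock - pos;	/* clipped to 4096 */
	ntfs_log_debug("write %lld bytes now, the rest on a later call\n",
			(long long)to_write);
}
#endif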

/*
 * Close a file written compressed.
 * This compresses the last partial compression block of the file.
 * Two empty runlist slots have to be reserved beforehand.
 *
 * Returns zero if closing is successful.
 */

int ntfs_compressed_close(ntfs_attr *na, runlist_element *wrl, s64 offs,
			VCN *update_from)
{
	ntfs_volume *vol;
	runlist_element *brl; /* entry containing the beginning of block */
	int compression_length;
	s64 written;
	s64 to_read;
	s64 roffs;
	s64 got;
	s64 start_vcn;
	char *inbuf;
	BOOL fail;
	BOOL done;

	if (na->unused_runs < 2) {
		ntfs_log_error("No unused runs for compressed close\n");
		errno = EIO;
		return (-1);
	}
	if (*update_from < 0) {
		ntfs_log_error("Bad update vcn for compressed close\n");
		errno = EIO;
		return (-1);
	}
	if (wrl->vcn < *update_from)
		*update_from = wrl->vcn;
	vol = na->ni->vol;
	compression_length = na->compression_block_clusters;
	done = FALSE;
	/*
	 * There generally is an uncompressed block at the end of the
	 * file: read the full block and compress it.
	 */
	inbuf = (char*)ntfs_malloc(na->compression_block_size);
	if (inbuf) {
		start_vcn = (wrl->vcn + (offs >> vol->cluster_size_bits))
				& -compression_length;
		if (start_vcn < *update_from)
			*update_from = start_vcn;
		to_read = offs + ((wrl->vcn - start_vcn)
					<< vol->cluster_size_bits);
		brl = wrl;
		fail = FALSE;
		while (brl->vcn && (brl->vcn > start_vcn)) {
			if (brl->lcn == (LCN)LCN_HOLE) {
				ntfs_log_error("jump back over a hole when closing\n");
				fail = TRUE;
				errno = EIO;
			}
			brl--;
		}
		if (!fail) {
			/* roffs can be an offset from another uncompressed block */
			roffs = (start_vcn - brl->vcn)
						<< vol->cluster_size_bits;
			if (to_read) {
				got = read_clusters(vol, brl, roffs, to_read,
						inbuf);
				if (got == to_read) {
					written = ntfs_comp_set(na, brl, roffs,
							to_read, inbuf);
					if ((written >= 0)
						/* free the unused clusters */
					    && !ntfs_compress_free(na,brl,
							written + roffs,
							na->compression_block_size + roffs,
							TRUE, update_from)) {
						done = TRUE;
					} else
						/* if compression failed, leave uncompressed */
						if (written == -1)
							done = TRUE;
				}
			} else
				done = TRUE;
		}
		free(inbuf);
	}
	if (done && !valid_compressed_run(na,wrl,TRUE,"end compressed close"))
		done = FALSE;
	return (!done);
}
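
/*
 * A hedged usage sketch, still under the hypothetical
 * NTFS_COMPRESS_EXAMPLES guard: in the real library the close path in
 * attrib.c locates the run and offset of the last written byte before
 * calling ntfs_compressed_close(). Initializing update_from past the
 * last vcn is an assumption made for this sketch; the function lowers
 * it to the first vcn whose mapping must then be rewritten to the MFT
 * record.
 */
#ifdef NTFS_COMPRESS_EXAMPLES
static int example_close_tail(ntfs_attr *na, runlist_element *wrl,
			s64 offs)
{
	VCN update_from;

	/* past the end, so any vcn touched by the close lowers it */
	update_from = na->allocated_size >> na->ni->vol->cluster_size_bits;
	if (ntfs_compressed_close(na, wrl, offs, &update_from))
		return (-1);
	/*
	 * Here the mapping pairs from update_from onward would be
	 * rewritten into the MFT record.
	 */
	return (0);
}
#endif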