liblzma: Port from C99 to C89/90

Remove use of designated initializers and declarations of variables
after statements.  Leave "//" comments as-is for now.
Authored by Daniel Pfeifer on 2014-07-13 22:21:58 +02:00; committed by Brad King
parent b2a07ca49c
commit 7a92eddbcb
61 changed files with 1081 additions and 701 deletions
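The porting pattern repeated in every hunk below is mechanical. As a rough before/after sketch (the function and struct names here are invented for illustration and do not appear in liblzma):

/* C99 form: a declaration after a statement, plus a designated initializer. */
static int area_c99(int w, int h)
{
	if (w < 0 || h < 0)
		return -1;

	struct dim { int w; int h; };
	struct dim d = { .w = w, .h = h };	/* designated initializer */
	return d.w * d.h;
}

/* C89/90 form: declarations come first; the aggregate initializer becomes
 * positional, and values that are not constant expressions are assigned
 * afterwards instead of being used as initializers. */
static int area_c89(int w, int h)
{
	struct dim { int w; int h; };
	struct dim d = { 0, 0 };

	if (w < 0 || h < 0)
		return -1;

	d.w = w;
	d.h = h;
	return d.w * d.h;
}

The same treatment shows up throughout the diff: declarations are hoisted to the top of their enclosing block, ".field = value" initializers become positional ones or plain assignments, and the variadic lzma_next_strm_init macro is split into fixed-arity lzma_next_strm_init1/2/3 variants, since C89 also lacks variadic macros.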


@@ -16,9 +16,6 @@
 extern LZMA_API(lzma_bool)
 lzma_check_is_supported(lzma_check type)
 {
-	if ((unsigned int)(type) > LZMA_CHECK_ID_MAX)
-		return false;
 	static const lzma_bool available_checks[LZMA_CHECK_ID_MAX + 1] = {
 		true,   // LZMA_CHECK_NONE
@@ -56,6 +53,9 @@ lzma_check_is_supported(lzma_check type)
 		false,  // Reserved
 	};
+	if ((unsigned int)(type) > LZMA_CHECK_ID_MAX)
+		return false;
 	return available_checks[(unsigned int)(type)];
 }
@@ -63,9 +63,6 @@ lzma_check_is_supported(lzma_check type)
 extern LZMA_API(uint32_t)
 lzma_check_size(lzma_check type)
 {
-	if ((unsigned int)(type) > LZMA_CHECK_ID_MAX)
-		return UINT32_MAX;
 	// See file-format.txt section 2.1.1.2.
 	static const uint8_t check_sizes[LZMA_CHECK_ID_MAX + 1] = {
 		0,
@@ -76,6 +73,9 @@ lzma_check_size(lzma_check type)
 		64, 64, 64
 	};
+	if ((unsigned int)(type) > LZMA_CHECK_ID_MAX)
+		return UINT32_MAX;
 	return check_sizes[(unsigned int)(type)];
 }


@@ -33,6 +33,8 @@ lzma_crc32(const uint8_t *buf, size_t size, uint32_t crc)
 #endif
 	if (size > 8) {
+		const uint8_t * limit;
 		// Fix the alignment, if needed. The if statement above
 		// ensures that this won't read past the end of buf[].
 		while ((uintptr_t)(buf) & 7) {
@@ -41,7 +43,7 @@ lzma_crc32(const uint8_t *buf, size_t size, uint32_t crc)
 		}
 		// Calculate the position where to stop.
-		const uint8_t *const limit = buf + (size & ~(size_t)(7));
+		limit = buf + (size & ~(size_t)(7));
 		// Calculate how many bytes must be calculated separately
 		// before returning the result.
@@ -49,6 +51,8 @@ lzma_crc32(const uint8_t *buf, size_t size, uint32_t crc)
 		// Calculate the CRC32 using the slice-by-eight algorithm.
 		while (buf < limit) {
+			uint32_t tmp;
 			crc ^= *(const uint32_t *)(buf);
 			buf += 4;
@@ -57,7 +61,7 @@ lzma_crc32(const uint8_t *buf, size_t size, uint32_t crc)
 					^ lzma_crc32_table[5][C(crc)]
 					^ lzma_crc32_table[4][D(crc)];
-			const uint32_t tmp = *(const uint32_t *)(buf);
+			tmp = *(const uint32_t *)(buf);
 			buf += 4;
 			// At least with some compilers, it is critical for


@@ -36,12 +36,14 @@ lzma_crc64(const uint8_t *buf, size_t size, uint64_t crc)
 #endif
 	if (size > 4) {
+		const uint8_t *limit;
 		while ((uintptr_t)(buf) & 3) {
 			crc = lzma_crc64_table[0][*buf++ ^ A1(crc)] ^ S8(crc);
 			--size;
 		}
-		const uint8_t *const limit = buf + (size & ~(size_t)(3));
+		limit = buf + (size & ~(size_t)(3));
 		size &= (size_t)(3);
 		while (buf < limit) {


@@ -80,16 +80,21 @@ static const uint32_t SHA256_K[64] = {
 static void
+#ifndef _MSC_VER
 transform(uint32_t state[static 8], const uint32_t data[static 16])
+#else
+transform(uint32_t state[], const uint32_t data[])
+#endif
 {
 	uint32_t W[16];
 	uint32_t T[8];
+	unsigned int j;
 	// Copy state[] to working vars.
 	memcpy(T, state, sizeof(T));
 	// 64 operations, partially loop unrolled
-	for (unsigned int j = 0; j < 64; j += 16) {
+	for (j = 0; j < 64; j += 16) {
 		R( 0); R( 1); R( 2); R( 3);
 		R( 4); R( 5); R( 6); R( 7);
 		R( 8); R( 9); R(10); R(11);
@@ -116,8 +121,9 @@ process(lzma_check_state *check)
 #else
 	uint32_t data[16];
+	size_t i;
-	for (size_t i = 0; i < 16; ++i)
+	for (i = 0; i < 16; ++i)
 		data[i] = bswap32(check->buffer.u32[i]);
 	transform(check->state.sha256.state, data);
@@ -172,6 +178,8 @@ lzma_sha256_update(const uint8_t *buf, size_t size, lzma_check_state *check)
 extern void
 lzma_sha256_finish(lzma_check_state *check)
 {
+	size_t i;
 	// Add padding as described in RFC 3174 (it describes SHA-1 but
 	// the same padding style is used for SHA-256 too).
 	size_t pos = check->state.sha256.size & 0x3F;
@@ -193,7 +201,7 @@ lzma_sha256_finish(lzma_check_state *check)
 	process(check);
-	for (size_t i = 0; i < 8; ++i)
+	for (i = 0; i < 8; ++i)
 		check->buffer.u32[i] = conv32be(check->state.sha256.state[i]);
 	return;
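The #ifndef _MSC_VER guard added above works around another C99-only construct: the "static" keyword inside an array parameter declarator, as in uint32_t state[static 8], which tells the compiler the caller passes at least that many elements. MSVC does not accept this syntax, so the fallback declares ordinary unsized array parameters. A minimal sketch of the same guard, with an invented function name:

#ifndef _MSC_VER
/* C99: the callee may assume at least 16 readable elements. */
static unsigned int sum16(const unsigned int v[static 16])
#else
/* Fallback for compilers without C99 array-parameter qualifiers. */
static unsigned int sum16(const unsigned int v[])
#endif
{
	unsigned int total = 0;
	unsigned int i;
	for (i = 0; i < 16; ++i)
		total += v[i];
	return total;
}

Both branches declare the same parameter type (a pointer to const unsigned int), so the choice only affects what the compiler may assume, not the calling convention.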


@@ -126,19 +126,17 @@ alone_decode(lzma_coder *coder,
 		// Fall through
 	case SEQ_CODER_INIT: {
+		lzma_ret ret;
+		lzma_filter_info filters[2] = {
+			{ 0, &lzma_lzma_decoder_init, &coder->options },
+			{ 0, NULL, NULL }
+		};
 		if (coder->memusage > coder->memlimit)
 			return LZMA_MEMLIMIT_ERROR;
-		lzma_filter_info filters[2] = {
-			{
-				.init = &lzma_lzma_decoder_init,
-				.options = &coder->options,
-			}, {
-				.init = NULL,
-			}
-		};
-		const lzma_ret ret = lzma_next_filter_init(&coder->next,
+		ret = lzma_next_filter_init(&coder->next,
 				allocator, filters);
 		if (ret != LZMA_OK)
 			return ret;
@@ -229,7 +227,7 @@ lzma_alone_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
 extern LZMA_API(lzma_ret)
 lzma_alone_decoder(lzma_stream *strm, uint64_t memlimit)
 {
-	lzma_next_strm_init(lzma_alone_decoder_init, strm, memlimit, false);
+	lzma_next_strm_init2(lzma_alone_decoder_init, strm, memlimit, false);
 	strm->internal->supported_actions[LZMA_RUN] = true;
 	strm->internal->supported_actions[LZMA_FINISH] = true;


@@ -78,6 +78,14 @@ static lzma_ret
 alone_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
 		const lzma_options_lzma *options)
 {
+	uint32_t d;
+	// Initialize the LZMA encoder.
+	const lzma_filter_info filters[2] = {
+		{ 0, &lzma_lzma_encoder_init, (void *)(options) },
+		{ 0, NULL, NULL }
+	};
 	lzma_next_coder_init(&alone_encoder_init, next, allocator);
 	if (next->coder == NULL) {
@@ -107,7 +115,7 @@ alone_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
 	// one is the next unless it is UINT32_MAX. While the header would
 	// allow any 32-bit integer, we do this to keep the decoder of liblzma
 	// accepting the resulting files.
-	uint32_t d = options->dict_size - 1;
+	d = options->dict_size - 1;
 	d |= d >> 2;
 	d |= d >> 3;
 	d |= d >> 4;
@@ -121,16 +129,6 @@ alone_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
 	// - Uncompressed size (always unknown and using EOPM)
 	memset(next->coder->header + 1 + 4, 0xFF, 8);
-	// Initialize the LZMA encoder.
-	const lzma_filter_info filters[2] = {
-		{
-			.init = &lzma_lzma_encoder_init,
-			.options = (void *)(options),
-		}, {
-			.init = NULL,
-		}
-	};
 	return lzma_next_filter_init(&next->coder->next, allocator, filters);
 }
@@ -148,7 +146,7 @@ lzma_alone_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
 extern LZMA_API(lzma_ret)
 lzma_alone_encoder(lzma_stream *strm, const lzma_options_lzma *options)
 {
-	lzma_next_strm_init(alone_encoder_init, strm, options);
+	lzma_next_strm_init1(alone_encoder_init, strm, options);
 	strm->internal->supported_actions[LZMA_RUN] = true;
 	strm->internal->supported_actions[LZMA_FINISH] = true;


@@ -177,7 +177,7 @@ auto_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
 extern LZMA_API(lzma_ret)
 lzma_auto_decoder(lzma_stream *strm, uint64_t memlimit, uint32_t flags)
 {
-	lzma_next_strm_init(auto_decoder_init, strm, memlimit, flags);
+	lzma_next_strm_init2(auto_decoder_init, strm, memlimit, flags);
 	strm->internal->supported_actions[LZMA_RUN] = true;
 	strm->internal->supported_actions[LZMA_FINISH] = true;


@@ -18,6 +18,9 @@ lzma_block_buffer_decode(lzma_block *block, lzma_allocator *allocator,
 		const uint8_t *in, size_t *in_pos, size_t in_size,
 		uint8_t *out, size_t *out_pos, size_t out_size)
 {
+	lzma_next_coder block_decoder;
+	lzma_ret ret;
 	if (in_pos == NULL || (in == NULL && *in_pos != in_size)
 			|| *in_pos > in_size || out_pos == NULL
 			|| (out == NULL && *out_pos != out_size)
@@ -25,9 +28,8 @@ lzma_block_buffer_decode(lzma_block *block, lzma_allocator *allocator,
 		return LZMA_PROG_ERROR;
 	// Initialize the Block decoder.
-	lzma_next_coder block_decoder = LZMA_NEXT_CODER_INIT;
-	lzma_ret ret = lzma_block_decoder_init(
-			&block_decoder, allocator, block);
+	block_decoder = LZMA_NEXT_CODER_INIT;
+	ret = lzma_block_decoder_init(&block_decoder, allocator, block);
 	if (ret == LZMA_OK) {
 		// Save the positions so that we can restore them in case


@@ -31,6 +31,8 @@
 static lzma_vli
 lzma2_bound(lzma_vli uncompressed_size)
 {
+	lzma_vli overhead;
 	// Prevent integer overflow in overhead calculation.
 	if (uncompressed_size > COMPRESSED_SIZE_MAX)
 		return 0;
@@ -39,7 +41,7 @@ lzma2_bound(lzma_vli uncompressed_size)
 	// uncompressed_size up to the next multiple of LZMA2_CHUNK_MAX,
 	// multiply by the size of per-chunk header, and add one byte for
 	// the end marker.
-	const lzma_vli overhead = ((uncompressed_size + LZMA2_CHUNK_MAX - 1)
+	overhead = ((uncompressed_size + LZMA2_CHUNK_MAX - 1)
 				/ LZMA2_CHUNK_MAX)
 			* LZMA2_HEADER_UNCOMPRESSED + 1;
@@ -82,15 +84,17 @@ static lzma_ret
 block_encode_uncompressed(lzma_block *block, const uint8_t *in, size_t in_size,
 		uint8_t *out, size_t *out_pos, size_t out_size)
 {
+	size_t in_pos = 0;
+	uint8_t control = 0x01; // Dictionary reset
+	lzma_filter *filters_orig;
 	// TODO: Figure out if the last filter is LZMA2 or Subblock and use
 	// that filter to encode the uncompressed chunks.
 	// Use LZMA2 uncompressed chunks. We wouldn't need a dictionary at
 	// all, but LZMA2 always requires a dictionary, so use the minimum
 	// value to minimize memory usage of the decoder.
-	lzma_options_lzma lzma2 = {
-		.dict_size = LZMA_DICT_SIZE_MIN,
-	};
+	lzma_options_lzma lzma2 = { LZMA_DICT_SIZE_MIN };
 	lzma_filter filters[2];
 	filters[0].id = LZMA_FILTER_LZMA2;
@@ -99,7 +103,7 @@ block_encode_uncompressed(lzma_block *block, const uint8_t *in, size_t in_size,
 	// Set the above filter options to *block temporarily so that we can
 	// encode the Block Header.
-	lzma_filter *filters_orig = block->filters;
+	filters_orig = block->filters;
 	block->filters = filters;
 	if (lzma_block_header_size(block) != LZMA_OK) {
@@ -128,18 +132,17 @@ block_encode_uncompressed(lzma_block *block, const uint8_t *in, size_t in_size,
 	*out_pos += block->header_size;
 	// Encode the data using LZMA2 uncompressed chunks.
-	size_t in_pos = 0;
-	uint8_t control = 0x01; // Dictionary reset
 	while (in_pos < in_size) {
+		size_t copy_size;
 		// Control byte: Indicate uncompressed chunk, of which
 		// the first resets the dictionary.
 		out[(*out_pos)++] = control;
 		control = 0x02; // No dictionary reset
 		// Size of the uncompressed chunk
-		const size_t copy_size
-				= my_min(in_size - in_pos, LZMA2_CHUNK_MAX);
+		copy_size = my_min(in_size - in_pos, LZMA2_CHUNK_MAX);
 		out[(*out_pos)++] = (copy_size - 1) >> 8;
 		out[(*out_pos)++] = (copy_size - 1) & 0xFF;
@@ -164,6 +167,10 @@ block_encode_normal(lzma_block *block, lzma_allocator *allocator,
 		const uint8_t *in, size_t in_size,
 		uint8_t *out, size_t *out_pos, size_t out_size)
 {
+	size_t out_start;
+	lzma_next_coder raw_encoder = LZMA_NEXT_CODER_INIT;
+	lzma_ret ret;
 	// Find out the size of the Block Header.
 	block->compressed_size = lzma2_bound(in_size);
 	if (block->compressed_size == 0)
@@ -176,7 +183,7 @@ block_encode_normal(lzma_block *block, lzma_allocator *allocator,
 	if (out_size - *out_pos <= block->header_size)
 		return LZMA_BUF_ERROR;
-	const size_t out_start = *out_pos;
+	out_start = *out_pos;
 	*out_pos += block->header_size;
 	// Limit out_size so that we stop encoding if the output would grow
@@ -186,8 +193,7 @@ block_encode_normal(lzma_block *block, lzma_allocator *allocator,
 	// TODO: In many common cases this could be optimized to use
 	// significantly less memory.
-	lzma_next_coder raw_encoder = LZMA_NEXT_CODER_INIT;
-	lzma_ret ret = lzma_raw_encoder_init(
+	ret = lzma_raw_encoder_init(
 			&raw_encoder, allocator, block->filters);
 	if (ret == LZMA_OK) {
@@ -226,6 +232,10 @@ lzma_block_buffer_encode(lzma_block *block, lzma_allocator *allocator,
 		const uint8_t *in, size_t in_size,
 		uint8_t *out, size_t *out_pos, size_t out_size)
 {
+	size_t check_size;
+	lzma_ret ret;
+	size_t i;
 	// Validate the arguments.
 	if (block == NULL || (in == NULL && in_size != 0) || out == NULL
 			|| out_pos == NULL || *out_pos > out_size)
@@ -249,7 +259,7 @@ lzma_block_buffer_encode(lzma_block *block, lzma_allocator *allocator,
 	out_size -= (out_size - *out_pos) & 3;
 	// Get the size of the Check field.
-	const size_t check_size = lzma_check_size(block->check);
+	check_size = lzma_check_size(block->check);
 	assert(check_size != UINT32_MAX);
 	// Reserve space for the Check field.
@@ -259,7 +269,7 @@ lzma_block_buffer_encode(lzma_block *block, lzma_allocator *allocator,
 	out_size -= check_size;
 	// Do the actual compression.
-	const lzma_ret ret = block_encode_normal(block, allocator,
+	ret = block_encode_normal(block, allocator,
 			in, in_size, out, out_pos, out_size);
 	if (ret != LZMA_OK) {
 		// If the error was something else than output buffer
@@ -281,7 +291,7 @@ lzma_block_buffer_encode(lzma_block *block, lzma_allocator *allocator,
 	// Block Padding. No buffer overflow here, because we already adjusted
 	// out_size so that (out_size - out_start) is a multiple of four.
 	// Thus, if the buffer is full, the loop body can never run.
-	for (size_t i = (size_t)(block->compressed_size); i & 3; ++i) {
+	for (i = (size_t)(block->compressed_size); i & 3; ++i) {
 		assert(*out_pos < out_size);
 		out[(*out_pos)++] = 0x00;
 	}


@@ -233,7 +233,7 @@ lzma_block_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
 extern LZMA_API(lzma_ret)
 lzma_block_decoder(lzma_stream *strm, lzma_block *block)
 {
-	lzma_next_strm_init(lzma_block_decoder_init, strm, block);
+	lzma_next_strm_init1(lzma_block_decoder_init, strm, block);
 	strm->internal->supported_actions[LZMA_RUN] = true;
 	strm->internal->supported_actions[LZMA_FINISH] = true;


@@ -208,7 +208,7 @@ lzma_block_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
 extern LZMA_API(lzma_ret)
 lzma_block_encoder(lzma_stream *strm, lzma_block *block)
 {
-	lzma_next_strm_init(lzma_block_encoder_init, strm, block);
+	lzma_next_strm_init1(lzma_block_encoder_init, strm, block);
 	strm->internal->supported_actions[LZMA_RUN] = true;
 	strm->internal->supported_actions[LZMA_FINISH] = true;


@@ -17,10 +17,12 @@
 static void
 free_properties(lzma_block *block, lzma_allocator *allocator)
 {
+	size_t i;
 	// Free allocated filter options. The last array member is not
 	// touched after the initialization in the beginning of
 	// lzma_block_header_decode(), so we don't need to touch that here.
-	for (size_t i = 0; i < LZMA_FILTERS_MAX; ++i) {
+	for (i = 0; i < LZMA_FILTERS_MAX; ++i) {
 		lzma_free(block->filters[i].options, allocator);
 		block->filters[i].id = LZMA_VLI_UNKNOWN;
 		block->filters[i].options = NULL;
@@ -34,6 +36,13 @@ extern LZMA_API(lzma_ret)
 lzma_block_header_decode(lzma_block *block,
 		lzma_allocator *allocator, const uint8_t *in)
 {
+	const size_t filter_count = (in[1] & 3) + 1;
+	size_t in_size;
+	size_t i;
+	// Start after the Block Header Size and Block Flags fields.
+	size_t in_pos = 2;
 	// NOTE: We consider the header to be corrupt not only when the
 	// CRC32 doesn't match, but also when variable-length integers
 	// are invalid or over 63 bits, or if the header is too small
@@ -41,7 +50,7 @@ lzma_block_header_decode(lzma_block *block,
 	// Initialize the filter options array. This way the caller can
 	// safely free() the options even if an error occurs in this function.
-	for (size_t i = 0; i <= LZMA_FILTERS_MAX; ++i) {
+	for (i = 0; i <= LZMA_FILTERS_MAX; ++i) {
 		block->filters[i].id = LZMA_VLI_UNKNOWN;
 		block->filters[i].options = NULL;
 	}
@@ -56,7 +65,7 @@ lzma_block_header_decode(lzma_block *block,
 		return LZMA_PROG_ERROR;
 	// Exclude the CRC32 field.
-	const size_t in_size = block->header_size - 4;
+	in_size = block->header_size - 4;
 	// Verify CRC32
 	if (lzma_crc32(in, in_size, 0) != unaligned_read32le(in + in_size))
@@ -66,9 +75,6 @@ lzma_block_header_decode(lzma_block *block,
 	if (in[1] & 0x3C)
 		return LZMA_OPTIONS_ERROR;
-	// Start after the Block Header Size and Block Flags fields.
-	size_t in_pos = 2;
 	// Compressed Size
 	if (in[1] & 0x40) {
 		return_if_error(lzma_vli_decode(&block->compressed_size,
@@ -90,8 +96,7 @@ lzma_block_header_decode(lzma_block *block,
 		block->uncompressed_size = LZMA_VLI_UNKNOWN;
 	// Filter Flags
-	const size_t filter_count = (in[1] & 3) + 1;
-	for (size_t i = 0; i < filter_count; ++i) {
+	for (i = 0; i < filter_count; ++i) {
 		const lzma_ret ret = lzma_filter_flags_decode(
 				&block->filters[i], allocator,
 				in, &in_pos, in_size);


@@ -17,12 +17,14 @@
 extern LZMA_API(lzma_ret)
 lzma_block_header_size(lzma_block *block)
 {
-	if (block->version != 0)
-		return LZMA_OPTIONS_ERROR;
+	size_t i;
 	// Block Header Size + Block Flags + CRC32.
 	uint32_t size = 1 + 1 + 4;
+	if (block->version != 0)
+		return LZMA_OPTIONS_ERROR;
 	// Compressed Size
 	if (block->compressed_size != LZMA_VLI_UNKNOWN) {
 		const uint32_t add = lzma_vli_size(block->compressed_size);
@@ -45,12 +47,13 @@ lzma_block_header_size(lzma_block *block)
 	if (block->filters == NULL || block->filters[0].id == LZMA_VLI_UNKNOWN)
 		return LZMA_PROG_ERROR;
-	for (size_t i = 0; block->filters[i].id != LZMA_VLI_UNKNOWN; ++i) {
+	for (i = 0; block->filters[i].id != LZMA_VLI_UNKNOWN; ++i) {
+		uint32_t add;
 		// Don't allow too many filters.
 		if (i == LZMA_FILTERS_MAX)
 			return LZMA_PROG_ERROR;
-		uint32_t add;
 		return_if_error(lzma_filter_flags_size(&add,
 				block->filters + i));
@@ -73,20 +76,23 @@ lzma_block_header_size(lzma_block *block)
 extern LZMA_API(lzma_ret)
 lzma_block_header_encode(const lzma_block *block, uint8_t *out)
 {
+	size_t out_size;
+	size_t out_pos = 2;
+	size_t filter_count = 0;
 	// Validate everything but filters.
 	if (lzma_block_unpadded_size(block) == 0
 			|| !lzma_vli_is_valid(block->uncompressed_size))
 		return LZMA_PROG_ERROR;
 	// Indicate the size of the buffer _excluding_ the CRC32 field.
-	const size_t out_size = block->header_size - 4;
+	out_size = block->header_size - 4;
 	// Store the Block Header Size.
 	out[0] = out_size / 4;
 	// We write Block Flags in pieces.
 	out[1] = 0x00;
-	size_t out_pos = 2;
 	// Compressed Size
 	if (block->compressed_size != LZMA_VLI_UNKNOWN) {
@@ -108,7 +114,6 @@ lzma_block_header_encode(const lzma_block *block, uint8_t *out)
 	if (block->filters == NULL || block->filters[0].id == LZMA_VLI_UNKNOWN)
 		return LZMA_PROG_ERROR;
-	size_t filter_count = 0;
 	do {
 		// There can be a maximum of four filters.
 		if (filter_count == LZMA_FILTERS_MAX)


@@ -17,11 +17,14 @@
 extern LZMA_API(lzma_ret)
 lzma_block_compressed_size(lzma_block *block, lzma_vli unpadded_size)
 {
+	uint32_t container_size;
+	lzma_vli compressed_size;
 	// Validate everything but Uncompressed Size and filters.
 	if (lzma_block_unpadded_size(block) == 0)
 		return LZMA_PROG_ERROR;
-	const uint32_t container_size = block->header_size
+	container_size = block->header_size
 			+ lzma_check_size(block->check);
 	// Validate that Compressed Size will be greater than zero.
@@ -31,7 +34,7 @@ lzma_block_compressed_size(lzma_block *block, lzma_vli unpadded_size)
 	// Calculate what Compressed Size is supposed to be.
 	// If Compressed Size was present in Block Header,
 	// compare that the new value matches it.
-	const lzma_vli compressed_size = unpadded_size - container_size;
+	compressed_size = unpadded_size - container_size;
 	if (block->compressed_size != LZMA_VLI_UNKNOWN
 			&& block->compressed_size != compressed_size)
 		return LZMA_DATA_ERROR;
@@ -45,6 +48,8 @@ lzma_block_compressed_size(lzma_block *block, lzma_vli unpadded_size)
 extern LZMA_API(lzma_vli)
 lzma_block_unpadded_size(const lzma_block *block)
 {
+	lzma_vli unpadded_size;
 	// Validate the values that we are interested in i.e. all but
 	// Uncompressed Size and the filters.
 	//
@@ -66,7 +71,7 @@ lzma_block_unpadded_size(const lzma_block *block)
 		return LZMA_VLI_UNKNOWN;
 	// Calculate Unpadded Size and validate it.
-	const lzma_vli unpadded_size = block->compressed_size
+	unpadded_size = block->compressed_size
 			+ block->header_size
 			+ lzma_check_size(block->check);


@@ -38,12 +38,12 @@ lzma_version_string(void)
 extern void * lzma_attribute((__malloc__)) lzma_attr_alloc_size(1)
 lzma_alloc(size_t size, lzma_allocator *allocator)
 {
+	void *ptr;
 	// Some malloc() variants return NULL if called with size == 0.
 	if (size == 0)
 		size = 1;
-	void *ptr;
 	if (allocator != NULL && allocator->alloc != NULL)
 		ptr = allocator->alloc(allocator->opaque, 1, size);
 	else
@@ -173,6 +173,10 @@ lzma_strm_init(lzma_stream *strm)
 extern LZMA_API(lzma_ret)
 lzma_code(lzma_stream *strm, lzma_action action)
 {
+	size_t in_pos = 0;
+	size_t out_pos = 0;
+	lzma_ret ret;
 	// Sanity checks
 	if ((strm->next_in == NULL && strm->avail_in != 0)
 			|| (strm->next_out == NULL && strm->avail_out != 0)
@@ -248,9 +252,7 @@ lzma_code(lzma_stream *strm, lzma_action action)
 		return LZMA_PROG_ERROR;
 	}
-	size_t in_pos = 0;
-	size_t out_pos = 0;
-	lzma_ret ret = strm->internal->next.code(
+	ret = strm->internal->next.code(
 			strm->internal->next.coder, strm->allocator,
 			strm->next_in, &in_pos, strm->avail_in,
 			strm->next_out, &out_pos, strm->avail_out, action);


@@ -155,18 +155,18 @@ struct lzma_next_coder_s {
 };
-/// Macro to initialize lzma_next_coder structure
-#define LZMA_NEXT_CODER_INIT \
-	(lzma_next_coder){ \
-		.coder = NULL, \
-		.init = (uintptr_t)(NULL), \
-		.id = LZMA_VLI_UNKNOWN, \
-		.code = NULL, \
-		.end = NULL, \
-		.get_check = NULL, \
-		.memconfig = NULL, \
-		.update = NULL, \
-	}
+/// Constant to initialize lzma_next_coder structure
+static const lzma_next_coder LZMA_NEXT_CODER_INIT =
+	{
+		NULL,
+		LZMA_VLI_UNKNOWN,
+		(uintptr_t)(NULL),
+		NULL,
+		NULL,
+		NULL,
+		NULL,
+		NULL,
+	};
 /// Internal data for lzma_strm_init, lzma_code, and lzma_end. A pointer to
@@ -211,7 +211,7 @@ extern void lzma_free(void *ptr, lzma_allocator *allocator);
 /// Allocates strm->internal if it is NULL, and initializes *strm and
-/// strm->internal. This function is only called via lzma_next_strm_init macro.
+/// strm->internal. This function is only called via lzma_next_strm_init2 macro.
 extern lzma_ret lzma_strm_init(lzma_stream *strm);
 /// Initializes the next filter in the chain, if any. This takes care of
@@ -269,15 +269,37 @@ do { \
 /// (The function being called will use lzma_next_coder_init()). If
 /// initialization fails, memory that wasn't freed by func() is freed
 /// along strm->internal.
-#define lzma_next_strm_init(func, strm, ...) \
+#define lzma_next_strm_init1(func, strm, arg1) \
 do { \
-	return_if_error(lzma_strm_init(strm)); \
-	const lzma_ret ret_ = func(&(strm)->internal->next, \
-			(strm)->allocator, __VA_ARGS__); \
+	lzma_ret ret_; \
+	return_if_error(lzma_strm_init(strm)); \
+	ret_ = func(&(strm)->internal->next, (strm)->allocator, arg1); \
 	if (ret_ != LZMA_OK) { \
 		lzma_end(strm); \
 		return ret_; \
 	} \
+} while (0)
+#define lzma_next_strm_init2(func, strm, arg1, arg2) \
+do { \
+	lzma_ret ret_; \
+	return_if_error(lzma_strm_init(strm)); \
+	ret_ = func(&(strm)->internal->next, (strm)->allocator, arg1, arg2); \
+	if (ret_ != LZMA_OK) { \
+		lzma_end(strm); \
+		return ret_; \
+	} \
+} while (0)
+#define lzma_next_strm_init3(func, strm, arg1, arg2, arg3) \
+do { \
+	lzma_ret ret_; \
+	return_if_error(lzma_strm_init(strm)); \
+	ret_ = func(&(strm)->internal->next, (strm)->allocator, arg1, arg2, arg3); \
+	if (ret_ != LZMA_OK) { \
+		lzma_end(strm); \
+		return ret_; \
+	} \
 } while (0)
 #endif
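The 1/2/3 suffixes introduced above exist because variadic macros (the "..." / __VA_ARGS__ form) are a C99 feature, so a C89/90 build needs one lzma_next_strm_init macro per argument count; the call sites elsewhere in this commit are updated to pick the matching variant. Likewise, LZMA_NEXT_CODER_INIT can no longer be a compound literal with designated initializers (both C99-only), so it becomes a static const object initialized positionally. A minimal sketch of the fixed-arity pattern, with invented names:

/* A variadic *function* is fine in C89; only variadic *macros* are not. */
void log_printf(const char *fmt, ...);

/* C99 could forward everything with one macro:
 *     #define trace(...) log_printf(__VA_ARGS__)
 * C89/90 needs a fixed-arity macro per argument count instead. */
#define trace1(fmt, a)		log_printf(fmt, a)
#define trace2(fmt, a, b)	log_printf(fmt, a, b)
#define trace3(fmt, a, b, c)	log_printf(fmt, a, b, c)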


@@ -18,22 +18,26 @@ lzma_raw_buffer_decode(const lzma_filter *filters, lzma_allocator *allocator,
 		const uint8_t *in, size_t *in_pos, size_t in_size,
 		uint8_t *out, size_t *out_pos, size_t out_size)
 {
+	lzma_next_coder next = LZMA_NEXT_CODER_INIT;
+	size_t in_start;
+	size_t out_start;
+	lzma_ret ret;
 	// Validate what isn't validated later in filter_common.c.
 	if (in == NULL || in_pos == NULL || *in_pos > in_size || out == NULL
 			|| out_pos == NULL || *out_pos > out_size)
 		return LZMA_PROG_ERROR;
 	// Initialize the decoer.
-	lzma_next_coder next = LZMA_NEXT_CODER_INIT;
 	return_if_error(lzma_raw_decoder_init(&next, allocator, filters));
 	// Store the positions so that we can restore them if something
 	// goes wrong.
-	const size_t in_start = *in_pos;
-	const size_t out_start = *out_pos;
+	in_start = *in_pos;
+	out_start = *out_pos;
 	// Do the actual decoding and free decoder's memory.
-	lzma_ret ret = next.code(next.coder, allocator, in, in_pos, in_size,
+	ret = next.code(next.coder, allocator, in, in_pos, in_size,
 			out, out_pos, out_size, LZMA_FINISH);
 	if (ret == LZMA_STREAM_END) {


@@ -18,22 +18,25 @@ lzma_raw_buffer_encode(const lzma_filter *filters, lzma_allocator *allocator,
 		const uint8_t *in, size_t in_size, uint8_t *out,
 		size_t *out_pos, size_t out_size)
 {
+	lzma_next_coder next = LZMA_NEXT_CODER_INIT;
+	size_t out_start;
+	size_t in_pos = 0;
+	lzma_ret ret;
 	// Validate what isn't validated later in filter_common.c.
 	if ((in == NULL && in_size != 0) || out == NULL
 			|| out_pos == NULL || *out_pos > out_size)
 		return LZMA_PROG_ERROR;
 	// Initialize the encoder
-	lzma_next_coder next = LZMA_NEXT_CODER_INIT;
 	return_if_error(lzma_raw_encoder_init(&next, allocator, filters));
 	// Store the output position so that we can restore it if
 	// something goes wrong.
-	const size_t out_start = *out_pos;
+	out_start = *out_pos;
 	// Do the actual encoding and free coder's memory.
-	size_t in_pos = 0;
-	lzma_ret ret = next.code(next.coder, allocator, in, &in_pos, in_size,
+	ret = next.code(next.coder, allocator, in, &in_pos, in_size,
 			out, out_pos, out_size, LZMA_FINISH);
 	lzma_next_end(&next, allocator);


@@ -36,87 +36,87 @@ static const struct {
 } features[] = {
 #if defined (HAVE_ENCODER_LZMA1) || defined(HAVE_DECODER_LZMA1)
 	{
-		.id = LZMA_FILTER_LZMA1,
-		.options_size = sizeof(lzma_options_lzma),
-		.non_last_ok = false,
-		.last_ok = true,
-		.changes_size = true,
+		LZMA_FILTER_LZMA1,
+		sizeof(lzma_options_lzma),
+		false,
+		true,
+		true,
 	},
 #endif
 #if defined(HAVE_ENCODER_LZMA2) || defined(HAVE_DECODER_LZMA2)
 	{
-		.id = LZMA_FILTER_LZMA2,
-		.options_size = sizeof(lzma_options_lzma),
-		.non_last_ok = false,
-		.last_ok = true,
-		.changes_size = true,
+		LZMA_FILTER_LZMA2,
+		sizeof(lzma_options_lzma),
+		false,
+		true,
+		true,
 	},
 #endif
 #if defined(HAVE_ENCODER_X86) || defined(HAVE_DECODER_X86)
 	{
-		.id = LZMA_FILTER_X86,
-		.options_size = sizeof(lzma_options_bcj),
-		.non_last_ok = true,
-		.last_ok = false,
-		.changes_size = false,
+		LZMA_FILTER_X86,
+		sizeof(lzma_options_bcj),
+		true,
+		false,
+		false,
 	},
 #endif
 #if defined(HAVE_ENCODER_POWERPC) || defined(HAVE_DECODER_POWERPC)
 	{
-		.id = LZMA_FILTER_POWERPC,
-		.options_size = sizeof(lzma_options_bcj),
-		.non_last_ok = true,
-		.last_ok = false,
-		.changes_size = false,
+		LZMA_FILTER_POWERPC,
+		sizeof(lzma_options_bcj),
+		true,
+		false,
+		false,
 	},
 #endif
 #if defined(HAVE_ENCODER_IA64) || defined(HAVE_DECODER_IA64)
 	{
-		.id = LZMA_FILTER_IA64,
-		.options_size = sizeof(lzma_options_bcj),
-		.non_last_ok = true,
-		.last_ok = false,
-		.changes_size = false,
+		LZMA_FILTER_IA64,
+		sizeof(lzma_options_bcj),
+		true,
+		false,
+		false,
 	},
 #endif
 #if defined(HAVE_ENCODER_ARM) || defined(HAVE_DECODER_ARM)
 	{
-		.id = LZMA_FILTER_ARM,
-		.options_size = sizeof(lzma_options_bcj),
-		.non_last_ok = true,
-		.last_ok = false,
-		.changes_size = false,
+		LZMA_FILTER_ARM,
+		sizeof(lzma_options_bcj),
+		true,
+		false,
+		false,
 	},
 #endif
 #if defined(HAVE_ENCODER_ARMTHUMB) || defined(HAVE_DECODER_ARMTHUMB)
 	{
-		.id = LZMA_FILTER_ARMTHUMB,
-		.options_size = sizeof(lzma_options_bcj),
-		.non_last_ok = true,
-		.last_ok = false,
-		.changes_size = false,
+		LZMA_FILTER_ARMTHUMB,
+		sizeof(lzma_options_bcj),
+		true,
+		false,
+		false,
 	},
 #endif
 #if defined(HAVE_ENCODER_SPARC) || defined(HAVE_DECODER_SPARC)
 	{
-		.id = LZMA_FILTER_SPARC,
-		.options_size = sizeof(lzma_options_bcj),
-		.non_last_ok = true,
-		.last_ok = false,
-		.changes_size = false,
+		LZMA_FILTER_SPARC,
+		sizeof(lzma_options_bcj),
+		true,
+		false,
+		false,
 	},
 #endif
 #if defined(HAVE_ENCODER_DELTA) || defined(HAVE_DECODER_DELTA)
 	{
-		.id = LZMA_FILTER_DELTA,
-		.options_size = sizeof(lzma_options_delta),
-		.non_last_ok = true,
-		.last_ok = false,
-		.changes_size = false,
+		LZMA_FILTER_DELTA,
+		sizeof(lzma_options_delta),
+		true,
+		false,
+		false,
 	},
 #endif
 	{
-		.id = LZMA_VLI_UNKNOWN
+		LZMA_VLI_UNKNOWN
 	}
 };
@@ -125,11 +125,12 @@ extern LZMA_API(lzma_ret)
 lzma_filters_copy(const lzma_filter *src, lzma_filter *dest,
 		lzma_allocator *allocator)
 {
+	size_t i;
+	lzma_ret ret;
 	if (src == NULL || dest == NULL)
 		return LZMA_PROG_ERROR;
-	lzma_ret ret;
-	size_t i;
 	for (i = 0; src[i].id != LZMA_VLI_UNKNOWN; ++i) {
 		// There must be a maximum of four filters plus
 		// the array terminator.
@@ -193,10 +194,6 @@ error:
 static lzma_ret
 validate_chain(const lzma_filter *filters, size_t *count)
 {
-	// There must be at least one filter.
-	if (filters == NULL || filters[0].id == LZMA_VLI_UNKNOWN)
-		return LZMA_PROG_ERROR;
 	// Number of non-last filters that may change the size of the data
 	// significantly (that is, more than 1-2 % or so).
 	size_t changes_size_count = 0;
@@ -210,6 +207,11 @@ validate_chain(const lzma_filter *filters, size_t *count)
 	bool last_ok = false;
 	size_t i = 0;
+	// There must be at least one filter.
+	if (filters == NULL || filters[0].id == LZMA_VLI_UNKNOWN)
+		return LZMA_PROG_ERROR;
 	do {
 		size_t j;
 		for (j = 0; filters[i].id != features[j].id; ++j)
@@ -243,14 +245,17 @@ lzma_raw_coder_init(lzma_next_coder *next, lzma_allocator *allocator,
 		const lzma_filter *options,
 		lzma_filter_find coder_find, bool is_encoder)
 {
-	// Do some basic validation and get the number of filters.
+	lzma_filter_info filters[LZMA_FILTERS_MAX + 1];
 	size_t count;
+	size_t i;
+	lzma_ret ret;
+	// Do some basic validation and get the number of filters.
 	return_if_error(validate_chain(options, &count));
 	// Set the filter functions and copy the options pointer.
-	lzma_filter_info filters[LZMA_FILTERS_MAX + 1];
 	if (is_encoder) {
-		for (size_t i = 0; i < count; ++i) {
+		for (i = 0; i < count; ++i) {
 			// The order of the filters is reversed in the
 			// encoder. It allows more efficient handling
 			// of the uncompressed data.
@@ -266,7 +271,7 @@ lzma_raw_coder_init(lzma_next_coder *next, lzma_allocator *allocator,
 			filters[j].options = options[i].options;
 		}
 	} else {
-		for (size_t i = 0; i < count; ++i) {
+		for (i = 0; i < count; ++i) {
 			const lzma_filter_coder *const fc
 					= coder_find(options[i].id);
 			if (fc == NULL || fc->init == NULL)
@@ -283,7 +288,7 @@ lzma_raw_coder_init(lzma_next_coder *next, lzma_allocator *allocator,
 	filters[count].init = NULL;
 	// Initialize the filters.
-	const lzma_ret ret = lzma_next_filter_init(next, allocator, filters);
+	ret = lzma_next_filter_init(next, allocator, filters);
 	if (ret != LZMA_OK)
 		lzma_next_end(next, allocator);
@@ -295,6 +300,9 @@ extern uint64_t
 lzma_raw_coder_memusage(lzma_filter_find coder_find,
 		const lzma_filter *filters)
 {
+	uint64_t total = 0;
+	size_t i = 0;
 	// The chain has to have at least one filter.
 	{
 		size_t tmp;
@@ -302,9 +310,6 @@ lzma_raw_coder_memusage(lzma_filter_find coder_find,
 			return UINT64_MAX;
 	}
-	uint64_t total = 0;
-	size_t i = 0;
 	do {
 		const lzma_filter_coder *const fc
 				= coder_find(filters[i].id);


@@ -44,74 +44,74 @@ typedef struct {
 static const lzma_filter_decoder decoders[] = {
 #ifdef HAVE_DECODER_LZMA1
 	{
-		.id = LZMA_FILTER_LZMA1,
-		.init = &lzma_lzma_decoder_init,
-		.memusage = &lzma_lzma_decoder_memusage,
-		.props_decode = &lzma_lzma_props_decode,
+		LZMA_FILTER_LZMA1,
+		&lzma_lzma_decoder_init,
+		&lzma_lzma_decoder_memusage,
+		&lzma_lzma_props_decode,
 	},
 #endif
 #ifdef HAVE_DECODER_LZMA2
 	{
-		.id = LZMA_FILTER_LZMA2,
-		.init = &lzma_lzma2_decoder_init,
-		.memusage = &lzma_lzma2_decoder_memusage,
-		.props_decode = &lzma_lzma2_props_decode,
+		LZMA_FILTER_LZMA2,
+		&lzma_lzma2_decoder_init,
+		&lzma_lzma2_decoder_memusage,
+		&lzma_lzma2_props_decode,
 	},
 #endif
 #ifdef HAVE_DECODER_X86
 	{
-		.id = LZMA_FILTER_X86,
-		.init = &lzma_simple_x86_decoder_init,
-		.memusage = NULL,
-		.props_decode = &lzma_simple_props_decode,
+		LZMA_FILTER_X86,
+		&lzma_simple_x86_decoder_init,
+		NULL,
+		&lzma_simple_props_decode,
 	},
 #endif
 #ifdef HAVE_DECODER_POWERPC
 	{
-		.id = LZMA_FILTER_POWERPC,
-		.init = &lzma_simple_powerpc_decoder_init,
-		.memusage = NULL,
-		.props_decode = &lzma_simple_props_decode,
+		LZMA_FILTER_POWERPC,
+		&lzma_simple_powerpc_decoder_init,
+		NULL,
+		&lzma_simple_props_decode,
 	},
 #endif
 #ifdef HAVE_DECODER_IA64
 	{
-		.id = LZMA_FILTER_IA64,
-		.init = &lzma_simple_ia64_decoder_init,
-		.memusage = NULL,
-		.props_decode = &lzma_simple_props_decode,
+		LZMA_FILTER_IA64,
+		&lzma_simple_ia64_decoder_init,
+		NULL,
+		&lzma_simple_props_decode,
 	},
 #endif
 #ifdef HAVE_DECODER_ARM
 	{
-		.id = LZMA_FILTER_ARM,
-		.init = &lzma_simple_arm_decoder_init,
-		.memusage = NULL,
-		.props_decode = &lzma_simple_props_decode,
+		LZMA_FILTER_ARM,
+		&lzma_simple_arm_decoder_init,
+		NULL,
+		&lzma_simple_props_decode,
 	},
 #endif
 #ifdef HAVE_DECODER_ARMTHUMB
 	{
-		.id = LZMA_FILTER_ARMTHUMB,
-		.init = &lzma_simple_armthumb_decoder_init,
-		.memusage = NULL,
-		.props_decode = &lzma_simple_props_decode,
+		LZMA_FILTER_ARMTHUMB,
+		&lzma_simple_armthumb_decoder_init,
+		NULL,
+		&lzma_simple_props_decode,
 	},
 #endif
 #ifdef HAVE_DECODER_SPARC
 	{
-		.id = LZMA_FILTER_SPARC,
-		.init = &lzma_simple_sparc_decoder_init,
-		.memusage = NULL,
-		.props_decode = &lzma_simple_props_decode,
+		LZMA_FILTER_SPARC,
+		&lzma_simple_sparc_decoder_init,
+		NULL,
+		&lzma_simple_props_decode,
 	},
 #endif
 #ifdef HAVE_DECODER_DELTA
 	{
-		.id = LZMA_FILTER_DELTA,
-		.init = &lzma_delta_decoder_init,
-		.memusage = &lzma_delta_coder_memusage,
-		.props_decode = &lzma_delta_props_decode,
+		LZMA_FILTER_DELTA,
+		&lzma_delta_decoder_init,
+		&lzma_delta_coder_memusage,
+		&lzma_delta_props_decode,
 	},
 #endif
 };
@@ -120,7 +120,8 @@ static const lzma_filter_decoder decoders[] = {
 static const lzma_filter_decoder *
 decoder_find(lzma_vli id)
 {
-	for (size_t i = 0; i < ARRAY_SIZE(decoders); ++i)
+	size_t i;
+	for (i = 0; i < ARRAY_SIZE(decoders); ++i)
 		if (decoders[i].id == id)
 			return decoders + i;
@@ -147,7 +148,7 @@ lzma_raw_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
 extern LZMA_API(lzma_ret)
 lzma_raw_decoder(lzma_stream *strm, const lzma_filter *options)
 {
-	lzma_next_strm_init(lzma_raw_decoder_init, strm, options);
+	lzma_next_strm_init1(lzma_raw_decoder_init, strm, options);
 	strm->internal->supported_actions[LZMA_RUN] = true;
 	strm->internal->supported_actions[LZMA_FINISH] = true;
@@ -168,10 +169,11 @@ extern LZMA_API(lzma_ret)
 lzma_properties_decode(lzma_filter *filter, lzma_allocator *allocator,
 		const uint8_t *props, size_t props_size)
 {
+	const lzma_filter_decoder *const fd = decoder_find(filter->id);
 	// Make it always NULL so that the caller can always safely free() it.
 	filter->options = NULL;
-	const lzma_filter_decoder *const fd = decoder_find(filter->id);
 	if (fd == NULL)
 		return LZMA_OPTIONS_ERROR;


@@ -56,95 +56,101 @@ typedef struct {
 static const lzma_filter_encoder encoders[] = {
 #ifdef HAVE_ENCODER_LZMA1
 	{
-		.id = LZMA_FILTER_LZMA1,
-		.init = &lzma_lzma_encoder_init,
-		.memusage = &lzma_lzma_encoder_memusage,
-		.chunk_size = NULL, // FIXME
-		.props_size_get = NULL,
-		.props_size_fixed = 5,
-		.props_encode = &lzma_lzma_props_encode,
+		LZMA_FILTER_LZMA1,
+		&lzma_lzma_encoder_init,
+		&lzma_lzma_encoder_memusage,
+		NULL, // FIXME
+		NULL,
+		5,
+		&lzma_lzma_props_encode,
 	},
 #endif
 #ifdef HAVE_ENCODER_LZMA2
 	{
-		.id = LZMA_FILTER_LZMA2,
-		.init = &lzma_lzma2_encoder_init,
-		.memusage = &lzma_lzma2_encoder_memusage,
-		.chunk_size = NULL, // FIXME
-		.props_size_get = NULL,
-		.props_size_fixed = 1,
-		.props_encode = &lzma_lzma2_props_encode,
+		LZMA_FILTER_LZMA2,
+		&lzma_lzma2_encoder_init,
+		&lzma_lzma2_encoder_memusage,
+		NULL, // FIXME
+		NULL,
+		1,
+		&lzma_lzma2_props_encode,
 	},
 #endif
 #ifdef HAVE_ENCODER_X86
 	{
-		.id = LZMA_FILTER_X86,
-		.init = &lzma_simple_x86_encoder_init,
-		.memusage = NULL,
-		.chunk_size = NULL,
-		.props_size_get = &lzma_simple_props_size,
-		.props_encode = &lzma_simple_props_encode,
+		LZMA_FILTER_X86,
+		&lzma_simple_x86_encoder_init,
+		NULL,
+		NULL,
+		&lzma_simple_props_size,
+		0,
+		&lzma_simple_props_encode,
 	},
 #endif
 #ifdef HAVE_ENCODER_POWERPC
 	{
-		.id = LZMA_FILTER_POWERPC,
-		.init = &lzma_simple_powerpc_encoder_init,
-		.memusage = NULL,
-		.chunk_size = NULL,
-		.props_size_get = &lzma_simple_props_size,
-		.props_encode = &lzma_simple_props_encode,
+		LZMA_FILTER_POWERPC,
+		&lzma_simple_powerpc_encoder_init,
+		NULL,
+		NULL,
+		&lzma_simple_props_size,
+		0,
+		&lzma_simple_props_encode,
 	},
 #endif
 #ifdef HAVE_ENCODER_IA64
 	{
-		.id = LZMA_FILTER_IA64,
-		.init = &lzma_simple_ia64_encoder_init,
-		.memusage = NULL,
-		.chunk_size = NULL,
-		.props_size_get = &lzma_simple_props_size,
-		.props_encode = &lzma_simple_props_encode,
+		LZMA_FILTER_IA64,
+		&lzma_simple_ia64_encoder_init,
+		NULL,
+		NULL,
+		&lzma_simple_props_size,
+		0,
+		&lzma_simple_props_encode,
 	},
 #endif
 #ifdef HAVE_ENCODER_ARM
 	{
-		.id = LZMA_FILTER_ARM,
-		.init = &lzma_simple_arm_encoder_init,
-		.memusage = NULL,
-		.chunk_size = NULL,
-		.props_size_get = &lzma_simple_props_size,
-		.props_encode = &lzma_simple_props_encode,
+		LZMA_FILTER_ARM,
+		&lzma_simple_arm_encoder_init,
+		NULL,
+		NULL,
+		&lzma_simple_props_size,
+		0,
+		&lzma_simple_props_encode,
 	},
 #endif
 #ifdef HAVE_ENCODER_ARMTHUMB
 	{
-		.id = LZMA_FILTER_ARMTHUMB,
-		.init = &lzma_simple_armthumb_encoder_init,
-		.memusage = NULL,
-		.chunk_size = NULL,
-		.props_size_get = &lzma_simple_props_size,
-		.props_encode = &lzma_simple_props_encode,
+		LZMA_FILTER_ARMTHUMB,
+		&lzma_simple_armthumb_encoder_init,
+		NULL,
+		NULL,
+		&lzma_simple_props_size,
+		0,
+		&lzma_simple_props_encode,
 	},
 #endif
 #ifdef HAVE_ENCODER_SPARC
 	{
-		.id = LZMA_FILTER_SPARC,
-		.init = &lzma_simple_sparc_encoder_init,
-		.memusage = NULL,
-		.chunk_size = NULL,
-		.props_size_get = &lzma_simple_props_size,
-		.props_encode = &lzma_simple_props_encode,
+		LZMA_FILTER_SPARC,
+		&lzma_simple_sparc_encoder_init,
+		NULL,
+		NULL,
+		&lzma_simple_props_size,
+		0,
+		&lzma_simple_props_encode,
 	},
 #endif
 #ifdef HAVE_ENCODER_DELTA
 	{
-		.id = LZMA_FILTER_DELTA,
-		.init = &lzma_delta_encoder_init,
-		.memusage = &lzma_delta_coder_memusage,
-		.chunk_size = NULL,
-		.props_size_get = NULL,
-		.props_size_fixed = 1,
-		.props_encode = &lzma_delta_props_encode,
+		LZMA_FILTER_DELTA,
+		&lzma_delta_encoder_init,
+		&lzma_delta_coder_memusage,
+		NULL,
+		NULL,
+		1,
+		&lzma_delta_props_encode,
 	},
 #endif
 };
@@ -153,7 +159,8 @@ static const lzma_filter_encoder encoders[] = {
 static const lzma_filter_encoder *
 encoder_find(lzma_vli id)
 {
-	for (size_t i = 0; i < ARRAY_SIZE(encoders); ++i)
+	size_t i;
+	for (i = 0; i < ARRAY_SIZE(encoders); ++i)
 		if (encoders[i].id == id)
 			return encoders + i;
@@ -171,6 +178,10 @@ lzma_filter_encoder_is_supported(lzma_vli id)
 extern LZMA_API(lzma_ret)
 lzma_filters_update(lzma_stream *strm, const lzma_filter *filters)
 {
+	size_t i;
+	size_t count = 1;
+	lzma_filter reversed_filters[LZMA_FILTERS_MAX + 1];
 	if (strm->internal->next.update == NULL)
 		return LZMA_PROG_ERROR;
@@ -180,12 +191,10 @@ lzma_filters_update(lzma_stream *strm, const lzma_filter *filters)
 	// The actual filter chain in the encoder is reversed. Some things
 	// still want the normal order chain, so we provide both.
-	size_t count = 1;
 	while (filters[count].id != LZMA_VLI_UNKNOWN)
 		++count;
-	lzma_filter reversed_filters[LZMA_FILTERS_MAX + 1];
-	for (size_t i = 0; i < count; ++i)
+	for (i = 0; i < count; ++i)
 		reversed_filters[count - i - 1] = filters[i];
 	reversed_filters[count].id = LZMA_VLI_UNKNOWN;
@@ -207,7 +216,7 @@ lzma_raw_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
 extern LZMA_API(lzma_ret)
 lzma_raw_encoder(lzma_stream *strm, const lzma_filter *options)
 {
-	lzma_next_strm_init(lzma_raw_coder_init, strm, options,
+	lzma_next_strm_init3(lzma_raw_coder_init, strm, options,
 			(lzma_filter_find)(&encoder_find), true);
 	strm->internal->supported_actions[LZMA_RUN] = true;


@@ -18,6 +18,9 @@ lzma_filter_flags_decode(
 		lzma_filter *filter, lzma_allocator *allocator,
 		const uint8_t *in, size_t *in_pos, size_t in_size)
 {
+	lzma_vli props_size;
+	lzma_ret ret;
 	// Set the pointer to NULL so the caller can always safely free it.
 	filter->options = NULL;
@@ -29,7 +32,6 @@ lzma_filter_flags_decode(
 		return LZMA_DATA_ERROR;
 	// Size of Properties
-	lzma_vli props_size;
 	return_if_error(lzma_vli_decode(&props_size, NULL,
 			in, in_pos, in_size));
@@ -37,7 +39,7 @@ lzma_filter_flags_decode(
 	if (in_size - *in_pos < props_size)
 		return LZMA_DATA_ERROR;
-	const lzma_ret ret = lzma_properties_decode(
+	ret = lzma_properties_decode(
 			filter, allocator, in + *in_pos, props_size);
 	*in_pos += props_size;


@@ -31,6 +31,8 @@ extern LZMA_API(lzma_ret)
 lzma_filter_flags_encode(const lzma_filter *filter,
 		uint8_t *out, size_t *out_pos, size_t out_size)
 {
+	uint32_t props_size;
 	// Filter ID
 	if (filter->id >= LZMA_FILTER_RESERVED_START)
 		return LZMA_PROG_ERROR;
@@ -39,7 +41,6 @@ lzma_filter_flags_encode(const lzma_filter *filter,
 			out, out_pos, out_size));
 	// Size of Properties
-	uint32_t props_size;
 	return_if_error(lzma_properties_size(&props_size, filter));
 	return_if_error(lzma_vli_encode(props_size, NULL,
 			out, out_pos, out_size));

View File

@ -230,6 +230,7 @@ index_tree_end(index_tree *tree, lzma_allocator *allocator,
static void static void
index_tree_append(index_tree *tree, index_tree_node *node) index_tree_append(index_tree *tree, index_tree_node *node)
{ {
uint32_t up;
node->parent = tree->rightmost; node->parent = tree->rightmost;
node->left = NULL; node->left = NULL;
node->right = NULL; node->right = NULL;
@ -258,8 +259,10 @@ index_tree_append(index_tree *tree, index_tree_node *node)
// and thus know the state of the tree just by looking at the node // and thus know the state of the tree just by looking at the node
// count. From the node count we can calculate how many steps to go // count. From the node count we can calculate how many steps to go
// up in the tree to find the rotation root. // up in the tree to find the rotation root.
uint32_t up = tree->count ^ (UINT32_C(1) << bsr32(tree->count)); up = tree->count ^ (UINT32_C(1) << bsr32(tree->count));
if (up != 0) { if (up != 0) {
index_tree_node *pivot;
// Locate the root node for the rotation. // Locate the root node for the rotation.
up = ctz32(tree->count) + 2; up = ctz32(tree->count) + 2;
do { do {
@ -267,7 +270,7 @@ index_tree_append(index_tree *tree, index_tree_node *node)
} while (--up > 0); } while (--up > 0);
// Rotate left using node as the rotation root. // Rotate left using node as the rotation root.
index_tree_node *pivot = node->right; pivot = node->right;
if (node->parent == NULL) { if (node->parent == NULL) {
tree->root = pivot; tree->root = pivot;
@ -397,11 +400,13 @@ index_init_plain(lzma_allocator *allocator)
extern LZMA_API(lzma_index *) extern LZMA_API(lzma_index *)
lzma_index_init(lzma_allocator *allocator) lzma_index_init(lzma_allocator *allocator)
{ {
index_stream *s;
lzma_index *i = index_init_plain(allocator); lzma_index *i = index_init_plain(allocator);
if (i == NULL) if (i == NULL)
return NULL; return NULL;
index_stream *s = index_stream_init(0, 0, 1, 0, allocator); s = index_stream_init(0, 0, 1, 0, allocator);
if (s == NULL) { if (s == NULL) {
lzma_free(i, allocator); lzma_free(i, allocator);
return NULL; return NULL;
@ -600,6 +605,8 @@ lzma_index_padding_size(const lzma_index *i)
extern LZMA_API(lzma_ret) extern LZMA_API(lzma_ret)
lzma_index_stream_flags(lzma_index *i, const lzma_stream_flags *stream_flags) lzma_index_stream_flags(lzma_index *i, const lzma_stream_flags *stream_flags)
{ {
index_stream *s;
if (i == NULL || stream_flags == NULL) if (i == NULL || stream_flags == NULL)
return LZMA_PROG_ERROR; return LZMA_PROG_ERROR;
@ -607,7 +614,7 @@ lzma_index_stream_flags(lzma_index *i, const lzma_stream_flags *stream_flags)
return_if_error(lzma_stream_flags_compare( return_if_error(lzma_stream_flags_compare(
stream_flags, stream_flags)); stream_flags, stream_flags));
index_stream *s = (index_stream *)(i->streams.rightmost); s = (index_stream *)(i->streams.rightmost);
s->stream_flags = *stream_flags; s->stream_flags = *stream_flags;
return LZMA_OK; return LZMA_OK;
@ -617,14 +624,17 @@ lzma_index_stream_flags(lzma_index *i, const lzma_stream_flags *stream_flags)
extern LZMA_API(lzma_ret) extern LZMA_API(lzma_ret)
lzma_index_stream_padding(lzma_index *i, lzma_vli stream_padding) lzma_index_stream_padding(lzma_index *i, lzma_vli stream_padding)
{ {
index_stream *s;
lzma_vli old_stream_padding;
if (i == NULL || stream_padding > LZMA_VLI_MAX if (i == NULL || stream_padding > LZMA_VLI_MAX
|| (stream_padding & 3) != 0) || (stream_padding & 3) != 0)
return LZMA_PROG_ERROR; return LZMA_PROG_ERROR;
index_stream *s = (index_stream *)(i->streams.rightmost); s = (index_stream *)(i->streams.rightmost);
// Check that the new value won't make the file grow too big. // Check that the new value won't make the file grow too big.
const lzma_vli old_stream_padding = s->stream_padding; old_stream_padding = s->stream_padding;
s->stream_padding = 0; s->stream_padding = 0;
if (lzma_index_file_size(i) + stream_padding > LZMA_VLI_MAX) { if (lzma_index_file_size(i) + stream_padding > LZMA_VLI_MAX) {
s->stream_padding = old_stream_padding; s->stream_padding = old_stream_padding;
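
Hoisting has a small cost visible above: old_stream_padding used to be a const lzma_vli initialized in place, but once its declaration has to precede the pointer setup and validation, the value is assigned later and the const qualifier is dropped. C89 still allows initializers at the top of a block, so const can be kept whenever the value is already available there; it is only lost when the assignment must follow other statements. A tiny sketch with hypothetical names:

/* Sketch only: "first" keeps its initializer (and const) because it
 * depends on nothing computed later; "second" must wait until after the
 * validation step, so it can no longer be const in C89. */
static unsigned long
padding_sketch(const unsigned long *values, int valid)
{
        const unsigned long first = values[0];
        unsigned long second;

        if (!valid)
                return 0;

        second = values[1];
        return first + second;
}
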
@ -640,20 +650,26 @@ extern LZMA_API(lzma_ret)
lzma_index_append(lzma_index *i, lzma_allocator *allocator, lzma_index_append(lzma_index *i, lzma_allocator *allocator,
lzma_vli unpadded_size, lzma_vli uncompressed_size) lzma_vli unpadded_size, lzma_vli uncompressed_size)
{ {
index_stream *s;
index_group *g;
lzma_vli compressed_base;
lzma_vli uncompressed_base;
uint32_t index_list_size_add;
// Validate. // Validate.
if (i == NULL || unpadded_size < UNPADDED_SIZE_MIN if (i == NULL || unpadded_size < UNPADDED_SIZE_MIN
|| unpadded_size > UNPADDED_SIZE_MAX || unpadded_size > UNPADDED_SIZE_MAX
|| uncompressed_size > LZMA_VLI_MAX) || uncompressed_size > LZMA_VLI_MAX)
return LZMA_PROG_ERROR; return LZMA_PROG_ERROR;
index_stream *s = (index_stream *)(i->streams.rightmost); s = (index_stream *)(i->streams.rightmost);
index_group *g = (index_group *)(s->groups.rightmost); g = (index_group *)(s->groups.rightmost);
const lzma_vli compressed_base = g == NULL ? 0 compressed_base = g == NULL ? 0
: vli_ceil4(g->records[g->last].unpadded_sum); : vli_ceil4(g->records[g->last].unpadded_sum);
const lzma_vli uncompressed_base = g == NULL ? 0 uncompressed_base = g == NULL ? 0
: g->records[g->last].uncompressed_sum; : g->records[g->last].uncompressed_sum;
const uint32_t index_list_size_add = lzma_vli_size(unpadded_size) index_list_size_add = lzma_vli_size(unpadded_size)
+ lzma_vli_size(uncompressed_size); + lzma_vli_size(uncompressed_size);
// Check that the file size will stay within limits. // Check that the file size will stay within limits.
@ -767,6 +783,7 @@ extern LZMA_API(lzma_ret)
lzma_index_cat(lzma_index *restrict dest, lzma_index *restrict src, lzma_index_cat(lzma_index *restrict dest, lzma_index *restrict src,
lzma_allocator *allocator) lzma_allocator *allocator)
{ {
index_cat_info info;
const lzma_vli dest_file_size = lzma_index_file_size(dest); const lzma_vli dest_file_size = lzma_index_file_size(dest);
// Check that we don't exceed the file size limits. // Check that we don't exceed the file size limits.
@ -796,10 +813,12 @@ lzma_index_cat(lzma_index *restrict dest, lzma_index *restrict src,
index_stream *s = (index_stream *)(dest->streams.rightmost); index_stream *s = (index_stream *)(dest->streams.rightmost);
index_group *g = (index_group *)(s->groups.rightmost); index_group *g = (index_group *)(s->groups.rightmost);
if (g != NULL && g->last + 1 < g->allocated) { if (g != NULL && g->last + 1 < g->allocated) {
index_group *newg;
assert(g->node.left == NULL); assert(g->node.left == NULL);
assert(g->node.right == NULL); assert(g->node.right == NULL);
index_group *newg = lzma_alloc(sizeof(index_group) newg = lzma_alloc(sizeof(index_group)
+ (g->last + 1) + (g->last + 1)
* sizeof(index_record), * sizeof(index_record),
allocator); allocator);
@ -834,13 +853,12 @@ lzma_index_cat(lzma_index *restrict dest, lzma_index *restrict src,
// Add all the Streams from src to dest. Update the base offsets // Add all the Streams from src to dest. Update the base offsets
// of each Stream from src. // of each Stream from src.
const index_cat_info info = { info.uncompressed_size = dest->uncompressed_size;
.uncompressed_size = dest->uncompressed_size, info.file_size = dest_file_size;
.file_size = dest_file_size, info.stream_number_add = dest->streams.count;
.stream_number_add = dest->streams.count, info.block_number_add = dest->record_count;
.block_number_add = dest->record_count, info.streams = &dest->streams;
.streams = &dest->streams,
};
index_cat_helper(&info, (index_stream *)(src->streams.root)); index_cat_helper(&info, (index_stream *)(src->streams.root));
// Update info about all the combined Streams. // Update info about all the combined Streams.
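
The index_cat_info change above is the other recurring rewrite: a C99 designated initializer becomes a plain (no longer const) object whose members are assigned one by one. One behavioral difference is worth keeping in mind: a brace initializer zero-fills every member it does not name, while explicit assignments touch only the members that are written, so each such conversion has to assign every member the code later reads, as the hunk above does. A sketch with a hypothetical struct:

struct cat_sketch {
        unsigned long uncompressed_size;
        unsigned long file_size;
        unsigned stream_number_add;
};

/* C99 form:
 *     const struct cat_sketch info = {
 *             .uncompressed_size = us,
 *             .file_size = fs,
 *             .stream_number_add = n,
 *     };
 * C89 form: declare, then assign each member that will be read. */
static void
cat_sketch_fill(unsigned long us, unsigned long fs, unsigned n)
{
        struct cat_sketch info;

        info.uncompressed_size = us;
        info.file_size = fs;
        info.stream_number_add = n;

        (void)info.file_size;   /* the real code passes &info to a helper */
}
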
@ -861,12 +879,17 @@ lzma_index_cat(lzma_index *restrict dest, lzma_index *restrict src,
static index_stream * static index_stream *
index_dup_stream(const index_stream *src, lzma_allocator *allocator) index_dup_stream(const index_stream *src, lzma_allocator *allocator)
{ {
index_stream *dest;
index_group *destg;
index_group *srcg;
size_t i = 0;
// Catch a somewhat theoretical integer overflow. // Catch a somewhat theoretical integer overflow.
if (src->record_count > PREALLOC_MAX) if (src->record_count > PREALLOC_MAX)
return NULL; return NULL;
// Allocate and initialize a new Stream. // Allocate and initialize a new Stream.
index_stream *dest = index_stream_init(src->node.compressed_base, dest = index_stream_init(src->node.compressed_base,
src->node.uncompressed_base, src->number, src->node.uncompressed_base, src->number,
src->block_number_base, allocator); src->block_number_base, allocator);
@ -884,7 +907,7 @@ index_dup_stream(const index_stream *src, lzma_allocator *allocator)
// Allocate memory for the Records. We put all the Records into // Allocate memory for the Records. We put all the Records into
// a single group. It's simplest and also tends to make // a single group. It's simplest and also tends to make
// lzma_index_locate() a little bit faster with very big Indexes. // lzma_index_locate() a little bit faster with very big Indexes.
index_group *destg = lzma_alloc(sizeof(index_group) destg = lzma_alloc(sizeof(index_group)
+ src->record_count * sizeof(index_record), + src->record_count * sizeof(index_record),
allocator); allocator);
if (destg == NULL) { if (destg == NULL) {
@ -900,8 +923,7 @@ index_dup_stream(const index_stream *src, lzma_allocator *allocator)
destg->last = src->record_count - 1; destg->last = src->record_count - 1;
// Go through all the groups in src and copy the Records into destg. // Go through all the groups in src and copy the Records into destg.
const index_group *srcg = (const index_group *)(src->groups.leftmost); srcg = (index_group *)(src->groups.leftmost);
size_t i = 0;
do { do {
memcpy(destg->records + i, srcg->records, memcpy(destg->records + i, srcg->records,
(srcg->last + 1) * sizeof(index_record)); (srcg->last + 1) * sizeof(index_record));
@ -921,6 +943,9 @@ index_dup_stream(const index_stream *src, lzma_allocator *allocator)
extern LZMA_API(lzma_index *) extern LZMA_API(lzma_index *)
lzma_index_dup(const lzma_index *src, lzma_allocator *allocator) lzma_index_dup(const lzma_index *src, lzma_allocator *allocator)
{ {
index_stream *srcstream;
index_stream *deststream;
// Allocate the base structure (no initial Stream). // Allocate the base structure (no initial Stream).
lzma_index *dest = index_init_plain(allocator); lzma_index *dest = index_init_plain(allocator);
if (dest == NULL) if (dest == NULL)
@ -933,11 +958,9 @@ lzma_index_dup(const lzma_index *src, lzma_allocator *allocator)
dest->index_list_size = src->index_list_size; dest->index_list_size = src->index_list_size;
// Copy the Streams and the groups in them. // Copy the Streams and the groups in them.
const index_stream *srcstream srcstream = (index_stream *)(src->streams.leftmost);
= (const index_stream *)(src->streams.leftmost);
do { do {
index_stream *deststream = index_dup_stream( deststream = index_dup_stream(srcstream, allocator);
srcstream, allocator);
if (deststream == NULL) { if (deststream == NULL) {
lzma_index_end(dest, allocator); lzma_index_end(dest, allocator);
return NULL; return NULL;
@ -1096,14 +1119,19 @@ lzma_index_iter_rewind(lzma_index_iter *iter)
extern LZMA_API(lzma_bool) extern LZMA_API(lzma_bool)
lzma_index_iter_next(lzma_index_iter *iter, lzma_index_iter_mode mode) lzma_index_iter_next(lzma_index_iter *iter, lzma_index_iter_mode mode)
{ {
const lzma_index *i;
const index_stream *stream;
const index_group *group;
size_t record;
// Catch unsupported mode values. // Catch unsupported mode values.
if ((unsigned int)(mode) > LZMA_INDEX_ITER_NONEMPTY_BLOCK) if ((unsigned int)(mode) > LZMA_INDEX_ITER_NONEMPTY_BLOCK)
return true; return true;
const lzma_index *i = iter->internal[ITER_INDEX].p; i = iter->internal[ITER_INDEX].p;
const index_stream *stream = iter->internal[ITER_STREAM].p; stream = iter->internal[ITER_STREAM].p;
const index_group *group = NULL; group = NULL;
size_t record = iter->internal[ITER_RECORD].s; record = iter->internal[ITER_RECORD].s;
// If we are being asked for the next Stream, leave group to NULL // If we are being asked for the next Stream, leave group to NULL
// so that the rest of the this function thinks that this Stream // so that the rest of the this function thinks that this Stream
@ -1203,6 +1231,10 @@ again:
extern LZMA_API(lzma_bool) extern LZMA_API(lzma_bool)
lzma_index_iter_locate(lzma_index_iter *iter, lzma_vli target) lzma_index_iter_locate(lzma_index_iter *iter, lzma_vli target)
{ {
const index_stream *stream;
const index_group *group;
size_t left, right;
const lzma_index *i = iter->internal[ITER_INDEX].p; const lzma_index *i = iter->internal[ITER_INDEX].p;
// If the target is past the end of the file, return immediately. // If the target is past the end of the file, return immediately.
@ -1210,12 +1242,12 @@ lzma_index_iter_locate(lzma_index_iter *iter, lzma_vli target)
return true; return true;
// Locate the Stream containing the target offset. // Locate the Stream containing the target offset.
const index_stream *stream = index_tree_locate(&i->streams, target); stream = index_tree_locate(&i->streams, target);
assert(stream != NULL); assert(stream != NULL);
target -= stream->node.uncompressed_base; target -= stream->node.uncompressed_base;
// Locate the group containing the target offset. // Locate the group containing the target offset.
const index_group *group = index_tree_locate(&stream->groups, target); group = index_tree_locate(&stream->groups, target);
assert(group != NULL); assert(group != NULL);
// Use binary search to locate the exact Record. It is the first // Use binary search to locate the exact Record. It is the first
@ -1223,8 +1255,8 @@ lzma_index_iter_locate(lzma_index_iter *iter, lzma_vli target)
// This is because we want the rightmost Record that fullfills the // This is because we want the rightmost Record that fullfills the
// search criterion. It is possible that there are empty Blocks; // search criterion. It is possible that there are empty Blocks;
// we don't want to return them. // we don't want to return them.
size_t left = 0; left = 0;
size_t right = group->last; right = group->last;
while (left < right) { while (left < right) {
const size_t pos = left + (right - left) / 2; const size_t pos = left + (right - left) / 2;

View File

@ -289,7 +289,7 @@ index_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
extern LZMA_API(lzma_ret) extern LZMA_API(lzma_ret)
lzma_index_decoder(lzma_stream *strm, lzma_index **i, uint64_t memlimit) lzma_index_decoder(lzma_stream *strm, lzma_index **i, uint64_t memlimit)
{ {
lzma_next_strm_init(index_decoder_init, strm, i, memlimit); lzma_next_strm_init2(index_decoder_init, strm, i, memlimit);
strm->internal->supported_actions[LZMA_RUN] = true; strm->internal->supported_actions[LZMA_RUN] = true;
strm->internal->supported_actions[LZMA_FINISH] = true; strm->internal->supported_actions[LZMA_FINISH] = true;
@ -303,21 +303,23 @@ lzma_index_buffer_decode(
lzma_index **i, uint64_t *memlimit, lzma_allocator *allocator, lzma_index **i, uint64_t *memlimit, lzma_allocator *allocator,
const uint8_t *in, size_t *in_pos, size_t in_size) const uint8_t *in, size_t *in_pos, size_t in_size)
{ {
lzma_coder coder;
lzma_ret ret;
// Store the input start position so that we can restore it in case
// of an error.
const size_t in_start = *in_pos;
// Sanity checks // Sanity checks
if (i == NULL || memlimit == NULL if (i == NULL || memlimit == NULL
|| in == NULL || in_pos == NULL || *in_pos > in_size) || in == NULL || in_pos == NULL || *in_pos > in_size)
return LZMA_PROG_ERROR; return LZMA_PROG_ERROR;
// Initialize the decoder. // Initialize the decoder.
lzma_coder coder;
return_if_error(index_decoder_reset(&coder, allocator, i, *memlimit)); return_if_error(index_decoder_reset(&coder, allocator, i, *memlimit));
// Store the input start position so that we can restore it in case
// of an error.
const size_t in_start = *in_pos;
// Do the actual decoding. // Do the actual decoding.
lzma_ret ret = index_decode(&coder, allocator, in, in_pos, in_size, ret = index_decode(&coder, allocator, in, in_pos, in_size,
NULL, NULL, 0, LZMA_RUN); NULL, NULL, 0, LZMA_RUN);
if (ret == LZMA_STREAM_END) { if (ret == LZMA_STREAM_END) {
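
One detail in the hunk above deserves a closer look: the initialized declaration "const size_t in_start = *in_pos;" moves ahead of the sanity checks, so *in_pos is now read before the "in_pos == NULL" test that follows. The original ordering can be preserved while staying within C89 by splitting the declaration from the assignment, at the price of losing const. A sketch of that alternative, with hypothetical names and simplified types:

#include <stddef.h>

/* Sketch only: declare in_start with the other locals, but read *in_pos
 * only after the pointer has been validated, as the C99 code did. */
static int
buffer_decode_sketch(const unsigned char *in, unsigned long *in_pos,
                unsigned long in_size)
{
        unsigned long in_start;
        int ret;

        if (in == NULL || in_pos == NULL || *in_pos > in_size)
                return -1;

        in_start = *in_pos;     /* safe: in_pos was checked above */

        ret = 0;                /* ... decode; on error restore *in_pos ... */
        if (ret != 0)
                *in_pos = in_start;
        return ret;
}
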

View File

@ -207,7 +207,7 @@ lzma_index_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
extern LZMA_API(lzma_ret) extern LZMA_API(lzma_ret)
lzma_index_encoder(lzma_stream *strm, const lzma_index *i) lzma_index_encoder(lzma_stream *strm, const lzma_index *i)
{ {
lzma_next_strm_init(lzma_index_encoder_init, strm, i); lzma_next_strm_init1(lzma_index_encoder_init, strm, i);
strm->internal->supported_actions[LZMA_RUN] = true; strm->internal->supported_actions[LZMA_RUN] = true;
strm->internal->supported_actions[LZMA_FINISH] = true; strm->internal->supported_actions[LZMA_FINISH] = true;
@ -220,6 +220,10 @@ extern LZMA_API(lzma_ret)
lzma_index_buffer_encode(const lzma_index *i, lzma_index_buffer_encode(const lzma_index *i,
uint8_t *out, size_t *out_pos, size_t out_size) uint8_t *out, size_t *out_pos, size_t out_size)
{ {
lzma_coder coder;
size_t out_start;
lzma_ret ret;
// Validate the arguments. // Validate the arguments.
if (i == NULL || out == NULL || out_pos == NULL || *out_pos > out_size) if (i == NULL || out == NULL || out_pos == NULL || *out_pos > out_size)
return LZMA_PROG_ERROR; return LZMA_PROG_ERROR;
@ -230,13 +234,12 @@ lzma_index_buffer_encode(const lzma_index *i,
// The Index encoder needs just one small data structure so we can // The Index encoder needs just one small data structure so we can
// allocate it on stack. // allocate it on stack.
lzma_coder coder;
index_encoder_reset(&coder, i); index_encoder_reset(&coder, i);
// Do the actual encoding. This should never fail, but store // Do the actual encoding. This should never fail, but store
// the original *out_pos just in case. // the original *out_pos just in case.
const size_t out_start = *out_pos; out_start = *out_pos;
lzma_ret ret = index_encode(&coder, NULL, NULL, NULL, 0, ret = index_encode(&coder, NULL, NULL, NULL, 0,
out, out_pos, out_size, LZMA_RUN); out, out_pos, out_size, LZMA_RUN);
if (ret == LZMA_STREAM_END) { if (ret == LZMA_STREAM_END) {

View File

@ -124,13 +124,14 @@ static lzma_ret
hash_append(lzma_index_hash_info *info, lzma_vli unpadded_size, hash_append(lzma_index_hash_info *info, lzma_vli unpadded_size,
lzma_vli uncompressed_size) lzma_vli uncompressed_size)
{ {
const lzma_vli sizes[2] = { unpadded_size, uncompressed_size };
info->blocks_size += vli_ceil4(unpadded_size); info->blocks_size += vli_ceil4(unpadded_size);
info->uncompressed_size += uncompressed_size; info->uncompressed_size += uncompressed_size;
info->index_list_size += lzma_vli_size(unpadded_size) info->index_list_size += lzma_vli_size(unpadded_size)
+ lzma_vli_size(uncompressed_size); + lzma_vli_size(uncompressed_size);
++info->count; ++info->count;
const lzma_vli sizes[2] = { unpadded_size, uncompressed_size };
lzma_check_update(&info->check, LZMA_CHECK_BEST, lzma_check_update(&info->check, LZMA_CHECK_BEST,
(const uint8_t *)(sizes), sizeof(sizes)); (const uint8_t *)(sizes), sizeof(sizes));
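
The sizes[2] array above keeps its brace initializer after the move. Strictly speaking, C90 requires the elements of an initializer list for an automatic aggregate to be constant expressions (C99 lifted that restriction), so a pedantic C90 compiler may warn about { unpadded_size, uncompressed_size }; most compilers accept it, which is presumably why it was left as-is. A fully conforming alternative, sketched with plain types and hypothetical names:

/* Sketch: assigning the elements avoids the C90 constraint that an
 * automatic aggregate's initializer list contain only constant
 * expressions. */
static void
hash_pair_sketch(unsigned long unpadded, unsigned long uncompressed)
{
        unsigned long sizes[2];

        sizes[0] = unpadded;
        sizes[1] = uncompressed;

        (void)sizes;    /* the real code feeds sizes[] to the check update */
}
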
@ -173,6 +174,9 @@ extern LZMA_API(lzma_ret)
lzma_index_hash_decode(lzma_index_hash *index_hash, const uint8_t *in, lzma_index_hash_decode(lzma_index_hash *index_hash, const uint8_t *in,
size_t *in_pos, size_t in_size) size_t *in_pos, size_t in_size)
{ {
size_t in_start;
lzma_ret ret;
// Catch zero input buffer here, because in contrast to Index encoder // Catch zero input buffer here, because in contrast to Index encoder
// and decoder functions, applications call this function directly // and decoder functions, applications call this function directly
// instead of via lzma_code(), which does the buffer checking. // instead of via lzma_code(), which does the buffer checking.
@ -182,8 +186,8 @@ lzma_index_hash_decode(lzma_index_hash *index_hash, const uint8_t *in,
// NOTE: This function has many similarities to index_encode() and // NOTE: This function has many similarities to index_encode() and
// index_decode() functions found from index_encoder.c and // index_decode() functions found from index_encoder.c and
// index_decoder.c. See the comments especially in index_encoder.c. // index_decoder.c. See the comments especially in index_encoder.c.
const size_t in_start = *in_pos; in_start = *in_pos;
lzma_ret ret = LZMA_OK; ret = LZMA_OK;
while (*in_pos < in_size) while (*in_pos < in_size)
switch (index_hash->sequence) { switch (index_hash->sequence) {

View File

@ -19,6 +19,9 @@ lzma_stream_buffer_decode(uint64_t *memlimit, uint32_t flags,
const uint8_t *in, size_t *in_pos, size_t in_size, const uint8_t *in, size_t *in_pos, size_t in_size,
uint8_t *out, size_t *out_pos, size_t out_size) uint8_t *out, size_t *out_pos, size_t out_size)
{ {
lzma_next_coder stream_decoder = LZMA_NEXT_CODER_INIT;
lzma_ret ret;
// Sanity checks // Sanity checks
if (in_pos == NULL || (in == NULL && *in_pos != in_size) if (in_pos == NULL || (in == NULL && *in_pos != in_size)
|| *in_pos > in_size || out_pos == NULL || *in_pos > in_size || out_pos == NULL
@ -33,8 +36,7 @@ lzma_stream_buffer_decode(uint64_t *memlimit, uint32_t flags,
// Initialize the Stream decoder. // Initialize the Stream decoder.
// TODO: We need something to tell the decoder that it can use the // TODO: We need something to tell the decoder that it can use the
// output buffer as workspace, and thus save significant amount of RAM. // output buffer as workspace, and thus save significant amount of RAM.
lzma_next_coder stream_decoder = LZMA_NEXT_CODER_INIT; ret = lzma_stream_decoder_init(
lzma_ret ret = lzma_stream_decoder_init(
&stream_decoder, allocator, *memlimit, flags); &stream_decoder, allocator, *memlimit, flags);
if (ret == LZMA_OK) { if (ret == LZMA_OK) {

View File

@ -45,6 +45,10 @@ lzma_stream_buffer_encode(lzma_filter *filters, lzma_check check,
lzma_allocator *allocator, const uint8_t *in, size_t in_size, lzma_allocator *allocator, const uint8_t *in, size_t in_size,
uint8_t *out, size_t *out_pos_ptr, size_t out_size) uint8_t *out, size_t *out_pos_ptr, size_t out_size)
{ {
lzma_stream_flags stream_flags = { 0 };
lzma_block block = { 0 };
size_t out_pos;
// Sanity checks // Sanity checks
if (filters == NULL || (unsigned int)(check) > LZMA_CHECK_ID_MAX if (filters == NULL || (unsigned int)(check) > LZMA_CHECK_ID_MAX
|| (in == NULL && in_size != 0) || out == NULL || (in == NULL && in_size != 0) || out == NULL
@ -61,7 +65,7 @@ lzma_stream_buffer_encode(lzma_filter *filters, lzma_check check,
// Use a local copy. We update *out_pos_ptr only if everything // Use a local copy. We update *out_pos_ptr only if everything
// succeeds. // succeeds.
size_t out_pos = *out_pos_ptr; out_pos = *out_pos_ptr;
// Check that there's enough space for both Stream Header and // Check that there's enough space for both Stream Header and
// Stream Footer. // Stream Footer.
@ -73,10 +77,7 @@ lzma_stream_buffer_encode(lzma_filter *filters, lzma_check check,
out_size -= LZMA_STREAM_HEADER_SIZE; out_size -= LZMA_STREAM_HEADER_SIZE;
// Encode the Stream Header. // Encode the Stream Header.
lzma_stream_flags stream_flags = { stream_flags.check = check;
.version = 0,
.check = check,
};
if (lzma_stream_header_encode(&stream_flags, out + out_pos) if (lzma_stream_header_encode(&stream_flags, out + out_pos)
!= LZMA_OK) != LZMA_OK)
@ -85,11 +86,8 @@ lzma_stream_buffer_encode(lzma_filter *filters, lzma_check check,
out_pos += LZMA_STREAM_HEADER_SIZE; out_pos += LZMA_STREAM_HEADER_SIZE;
// Encode a Block but only if there is at least one byte of input. // Encode a Block but only if there is at least one byte of input.
lzma_block block = { block.check = check;
.version = 0, block.filters = filters;
.check = check,
.filters = filters,
};
if (in_size > 0) if (in_size > 0)
return_if_error(lzma_block_buffer_encode(&block, allocator, return_if_error(lzma_block_buffer_encode(&block, allocator,
@ -97,6 +95,8 @@ lzma_stream_buffer_encode(lzma_filter *filters, lzma_check check,
// Index // Index
{ {
lzma_ret ret;
// Create an Index. It will have one Record if there was // Create an Index. It will have one Record if there was
// at least one byte of input to encode. Otherwise the // at least one byte of input to encode. Otherwise the
// Index will be empty. // Index will be empty.
@ -104,7 +104,7 @@ lzma_stream_buffer_encode(lzma_filter *filters, lzma_check check,
if (i == NULL) if (i == NULL)
return LZMA_MEM_ERROR; return LZMA_MEM_ERROR;
lzma_ret ret = LZMA_OK; ret = LZMA_OK;
if (in_size > 0) if (in_size > 0)
ret = lzma_index_append(i, allocator, ret = lzma_index_append(i, allocator,

View File

@ -106,6 +106,8 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
while (true) while (true)
switch (coder->sequence) { switch (coder->sequence) {
case SEQ_STREAM_HEADER: { case SEQ_STREAM_HEADER: {
lzma_ret ret;
// Copy the Stream Header to the internal buffer. // Copy the Stream Header to the internal buffer.
lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos, lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos,
LZMA_STREAM_HEADER_SIZE); LZMA_STREAM_HEADER_SIZE);
@ -117,7 +119,7 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
coder->pos = 0; coder->pos = 0;
// Decode the Stream Header. // Decode the Stream Header.
const lzma_ret ret = lzma_stream_header_decode( ret = lzma_stream_header_decode(
&coder->stream_flags, coder->buffer); &coder->stream_flags, coder->buffer);
if (ret != LZMA_OK) if (ret != LZMA_OK)
return ret == LZMA_FORMAT_ERROR && !coder->first_stream return ret == LZMA_FORMAT_ERROR && !coder->first_stream
@ -154,6 +156,11 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
// Fall through // Fall through
case SEQ_BLOCK_HEADER: { case SEQ_BLOCK_HEADER: {
lzma_filter filters[LZMA_FILTERS_MAX + 1];
uint64_t memusage;
lzma_ret ret;
size_t i;
if (*in_pos >= in_size) if (*in_pos >= in_size)
return LZMA_OK; return LZMA_OK;
@ -188,7 +195,6 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
// Set up a buffer to hold the filter chain. Block Header // Set up a buffer to hold the filter chain. Block Header
// decoder will initialize all members of this array so // decoder will initialize all members of this array so
// we don't need to do it here. // we don't need to do it here.
lzma_filter filters[LZMA_FILTERS_MAX + 1];
coder->block_options.filters = filters; coder->block_options.filters = filters;
// Decode the Block Header. // Decode the Block Header.
@ -196,9 +202,7 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
allocator, coder->buffer)); allocator, coder->buffer));
// Check the memory usage limit. // Check the memory usage limit.
const uint64_t memusage = lzma_raw_decoder_memusage(filters); memusage = lzma_raw_decoder_memusage(filters);
lzma_ret ret;
if (memusage == UINT64_MAX) { if (memusage == UINT64_MAX) {
// One or more unknown Filter IDs. // One or more unknown Filter IDs.
ret = LZMA_OPTIONS_ERROR; ret = LZMA_OPTIONS_ERROR;
@ -224,7 +228,7 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
// Free the allocated filter options since they are needed // Free the allocated filter options since they are needed
// only to initialize the Block decoder. // only to initialize the Block decoder.
for (size_t i = 0; i < LZMA_FILTERS_MAX; ++i) for (i = 0; i < LZMA_FILTERS_MAX; ++i)
lzma_free(filters[i].options, allocator); lzma_free(filters[i].options, allocator);
coder->block_options.filters = NULL; coder->block_options.filters = NULL;
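
Note where the hoisted declarations land in the Stream decoder above: filters, memusage, ret, and i go to the top of the braced "case SEQ_BLOCK_HEADER: {" block, not to the top of the whole function. C89 allows declarations at the start of any compound statement, so wrapping a case body in braces keeps these variables as local as they were before. A sketch:

/* Sketch: block-scoped locals inside a braced case body are valid C89,
 * so hoisting only needs to reach the nearest enclosing block. */
static int
sequence_sketch(int sequence, int input)
{
        switch (sequence) {
        case 0: {
                int header;             /* local to this case only */

                header = input * 2;
                return header;
        }
        default:
                return input;
        }
}
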
@ -260,6 +264,8 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
} }
case SEQ_INDEX: { case SEQ_INDEX: {
lzma_ret ret;
// If we don't have any input, don't call // If we don't have any input, don't call
// lzma_index_hash_decode() since it would return // lzma_index_hash_decode() since it would return
// LZMA_BUF_ERROR, which we must not do here. // LZMA_BUF_ERROR, which we must not do here.
@ -268,7 +274,7 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
// Decode the Index and compare it to the hash calculated // Decode the Index and compare it to the hash calculated
// from the sizes of the Blocks (if any). // from the sizes of the Blocks (if any).
const lzma_ret ret = lzma_index_hash_decode(coder->index_hash, ret = lzma_index_hash_decode(coder->index_hash,
in, in_pos, in_size); in, in_pos, in_size);
if (ret != LZMA_STREAM_END) if (ret != LZMA_STREAM_END)
return ret; return ret;
@ -279,6 +285,9 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
// Fall through // Fall through
case SEQ_STREAM_FOOTER: { case SEQ_STREAM_FOOTER: {
lzma_stream_flags footer_flags;
lzma_ret ret;
// Copy the Stream Footer to the internal buffer. // Copy the Stream Footer to the internal buffer.
lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos, lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos,
LZMA_STREAM_HEADER_SIZE); LZMA_STREAM_HEADER_SIZE);
@ -292,8 +301,7 @@ stream_decode(lzma_coder *coder, lzma_allocator *allocator,
// Decode the Stream Footer. The decoder gives // Decode the Stream Footer. The decoder gives
// LZMA_FORMAT_ERROR if the magic bytes don't match, // LZMA_FORMAT_ERROR if the magic bytes don't match,
// so convert that return code to LZMA_DATA_ERROR. // so convert that return code to LZMA_DATA_ERROR.
lzma_stream_flags footer_flags; ret = lzma_stream_footer_decode(
const lzma_ret ret = lzma_stream_footer_decode(
&footer_flags, coder->buffer); &footer_flags, coder->buffer);
if (ret != LZMA_OK) if (ret != LZMA_OK)
return ret == LZMA_FORMAT_ERROR return ret == LZMA_FORMAT_ERROR
@ -442,7 +450,7 @@ lzma_stream_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
extern LZMA_API(lzma_ret) extern LZMA_API(lzma_ret)
lzma_stream_decoder(lzma_stream *strm, uint64_t memlimit, uint32_t flags) lzma_stream_decoder(lzma_stream *strm, uint64_t memlimit, uint32_t flags)
{ {
lzma_next_strm_init(lzma_stream_decoder_init, strm, memlimit, flags); lzma_next_strm_init2(lzma_stream_decoder_init, strm, memlimit, flags);
strm->internal->supported_actions[LZMA_RUN] = true; strm->internal->supported_actions[LZMA_RUN] = true;
strm->internal->supported_actions[LZMA_FINISH] = true; strm->internal->supported_actions[LZMA_FINISH] = true;

View File

@ -147,6 +147,8 @@ stream_encode(lzma_coder *coder, lzma_allocator *allocator,
} }
case SEQ_BLOCK_ENCODE: { case SEQ_BLOCK_ENCODE: {
lzma_vli unpadded_size;
static const lzma_action convert[4] = { static const lzma_action convert[4] = {
LZMA_RUN, LZMA_RUN,
LZMA_SYNC_FLUSH, LZMA_SYNC_FLUSH,
@ -162,7 +164,7 @@ stream_encode(lzma_coder *coder, lzma_allocator *allocator,
return ret; return ret;
// Add a new Index Record. // Add a new Index Record.
const lzma_vli unpadded_size = lzma_block_unpadded_size( unpadded_size = lzma_block_unpadded_size(
&coder->block_options); &coder->block_options);
assert(unpadded_size != 0); assert(unpadded_size != 0);
return_if_error(lzma_index_append(coder->index, allocator, return_if_error(lzma_index_append(coder->index, allocator,
@ -174,6 +176,12 @@ stream_encode(lzma_coder *coder, lzma_allocator *allocator,
} }
case SEQ_INDEX_ENCODE: { case SEQ_INDEX_ENCODE: {
const lzma_stream_flags stream_flags = {
0,
lzma_index_size(coder->index),
coder->block_options.check,
};
// Call the Index encoder. It doesn't take any input, so // Call the Index encoder. It doesn't take any input, so
// those pointers can be NULL. // those pointers can be NULL.
const lzma_ret ret = coder->index_encoder.code( const lzma_ret ret = coder->index_encoder.code(
@ -184,11 +192,6 @@ stream_encode(lzma_coder *coder, lzma_allocator *allocator,
return ret; return ret;
// Encode the Stream Footer into coder->buffer. // Encode the Stream Footer into coder->buffer.
const lzma_stream_flags stream_flags = {
.version = 0,
.backward_size = lzma_index_size(coder->index),
.check = coder->block_options.check,
};
if (lzma_stream_footer_encode(&stream_flags, coder->buffer) if (lzma_stream_footer_encode(&stream_flags, coder->buffer)
!= LZMA_OK) != LZMA_OK)
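
Where a designated initializer is kept as a brace list, as with stream_flags above ("{ 0, lzma_index_size(coder->index), coder->block_options.check }" here and "{ 0, 0, check }" in the encoder init further down), the values become positional: they must follow the member order the old designators imply (version, backward_size, check), and nothing diagnoses a mismatch if that order ever changes; the non-constant lzma_index_size() element also runs into the C90 aggregate-initializer constraint noted earlier. A comment naming the members at each such site, or the declare-then-assign form this commit uses elsewhere, keeps the dependency visible. A sketch with a hypothetical struct:

/* Sketch: positional initializers depend on member order.  With the
 * hypothetical struct below, swapping two members would silently change
 * the meaning of "{ 0, backward, check }", which is why member-by-member
 * assignment (or a comment) is worth the extra lines. */
struct flags_sketch {
        unsigned version;
        unsigned long backward_size;
        int check;
};

static struct flags_sketch
footer_flags_sketch(unsigned long backward, int check)
{
        struct flags_sketch f;

        f.version = 0;          /* same order as the declaration above */
        f.backward_size = backward;
        f.check = check;
        return f;
}
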
@ -211,11 +214,13 @@ stream_encode(lzma_coder *coder, lzma_allocator *allocator,
static void static void
stream_encoder_end(lzma_coder *coder, lzma_allocator *allocator) stream_encoder_end(lzma_coder *coder, lzma_allocator *allocator)
{ {
size_t i;
lzma_next_end(&coder->block_encoder, allocator); lzma_next_end(&coder->block_encoder, allocator);
lzma_next_end(&coder->index_encoder, allocator); lzma_next_end(&coder->index_encoder, allocator);
lzma_index_end(coder->index, allocator); lzma_index_end(coder->index, allocator);
for (size_t i = 0; coder->filters[i].id != LZMA_VLI_UNKNOWN; ++i) for (i = 0; coder->filters[i].id != LZMA_VLI_UNKNOWN; ++i)
lzma_free(coder->filters[i].options, allocator); lzma_free(coder->filters[i].options, allocator);
lzma_free(coder, allocator); lzma_free(coder, allocator);
@ -228,14 +233,18 @@ stream_encoder_update(lzma_coder *coder, lzma_allocator *allocator,
const lzma_filter *filters, const lzma_filter *filters,
const lzma_filter *reversed_filters) const lzma_filter *reversed_filters)
{ {
size_t i;
if (coder->sequence <= SEQ_BLOCK_INIT) { if (coder->sequence <= SEQ_BLOCK_INIT) {
lzma_ret ret;
// There is no incomplete Block waiting to be finished, // There is no incomplete Block waiting to be finished,
// thus we can change the whole filter chain. Start by // thus we can change the whole filter chain. Start by
// trying to initialize the Block encoder with the new // trying to initialize the Block encoder with the new
// chain. This way we detect if the chain is valid. // chain. This way we detect if the chain is valid.
coder->block_encoder_is_initialized = false; coder->block_encoder_is_initialized = false;
coder->block_options.filters = (lzma_filter *)(filters); coder->block_options.filters = (lzma_filter *)(filters);
const lzma_ret ret = block_encoder_init(coder, allocator); ret = block_encoder_init(coder, allocator);
coder->block_options.filters = coder->filters; coder->block_options.filters = coder->filters;
if (ret != LZMA_OK) if (ret != LZMA_OK)
return ret; return ret;
@ -255,7 +264,7 @@ stream_encoder_update(lzma_coder *coder, lzma_allocator *allocator,
} }
// Free the copy of the old chain and make a copy of the new chain. // Free the copy of the old chain and make a copy of the new chain.
for (size_t i = 0; coder->filters[i].id != LZMA_VLI_UNKNOWN; ++i) for (i = 0; coder->filters[i].id != LZMA_VLI_UNKNOWN; ++i)
lzma_free(coder->filters[i].options, allocator); lzma_free(coder->filters[i].options, allocator);
return lzma_filters_copy(filters, coder->filters, allocator); return lzma_filters_copy(filters, coder->filters, allocator);
@ -266,6 +275,8 @@ extern lzma_ret
lzma_stream_encoder_init(lzma_next_coder *next, lzma_allocator *allocator, lzma_stream_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
const lzma_filter *filters, lzma_check check) const lzma_filter *filters, lzma_check check)
{ {
lzma_stream_flags stream_flags = { 0, 0, check };
lzma_next_coder_init(&lzma_stream_encoder_init, next, allocator); lzma_next_coder_init(&lzma_stream_encoder_init, next, allocator);
if (filters == NULL) if (filters == NULL)
@ -298,10 +309,6 @@ lzma_stream_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
return LZMA_MEM_ERROR; return LZMA_MEM_ERROR;
// Encode the Stream Header // Encode the Stream Header
lzma_stream_flags stream_flags = {
.version = 0,
.check = check,
};
return_if_error(lzma_stream_header_encode( return_if_error(lzma_stream_header_encode(
&stream_flags, next->coder->buffer)); &stream_flags, next->coder->buffer));
@ -320,7 +327,7 @@ extern LZMA_API(lzma_ret)
lzma_stream_encoder(lzma_stream *strm, lzma_stream_encoder(lzma_stream *strm,
const lzma_filter *filters, lzma_check check) const lzma_filter *filters, lzma_check check)
{ {
lzma_next_strm_init(lzma_stream_encoder_init, strm, filters, check); lzma_next_strm_init2(lzma_stream_encoder_init, strm, filters, check);
strm->internal->supported_actions[LZMA_RUN] = true; strm->internal->supported_actions[LZMA_RUN] = true;
strm->internal->supported_actions[LZMA_SYNC_FLUSH] = true; strm->internal->supported_actions[LZMA_SYNC_FLUSH] = true;

View File

@ -30,13 +30,15 @@ stream_flags_decode(lzma_stream_flags *options, const uint8_t *in)
extern LZMA_API(lzma_ret) extern LZMA_API(lzma_ret)
lzma_stream_header_decode(lzma_stream_flags *options, const uint8_t *in) lzma_stream_header_decode(lzma_stream_flags *options, const uint8_t *in)
{ {
uint32_t crc;
// Magic // Magic
if (memcmp(in, lzma_header_magic, sizeof(lzma_header_magic)) != 0) if (memcmp(in, lzma_header_magic, sizeof(lzma_header_magic)) != 0)
return LZMA_FORMAT_ERROR; return LZMA_FORMAT_ERROR;
// Verify the CRC32 so we can distinguish between corrupt // Verify the CRC32 so we can distinguish between corrupt
// and unsupported files. // and unsupported files.
const uint32_t crc = lzma_crc32(in + sizeof(lzma_header_magic), crc = lzma_crc32(in + sizeof(lzma_header_magic),
LZMA_STREAM_FLAGS_SIZE, 0); LZMA_STREAM_FLAGS_SIZE, 0);
if (crc != unaligned_read32le(in + sizeof(lzma_header_magic) if (crc != unaligned_read32le(in + sizeof(lzma_header_magic)
+ LZMA_STREAM_FLAGS_SIZE)) + LZMA_STREAM_FLAGS_SIZE))
@ -59,13 +61,15 @@ lzma_stream_header_decode(lzma_stream_flags *options, const uint8_t *in)
extern LZMA_API(lzma_ret) extern LZMA_API(lzma_ret)
lzma_stream_footer_decode(lzma_stream_flags *options, const uint8_t *in) lzma_stream_footer_decode(lzma_stream_flags *options, const uint8_t *in)
{ {
uint32_t crc;
// Magic // Magic
if (memcmp(in + sizeof(uint32_t) * 2 + LZMA_STREAM_FLAGS_SIZE, if (memcmp(in + sizeof(uint32_t) * 2 + LZMA_STREAM_FLAGS_SIZE,
lzma_footer_magic, sizeof(lzma_footer_magic)) != 0) lzma_footer_magic, sizeof(lzma_footer_magic)) != 0)
return LZMA_FORMAT_ERROR; return LZMA_FORMAT_ERROR;
// CRC32 // CRC32
const uint32_t crc = lzma_crc32(in + sizeof(uint32_t), crc = lzma_crc32(in + sizeof(uint32_t),
sizeof(uint32_t) + LZMA_STREAM_FLAGS_SIZE, 0); sizeof(uint32_t) + LZMA_STREAM_FLAGS_SIZE, 0);
if (crc != unaligned_read32le(in)) if (crc != unaligned_read32le(in))
return LZMA_DATA_ERROR; return LZMA_DATA_ERROR;

View File

@ -29,6 +29,8 @@ stream_flags_encode(const lzma_stream_flags *options, uint8_t *out)
extern LZMA_API(lzma_ret) extern LZMA_API(lzma_ret)
lzma_stream_header_encode(const lzma_stream_flags *options, uint8_t *out) lzma_stream_header_encode(const lzma_stream_flags *options, uint8_t *out)
{ {
uint32_t crc;
assert(sizeof(lzma_header_magic) + LZMA_STREAM_FLAGS_SIZE assert(sizeof(lzma_header_magic) + LZMA_STREAM_FLAGS_SIZE
+ 4 == LZMA_STREAM_HEADER_SIZE); + 4 == LZMA_STREAM_HEADER_SIZE);
@ -43,7 +45,7 @@ lzma_stream_header_encode(const lzma_stream_flags *options, uint8_t *out)
return LZMA_PROG_ERROR; return LZMA_PROG_ERROR;
// CRC32 of the Stream Header // CRC32 of the Stream Header
const uint32_t crc = lzma_crc32(out + sizeof(lzma_header_magic), crc = lzma_crc32(out + sizeof(lzma_header_magic),
LZMA_STREAM_FLAGS_SIZE, 0); LZMA_STREAM_FLAGS_SIZE, 0);
unaligned_write32le(out + sizeof(lzma_header_magic) unaligned_write32le(out + sizeof(lzma_header_magic)
@ -56,6 +58,8 @@ lzma_stream_header_encode(const lzma_stream_flags *options, uint8_t *out)
extern LZMA_API(lzma_ret) extern LZMA_API(lzma_ret)
lzma_stream_footer_encode(const lzma_stream_flags *options, uint8_t *out) lzma_stream_footer_encode(const lzma_stream_flags *options, uint8_t *out)
{ {
uint32_t crc;
assert(2 * 4 + LZMA_STREAM_FLAGS_SIZE + sizeof(lzma_footer_magic) assert(2 * 4 + LZMA_STREAM_FLAGS_SIZE + sizeof(lzma_footer_magic)
== LZMA_STREAM_HEADER_SIZE); == LZMA_STREAM_HEADER_SIZE);
@ -73,7 +77,7 @@ lzma_stream_footer_encode(const lzma_stream_flags *options, uint8_t *out)
return LZMA_PROG_ERROR; return LZMA_PROG_ERROR;
// CRC32 // CRC32
const uint32_t crc = lzma_crc32( crc = lzma_crc32(
out + 4, 4 + LZMA_STREAM_FLAGS_SIZE, 0); out + 4, 4 + LZMA_STREAM_FLAGS_SIZE, 0);
unaligned_write32le(out, crc); unaligned_write32le(out, crc);

View File

@ -16,10 +16,11 @@
extern LZMA_API(uint32_t) extern LZMA_API(uint32_t)
lzma_vli_size(lzma_vli vli) lzma_vli_size(lzma_vli vli)
{ {
uint32_t i = 0;
if (vli > LZMA_VLI_MAX) if (vli > LZMA_VLI_MAX)
return 0; return 0;
uint32_t i = 0;
do { do {
vli >>= 7; vli >>= 7;
++i; ++i;

View File

@ -27,6 +27,8 @@ extern lzma_ret
lzma_delta_coder_init(lzma_next_coder *next, lzma_allocator *allocator, lzma_delta_coder_init(lzma_next_coder *next, lzma_allocator *allocator,
const lzma_filter_info *filters) const lzma_filter_info *filters)
{ {
const lzma_options_delta *opt;
// Allocate memory for the decoder if needed. // Allocate memory for the decoder if needed.
if (next->coder == NULL) { if (next->coder == NULL) {
next->coder = lzma_alloc(sizeof(lzma_coder), allocator); next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
@ -43,7 +45,7 @@ lzma_delta_coder_init(lzma_next_coder *next, lzma_allocator *allocator,
return LZMA_OPTIONS_ERROR; return LZMA_OPTIONS_ERROR;
// Set the delta distance. // Set the delta distance.
const lzma_options_delta *opt = filters[0].options; opt = filters[0].options;
next->coder->distance = opt->dist; next->coder->distance = opt->dist;
// Initialize the rest of the variables. // Initialize the rest of the variables.

View File

@ -17,9 +17,10 @@
static void static void
decode_buffer(lzma_coder *coder, uint8_t *buffer, size_t size) decode_buffer(lzma_coder *coder, uint8_t *buffer, size_t size)
{ {
size_t i;
const size_t distance = coder->distance; const size_t distance = coder->distance;
for (size_t i = 0; i < size; ++i) { for (i = 0; i < size; ++i) {
buffer[i] += coder->history[(distance + coder->pos) & 0xFF]; buffer[i] += coder->history[(distance + coder->pos) & 0xFF];
coder->history[coder->pos-- & 0xFF] = buffer[i]; coder->history[coder->pos-- & 0xFF] = buffer[i];
} }
@ -32,11 +33,12 @@ delta_decode(lzma_coder *coder, lzma_allocator *allocator,
size_t in_size, uint8_t *restrict out, size_t in_size, uint8_t *restrict out,
size_t *restrict out_pos, size_t out_size, lzma_action action) size_t *restrict out_pos, size_t out_size, lzma_action action)
{ {
const size_t out_start = *out_pos;
lzma_ret ret;
assert(coder->next.code != NULL); assert(coder->next.code != NULL);
const size_t out_start = *out_pos; ret = coder->next.code(coder->next.coder, allocator,
const lzma_ret ret = coder->next.code(coder->next.coder, allocator,
in, in_pos, in_size, out, out_pos, out_size, in, in_pos, in_size, out, out_pos, out_size,
action); action);
@ -59,11 +61,12 @@ extern lzma_ret
lzma_delta_props_decode(void **options, lzma_allocator *allocator, lzma_delta_props_decode(void **options, lzma_allocator *allocator,
const uint8_t *props, size_t props_size) const uint8_t *props, size_t props_size)
{ {
lzma_options_delta *opt;
if (props_size != 1) if (props_size != 1)
return LZMA_OPTIONS_ERROR; return LZMA_OPTIONS_ERROR;
lzma_options_delta *opt opt = lzma_alloc(sizeof(lzma_options_delta), allocator);
= lzma_alloc(sizeof(lzma_options_delta), allocator);
if (opt == NULL) if (opt == NULL)
return LZMA_MEM_ERROR; return LZMA_MEM_ERROR;

View File

@ -21,9 +21,10 @@ static void
copy_and_encode(lzma_coder *coder, copy_and_encode(lzma_coder *coder,
const uint8_t *restrict in, uint8_t *restrict out, size_t size) const uint8_t *restrict in, uint8_t *restrict out, size_t size)
{ {
size_t i;
const size_t distance = coder->distance; const size_t distance = coder->distance;
for (size_t i = 0; i < size; ++i) { for (i = 0; i < size; ++i) {
const uint8_t tmp = coder->history[ const uint8_t tmp = coder->history[
(distance + coder->pos) & 0xFF]; (distance + coder->pos) & 0xFF];
coder->history[coder->pos-- & 0xFF] = in[i]; coder->history[coder->pos-- & 0xFF] = in[i];
@ -37,9 +38,10 @@ copy_and_encode(lzma_coder *coder,
static void static void
encode_in_place(lzma_coder *coder, uint8_t *buffer, size_t size) encode_in_place(lzma_coder *coder, uint8_t *buffer, size_t size)
{ {
size_t i;
const size_t distance = coder->distance; const size_t distance = coder->distance;
for (size_t i = 0; i < size; ++i) { for (i = 0; i < size; ++i) {
const uint8_t tmp = coder->history[ const uint8_t tmp = coder->history[
(distance + coder->pos) & 0xFF]; (distance + coder->pos) & 0xFF];
coder->history[coder->pos-- & 0xFF] = buffer[i]; coder->history[coder->pos-- & 0xFF] = buffer[i];
@ -109,12 +111,13 @@ lzma_delta_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
extern lzma_ret extern lzma_ret
lzma_delta_props_encode(const void *options, uint8_t *out) lzma_delta_props_encode(const void *options, uint8_t *out)
{ {
const lzma_options_delta *opt = options;
// The caller must have already validated the options, so it's // The caller must have already validated the options, so it's
// LZMA_PROG_ERROR if they are invalid. // LZMA_PROG_ERROR if they are invalid.
if (lzma_delta_coder_memusage(options) == UINT64_MAX) if (lzma_delta_coder_memusage(options) == UINT64_MAX)
return LZMA_PROG_ERROR; return LZMA_PROG_ERROR;
const lzma_options_delta *opt = options;
out[0] = opt->dist - LZMA_DELTA_DIST_MIN; out[0] = opt->dist - LZMA_DELTA_DIST_MIN;
return LZMA_OK; return LZMA_OK;

View File

@ -69,13 +69,17 @@ decode_buffer(lzma_coder *coder,
size_t *restrict out_pos, size_t out_size) size_t *restrict out_pos, size_t out_size)
{ {
while (true) { while (true) {
size_t copy_size;
size_t dict_start;
lzma_ret ret;
// Wrap the dictionary if needed. // Wrap the dictionary if needed.
if (coder->dict.pos == coder->dict.size) if (coder->dict.pos == coder->dict.size)
coder->dict.pos = 0; coder->dict.pos = 0;
// Store the current dictionary position. It is needed to know // Store the current dictionary position. It is needed to know
// where to start copying to the out[] buffer. // where to start copying to the out[] buffer.
const size_t dict_start = coder->dict.pos; dict_start = coder->dict.pos;
// Calculate how much we allow coder->lz.code() to decode. // Calculate how much we allow coder->lz.code() to decode.
// It must not decode past the end of the dictionary // It must not decode past the end of the dictionary
@ -86,13 +90,13 @@ decode_buffer(lzma_coder *coder,
coder->dict.size - coder->dict.pos); coder->dict.size - coder->dict.pos);
// Call the coder->lz.code() to do the actual decoding. // Call the coder->lz.code() to do the actual decoding.
const lzma_ret ret = coder->lz.code( ret = coder->lz.code(
coder->lz.coder, &coder->dict, coder->lz.coder, &coder->dict,
in, in_pos, in_size); in, in_pos, in_size);
// Copy the decoded data from the dictionary to the out[] // Copy the decoded data from the dictionary to the out[]
// buffer. // buffer.
const size_t copy_size = coder->dict.pos - dict_start; copy_size = coder->dict.pos - dict_start;
assert(copy_size <= out_size - *out_pos); assert(copy_size <= out_size - *out_pos);
memcpy(out + *out_pos, coder->dict.buf + dict_start, memcpy(out + *out_pos, coder->dict.buf + dict_start,
copy_size); copy_size);
@ -139,13 +143,15 @@ lz_decode(lzma_coder *coder,
// We aren't the last coder in the chain, we need to decode // We aren't the last coder in the chain, we need to decode
// our input to a temporary buffer. // our input to a temporary buffer.
while (*out_pos < out_size) { while (*out_pos < out_size) {
lzma_ret ret;
// Fill the temporary buffer if it is empty. // Fill the temporary buffer if it is empty.
if (!coder->next_finished if (!coder->next_finished
&& coder->temp.pos == coder->temp.size) { && coder->temp.pos == coder->temp.size) {
coder->temp.pos = 0; coder->temp.pos = 0;
coder->temp.size = 0; coder->temp.size = 0;
const lzma_ret ret = coder->next.code( ret = coder->next.code(
coder->next.coder, coder->next.coder,
allocator, in, in_pos, in_size, allocator, in, in_pos, in_size,
coder->temp.buffer, &coder->temp.size, coder->temp.buffer, &coder->temp.size,
@ -167,7 +173,7 @@ lz_decode(lzma_coder *coder,
return LZMA_OK; return LZMA_OK;
} }
const lzma_ret ret = decode_buffer(coder, coder->temp.buffer, ret = decode_buffer(coder, coder->temp.buffer,
&coder->temp.pos, coder->temp.size, &coder->temp.pos, coder->temp.size,
out, out_pos, out_size); out, out_pos, out_size);
@ -206,6 +212,8 @@ lzma_lz_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
lzma_allocator *allocator, const void *options, lzma_allocator *allocator, const void *options,
lzma_lz_options *lz_options)) lzma_lz_options *lz_options))
{ {
lzma_lz_options lz_options;
// Allocate the base structure if it isn't already allocated. // Allocate the base structure if it isn't already allocated.
if (next->coder == NULL) { if (next->coder == NULL) {
next->coder = lzma_alloc(sizeof(lzma_coder), allocator); next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
@ -223,7 +231,6 @@ lzma_lz_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
// Allocate and initialize the LZ-based decoder. It will also give // Allocate and initialize the LZ-based decoder. It will also give
// us the dictionary size. // us the dictionary size.
lzma_lz_options lz_options;
return_if_error(lz_init(&next->coder->lz, allocator, return_if_error(lz_init(&next->coder->lz, allocator,
filters[0].options, &lz_options)); filters[0].options, &lz_options));

View File

@ -72,14 +72,14 @@ typedef struct {
} lzma_lz_decoder; } lzma_lz_decoder;
#define LZMA_LZ_DECODER_INIT \ static const lzma_lz_decoder LZMA_LZ_DECODER_INIT =
(lzma_lz_decoder){ \ {
.coder = NULL, \ NULL,
.code = NULL, \ NULL,
.reset = NULL, \ NULL,
.set_uncompressed = NULL, \ NULL,
.end = NULL, \ NULL,
} };
extern lzma_ret lzma_lz_decoder_init(lzma_next_coder *next, extern lzma_ret lzma_lz_decoder_init(lzma_next_coder *next,
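
LZMA_LZ_DECODER_INIT changes kind above: the C99 version was a compound literal produced by a macro, while the C89 version is a named static const object, since compound literals are a C99 feature. A call site that assigns it to a decoder struct keeps working either way, because structure assignment from a const object is ordinary C89; the only visible difference is that the initializer is now a real object with an address. A sketch with a hypothetical decoder struct:

#include <stddef.h>

/* Sketch: replacing a C99 compound literal with a static const object.
 * The struct and names are hypothetical. */
struct lz_dec_sketch {
        void *coder;
        int (*code)(void *coder);
};

/* C99:  #define LZ_DEC_INIT (struct lz_dec_sketch){ NULL, NULL } */
static const struct lz_dec_sketch LZ_DEC_INIT = { NULL, NULL };

static void
lz_dec_reset(struct lz_dec_sketch *lz)
{
        *lz = LZ_DEC_INIT;      /* struct assignment is valid C89 */
}
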
@ -151,13 +151,15 @@ dict_repeat(lzma_dict *dict, uint32_t distance, uint32_t *len)
dict->pos += left; dict->pos += left;
} else { } else {
uint32_t copy_pos;
uint32_t copy_size;
// The bigger the dictionary, the more rare this // The bigger the dictionary, the more rare this
// case occurs. We need to "wrap" the dict, thus // case occurs. We need to "wrap" the dict, thus
// we might need two memcpy() to copy all the data. // we might need two memcpy() to copy all the data.
assert(dict->full == dict->size); assert(dict->full == dict->size);
const uint32_t copy_pos copy_pos = dict->pos - distance - 1 + dict->size;
= dict->pos - distance - 1 + dict->size; copy_size = dict->size - copy_pos;
uint32_t copy_size = dict->size - copy_pos;
if (copy_size < left) { if (copy_size < left) {
memmove(dict->buf + dict->pos, dict->buf + copy_pos, memmove(dict->buf + dict->pos, dict->buf + copy_pos,

View File

@ -43,16 +43,18 @@ struct lzma_coder_s {
static void static void
move_window(lzma_mf *mf) move_window(lzma_mf *mf)
{ {
uint32_t move_offset;
size_t move_size;
// Align the move to a multiple of 16 bytes. Some LZ-based encoders // Align the move to a multiple of 16 bytes. Some LZ-based encoders
// like LZMA use the lowest bits of mf->read_pos to know the // like LZMA use the lowest bits of mf->read_pos to know the
// alignment of the uncompressed data. We also get better speed // alignment of the uncompressed data. We also get better speed
// for memmove() with aligned buffers. // for memmove() with aligned buffers.
assert(mf->read_pos > mf->keep_size_before); assert(mf->read_pos > mf->keep_size_before);
const uint32_t move_offset move_offset = (mf->read_pos - mf->keep_size_before) & ~UINT32_C(15);
= (mf->read_pos - mf->keep_size_before) & ~UINT32_C(15);
assert(mf->write_pos > move_offset); assert(mf->write_pos > move_offset);
const size_t move_size = mf->write_pos - move_offset; move_size = mf->write_pos - move_offset;
assert(move_offset + move_size <= mf->size); assert(move_offset + move_size <= mf->size);
@ -79,6 +81,9 @@ static lzma_ret
fill_window(lzma_coder *coder, lzma_allocator *allocator, const uint8_t *in, fill_window(lzma_coder *coder, lzma_allocator *allocator, const uint8_t *in,
size_t *in_pos, size_t in_size, lzma_action action) size_t *in_pos, size_t in_size, lzma_action action)
{ {
size_t write_pos;
lzma_ret ret;
assert(coder->mf.read_pos <= coder->mf.write_pos); assert(coder->mf.read_pos <= coder->mf.write_pos);
// Move the sliding window if needed. // Move the sliding window if needed.
@ -88,8 +93,7 @@ fill_window(lzma_coder *coder, lzma_allocator *allocator, const uint8_t *in,
// Maybe this is ugly, but lzma_mf uses uint32_t for most things // Maybe this is ugly, but lzma_mf uses uint32_t for most things
// (which I find cleanest), but we need size_t here when filling // (which I find cleanest), but we need size_t here when filling
// the history window. // the history window.
size_t write_pos = coder->mf.write_pos; write_pos = coder->mf.write_pos;
lzma_ret ret;
if (coder->next.code == NULL) { if (coder->next.code == NULL) {
// Not using a filter, simply memcpy() as much as possible. // Not using a filter, simply memcpy() as much as possible.
lzma_bufcpy(in, in_pos, in_size, coder->mf.buffer, lzma_bufcpy(in, in_pos, in_size, coder->mf.buffer,
@ -156,6 +160,8 @@ lz_encode(lzma_coder *coder, lzma_allocator *allocator,
{ {
while (*out_pos < out_size while (*out_pos < out_size
&& (*in_pos < in_size || action != LZMA_RUN)) { && (*in_pos < in_size || action != LZMA_RUN)) {
lzma_ret ret;
// Read more data to coder->mf.buffer if needed. // Read more data to coder->mf.buffer if needed.
if (coder->mf.action == LZMA_RUN && coder->mf.read_pos if (coder->mf.action == LZMA_RUN && coder->mf.read_pos
>= coder->mf.read_limit) >= coder->mf.read_limit)
@ -163,7 +169,7 @@ lz_encode(lzma_coder *coder, lzma_allocator *allocator,
in, in_pos, in_size, action)); in, in_pos, in_size, action));
// Encode // Encode
const lzma_ret ret = coder->lz.code(coder->lz.coder, ret = coder->lz.code(coder->lz.coder,
&coder->mf, out, out_pos, out_size); &coder->mf, out, out_pos, out_size);
if (ret != LZMA_OK) { if (ret != LZMA_OK) {
// Setting this to LZMA_RUN for cases when we are // Setting this to LZMA_RUN for cases when we are
@ -182,6 +188,14 @@ static bool
lz_encoder_prepare(lzma_mf *mf, lzma_allocator *allocator, lz_encoder_prepare(lzma_mf *mf, lzma_allocator *allocator,
const lzma_lz_options *lz_options) const lzma_lz_options *lz_options)
{ {
bool is_bt;
uint32_t new_count;
uint32_t reserve;
uint32_t old_size;
uint32_t hash_bytes;
uint32_t hs;
uint32_t old_count;
// For now, the dictionary size is limited to 1.5 GiB. This may grow // For now, the dictionary size is limited to 1.5 GiB. This may grow
// in the future if needed, but it needs a little more work than just // in the future if needed, but it needs a little more work than just
// changing this check. // changing this check.
@ -207,14 +221,14 @@ lz_encoder_prepare(lzma_mf *mf, lzma_allocator *allocator,
// to size_t. // to size_t.
// - Memory usage calculation needs something too, e.g. use uint64_t // - Memory usage calculation needs something too, e.g. use uint64_t
// for mf->size. // for mf->size.
uint32_t reserve = lz_options->dict_size / 2; reserve = lz_options->dict_size / 2;
if (reserve > (UINT32_C(1) << 30)) if (reserve > (UINT32_C(1) << 30))
reserve /= 2; reserve /= 2;
reserve += (lz_options->before_size + lz_options->match_len_max reserve += (lz_options->before_size + lz_options->match_len_max
+ lz_options->after_size) / 2 + (UINT32_C(1) << 19); + lz_options->after_size) / 2 + (UINT32_C(1) << 19);
const uint32_t old_size = mf->size; old_size = mf->size;
mf->size = mf->keep_size_before + reserve + mf->keep_size_after; mf->size = mf->keep_size_before + reserve + mf->keep_size_after;
// Deallocate the old history buffer if it exists but has different // Deallocate the old history buffer if it exists but has different
@ -284,12 +298,11 @@ lz_encoder_prepare(lzma_mf *mf, lzma_allocator *allocator,
// Calculate the sizes of mf->hash and mf->son and check that // Calculate the sizes of mf->hash and mf->son and check that
// nice_len is big enough for the selected match finder. // nice_len is big enough for the selected match finder.
const uint32_t hash_bytes = lz_options->match_finder & 0x0F; hash_bytes = lz_options->match_finder & 0x0F;
if (hash_bytes > mf->nice_len) if (hash_bytes > mf->nice_len)
return true; return true;
const bool is_bt = (lz_options->match_finder & 0x10) != 0; is_bt = (lz_options->match_finder & 0x10) != 0;
uint32_t hs;
if (hash_bytes == 2) { if (hash_bytes == 2) {
hs = 0xFFFF; hs = 0xFFFF;
@ -331,13 +344,13 @@ lz_encoder_prepare(lzma_mf *mf, lzma_allocator *allocator,
// hash_size_sum + sons_count cannot overflow. // hash_size_sum + sons_count cannot overflow.
assert(hs < UINT32_MAX / 5); assert(hs < UINT32_MAX / 5);
const uint32_t old_count = mf->hash_size_sum + mf->sons_count; old_count = mf->hash_size_sum + mf->sons_count;
mf->hash_size_sum = hs; mf->hash_size_sum = hs;
mf->sons_count = mf->cyclic_size; mf->sons_count = mf->cyclic_size;
if (is_bt) if (is_bt)
mf->sons_count *= 2; mf->sons_count *= 2;
const uint32_t new_count = mf->hash_size_sum + mf->sons_count; new_count = mf->hash_size_sum + mf->sons_count;
// Deallocate the old hash array if it exists and has different size // Deallocate the old hash array if it exists and has different size
// than what is needed now. // than what is needed now.
@ -363,6 +376,8 @@ static bool
lz_encoder_init(lzma_mf *mf, lzma_allocator *allocator, lz_encoder_init(lzma_mf *mf, lzma_allocator *allocator,
const lzma_lz_options *lz_options) const lzma_lz_options *lz_options)
{ {
size_t alloc_count;
// Allocate the history buffer. // Allocate the history buffer.
if (mf->buffer == NULL) { if (mf->buffer == NULL) {
mf->buffer = lzma_alloc(mf->size, allocator); mf->buffer = lzma_alloc(mf->size, allocator);
@ -382,7 +397,7 @@ lz_encoder_init(lzma_mf *mf, lzma_allocator *allocator,
mf->pending = 0; mf->pending = 0;
// Allocate match finder's hash array. // Allocate match finder's hash array.
const size_t alloc_count = mf->hash_size_sum + mf->sons_count; alloc_count = mf->hash_size_sum + mf->sons_count;
#if UINT32_MAX >= SIZE_MAX / 4 #if UINT32_MAX >= SIZE_MAX / 4
// Check for integer overflow. (Huge dictionaries are not // Check for integer overflow. (Huge dictionaries are not
@ -442,12 +457,7 @@ extern uint64_t
lzma_lz_encoder_memusage(const lzma_lz_options *lz_options) lzma_lz_encoder_memusage(const lzma_lz_options *lz_options)
{ {
// Old buffers must not exist when calling lz_encoder_prepare(). // Old buffers must not exist when calling lz_encoder_prepare().
lzma_mf mf = { lzma_mf mf = { NULL };
.buffer = NULL,
.hash = NULL,
.hash_size_sum = 0,
.sons_count = 0,
};
// Setup the size information into mf. // Setup the size information into mf.
if (lz_encoder_prepare(&mf, NULL, lz_options)) if (lz_encoder_prepare(&mf, NULL, lz_options))
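The single-element initializer above leans on a guarantee that C90 already gives: when a brace-enclosed initializer lists fewer values than the struct has members, the remaining members are initialized to zero or a null pointer, exactly as the removed designated form did for the fields it named. A small self-contained check of that behaviour, using a stand-in struct rather than the real lzma_mf layout:

#include <assert.h>
#include <stddef.h>

/* mf_like is an illustrative stand-in; it is not the real lzma_mf. */
struct mf_like {
	unsigned char *buffer;
	unsigned int *hash;
	unsigned int hash_size_sum;
	unsigned int sons_count;
};

int main(void)
{
	struct mf_like mf = { NULL };	/* unlisted members are zeroed */

	assert(mf.buffer == NULL);
	assert(mf.hash == NULL);
	assert(mf.hash_size_sum == 0);
	assert(mf.sons_count == 0);
	return 0;
}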
@ -501,6 +511,8 @@ lzma_lz_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
lzma_allocator *allocator, const void *options, lzma_allocator *allocator, const void *options,
lzma_lz_options *lz_options)) lzma_lz_options *lz_options))
{ {
lzma_lz_options lz_options;
#ifdef HAVE_SMALL #ifdef HAVE_SMALL
// We need that the CRC32 table has been initialized. // We need that the CRC32 table has been initialized.
lzma_crc32_init(); lzma_crc32_init();
@ -529,7 +541,6 @@ lzma_lz_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
} }
// Initialize the LZ-based encoder. // Initialize the LZ-based encoder.
lzma_lz_options lz_options;
return_if_error(lz_init(&next->coder->lz, allocator, return_if_error(lz_init(&next->coder->lz, allocator,
filters[0].options, &lz_options)); filters[0].options, &lz_options));
View File
@ -218,7 +218,7 @@ typedef struct {
/// Get pointer to the first byte not run through the match finder /// Get pointer to the first byte not run through the match finder
static inline const uint8_t * static inline uint8_t *
mf_ptr(const lzma_mf *mf) mf_ptr(const lzma_mf *mf)
{ {
return mf->buffer + mf->read_pos; return mf->buffer + mf->read_pos;
View File
@ -39,25 +39,22 @@
// Endianness doesn't matter in hash_2_calc() (no effect on the output). // Endianness doesn't matter in hash_2_calc() (no effect on the output).
#ifdef TUKLIB_FAST_UNALIGNED_ACCESS #ifdef TUKLIB_FAST_UNALIGNED_ACCESS
# define hash_2_calc() \ # define hash_2_calc() \
const uint32_t hash_value = *(const uint16_t *)(cur) hash_value = *(const uint16_t *)(cur)
#else #else
# define hash_2_calc() \ # define hash_2_calc() \
const uint32_t hash_value \ hash_value = (uint32_t)(cur[0]) | ((uint32_t)(cur[1]) << 8)
= (uint32_t)(cur[0]) | ((uint32_t)(cur[1]) << 8)
#endif #endif
#define hash_3_calc() \ #define hash_3_calc() \
const uint32_t temp = hash_table[cur[0]] ^ cur[1]; \ temp = hash_table[cur[0]] ^ cur[1]; \
const uint32_t hash_2_value = temp & HASH_2_MASK; \ hash_2_value = temp & HASH_2_MASK; \
const uint32_t hash_value \ hash_value = (temp ^ ((uint32_t)(cur[2]) << 8)) & mf->hash_mask
= (temp ^ ((uint32_t)(cur[2]) << 8)) & mf->hash_mask
#define hash_4_calc() \ #define hash_4_calc() \
const uint32_t temp = hash_table[cur[0]] ^ cur[1]; \ temp = hash_table[cur[0]] ^ cur[1]; \
const uint32_t hash_2_value = temp & HASH_2_MASK; \ hash_2_value = temp & HASH_2_MASK; \
const uint32_t hash_3_value \ hash_3_value = (temp ^ ((uint32_t)(cur[2]) << 8)) & HASH_3_MASK; \
= (temp ^ ((uint32_t)(cur[2]) << 8)) & HASH_3_MASK; \ hash_value = (temp ^ ((uint32_t)(cur[2]) << 8) \
const uint32_t hash_value = (temp ^ ((uint32_t)(cur[2]) << 8) \
^ (hash_table[cur[3]] << 5)) & mf->hash_mask ^ (hash_table[cur[3]] << 5)) & mf->hash_mask
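Since the macros above no longer declare their results, each caller now has to provide temp, hash_value, hash_2_value and hash_3_value itself; the /* hash_3_calc */ and /* hash_4_calc */ comments beside the new declarations in the find/skip hunks below mark exactly those variables. A reduced sketch of this calling convention, with a simplified two-byte hash instead of the real macros:

#include <stdio.h>

/* HASH_2_CALC is a simplified stand-in for the non-unaligned
 * hash_2_calc() above: it assigns into a variable that the caller
 * must have declared at the top of its block. */
#define HASH_2_CALC(cur) \
	hash_value = (unsigned long)((cur)[0]) \
			| ((unsigned long)((cur)[1]) << 8)

int main(void)
{
	unsigned long hash_value;	/* caller-provided, as in the new code */
	const unsigned char buf[2] = { 0x34, 0x12 };

	HASH_2_CALC(buf);
	printf("0x%04lX\n", hash_value);	/* prints 0x1234 */
	return 0;
}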
View File
@ -32,8 +32,9 @@ lzma_mf_find(lzma_mf *mf, uint32_t *count_ptr, lzma_match *matches)
if (count > 0) { if (count > 0) {
#ifndef NDEBUG #ifndef NDEBUG
uint32_t i;
// Validate the matches. // Validate the matches.
for (uint32_t i = 0; i < count; ++i) { for (i = 0; i < count; ++i) {
assert(matches[i].len <= mf->nice_len); assert(matches[i].len <= mf->nice_len);
assert(matches[i].dist < mf->read_pos); assert(matches[i].dist < mf->read_pos);
assert(memcmp(mf_ptr(mf) - 1, assert(memcmp(mf_ptr(mf) - 1,
@ -49,6 +50,9 @@ lzma_mf_find(lzma_mf *mf, uint32_t *count_ptr, lzma_match *matches)
// If a match of maximum search length was found, try to // If a match of maximum search length was found, try to
// extend the match to maximum possible length. // extend the match to maximum possible length.
if (len_best == mf->nice_len) { if (len_best == mf->nice_len) {
uint8_t *p1;
uint8_t *p2;
// The limit for the match length is either the // The limit for the match length is either the
// maximum match length supported by the LZ-based // maximum match length supported by the LZ-based
// encoder or the number of bytes left in the // encoder or the number of bytes left in the
@ -59,11 +63,11 @@ lzma_mf_find(lzma_mf *mf, uint32_t *count_ptr, lzma_match *matches)
// Pointer to the byte we just ran through // Pointer to the byte we just ran through
// the match finder. // the match finder.
const uint8_t *p1 = mf_ptr(mf) - 1; p1 = mf_ptr(mf) - 1;
// Pointer to the beginning of the match. We need -1 // Pointer to the beginning of the match. We need -1
// here because the match distances are zero based. // here because the match distances are zero based.
const uint8_t *p2 = p1 - matches[count - 1].dist - 1; p2 = p1 - matches[count - 1].dist - 1;
while (len_best < limit while (len_best < limit
&& p1[len_best] == p2[len_best]) && p1[len_best] == p2[len_best])
@ -108,18 +112,22 @@ lzma_mf_find(lzma_mf *mf, uint32_t *count_ptr, lzma_match *matches)
static void static void
normalize(lzma_mf *mf) normalize(lzma_mf *mf)
{ {
uint32_t i;
uint32_t subvalue;
uint32_t count;
uint32_t *hash;
assert(mf->read_pos + mf->offset == MUST_NORMALIZE_POS); assert(mf->read_pos + mf->offset == MUST_NORMALIZE_POS);
// In future we may not want to touch the lowest bits, because there // In future we may not want to touch the lowest bits, because there
// may be match finders that use larger resolution than one byte. // may be match finders that use larger resolution than one byte.
const uint32_t subvalue subvalue = (MUST_NORMALIZE_POS - mf->cyclic_size);
= (MUST_NORMALIZE_POS - mf->cyclic_size);
// & (~(UINT32_C(1) << 10) - 1); // & (~(UINT32_C(1) << 10) - 1);
const uint32_t count = mf->hash_size_sum + mf->sons_count; count = mf->hash_size_sum + mf->sons_count;
uint32_t *hash = mf->hash; hash = mf->hash;
for (uint32_t i = 0; i < count; ++i) { for (i = 0; i < count; ++i) {
// If the distance is greater than the dictionary size, // If the distance is greater than the dictionary size,
// we can simply mark the hash element as empty. // we can simply mark the hash element as empty.
// //
@ -196,15 +204,14 @@ move_pending(lzma_mf *mf)
move_pending(mf); \ move_pending(mf); \
ret_op; \ ret_op; \
} \ } \
const uint8_t *cur = mf_ptr(mf); \ cur = mf_ptr(mf); \
const uint32_t pos = mf->read_pos + mf->offset pos = mf->read_pos + mf->offset
/// Header for find functions. "return 0" indicates that zero matches /// Header for find functions. "return 0" indicates that zero matches
/// were found. /// were found.
#define header_find(is_bt, len_min) \ #define header_find(is_bt, len_min) \
header(is_bt, len_min, return 0); \ header(is_bt, len_min, return 0)
uint32_t matches_count = 0
/// Header for a loop in a skip function. "continue" tells it to skip the rest /// Header for a loop in a skip function. "continue" tells it to skip the rest
@ -261,10 +268,11 @@ hc_find_func(
while (true) { while (true) {
const uint32_t delta = pos - cur_match; const uint32_t delta = pos - cur_match;
const uint8_t *pb;
if (depth-- == 0 || delta >= cyclic_size) if (depth-- == 0 || delta >= cyclic_size)
return matches; return matches;
const uint8_t *const pb = cur - delta; pb = cur - delta;
cur_match = son[cyclic_pos - delta cur_match = son[cyclic_pos - delta
+ (delta > cyclic_pos ? cyclic_size : 0)]; + (delta > cyclic_pos ? cyclic_size : 0)];
@ -305,18 +313,23 @@ do { \
extern uint32_t extern uint32_t
lzma_mf_hc3_find(lzma_mf *mf, lzma_match *matches) lzma_mf_hc3_find(lzma_mf *mf, lzma_match *matches)
{ {
const uint8_t *cur;
uint32_t pos;
uint32_t temp, hash_value, hash_2_value; /* hash_3_calc */
uint32_t delta2, cur_match;
uint32_t len_best = 2;
uint32_t matches_count = 0;
header_find(false, 3); header_find(false, 3);
hash_3_calc(); hash_3_calc();
const uint32_t delta2 = pos - mf->hash[hash_2_value]; delta2 = pos - mf->hash[hash_2_value];
const uint32_t cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value]; cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value];
mf->hash[hash_2_value] = pos; mf->hash[hash_2_value] = pos;
mf->hash[FIX_3_HASH_SIZE + hash_value] = pos; mf->hash[FIX_3_HASH_SIZE + hash_value] = pos;
uint32_t len_best = 2;
if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) { if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) {
for ( ; len_best != len_limit; ++len_best) for ( ; len_best != len_limit; ++len_best)
if (*(cur + len_best - delta2) != cur[len_best]) if (*(cur + len_best - delta2) != cur[len_best])
@ -340,18 +353,22 @@ extern void
lzma_mf_hc3_skip(lzma_mf *mf, uint32_t amount) lzma_mf_hc3_skip(lzma_mf *mf, uint32_t amount)
{ {
do { do {
const uint8_t *cur;
uint32_t pos;
uint32_t temp, hash_value, hash_2_value; /* hash_3_calc */
uint32_t cur_match;
if (mf_avail(mf) < 3) { if (mf_avail(mf) < 3) {
move_pending(mf); move_pending(mf);
continue; continue;
} }
const uint8_t *cur = mf_ptr(mf); cur = mf_ptr(mf);
const uint32_t pos = mf->read_pos + mf->offset; pos = mf->read_pos + mf->offset;
hash_3_calc(); hash_3_calc();
const uint32_t cur_match cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value];
= mf->hash[FIX_3_HASH_SIZE + hash_value];
mf->hash[hash_2_value] = pos; mf->hash[hash_2_value] = pos;
mf->hash[FIX_3_HASH_SIZE + hash_value] = pos; mf->hash[FIX_3_HASH_SIZE + hash_value] = pos;
@ -367,21 +384,25 @@ lzma_mf_hc3_skip(lzma_mf *mf, uint32_t amount)
extern uint32_t extern uint32_t
lzma_mf_hc4_find(lzma_mf *mf, lzma_match *matches) lzma_mf_hc4_find(lzma_mf *mf, lzma_match *matches)
{ {
const uint8_t *cur;
uint32_t pos;
uint32_t temp, hash_value, hash_2_value, hash_3_value; /* hash_4_calc */
uint32_t delta2, delta3, cur_match;
uint32_t len_best = 1;
uint32_t matches_count = 0;
header_find(false, 4); header_find(false, 4);
hash_4_calc(); hash_4_calc();
uint32_t delta2 = pos - mf->hash[hash_2_value]; delta2 = pos - mf->hash[hash_2_value];
const uint32_t delta3 delta3 = pos - mf->hash[FIX_3_HASH_SIZE + hash_3_value];
= pos - mf->hash[FIX_3_HASH_SIZE + hash_3_value]; cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value];
const uint32_t cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value];
mf->hash[hash_2_value ] = pos; mf->hash[hash_2_value ] = pos;
mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos; mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos;
mf->hash[FIX_4_HASH_SIZE + hash_value] = pos; mf->hash[FIX_4_HASH_SIZE + hash_value] = pos;
uint32_t len_best = 1;
if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) { if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) {
len_best = 2; len_best = 2;
matches[0].len = 2; matches[0].len = 2;
@ -420,18 +441,22 @@ extern void
lzma_mf_hc4_skip(lzma_mf *mf, uint32_t amount) lzma_mf_hc4_skip(lzma_mf *mf, uint32_t amount)
{ {
do { do {
const uint8_t *cur;
uint32_t pos;
uint32_t temp, hash_value, hash_2_value, hash_3_value; /* hash_4_calc */
uint32_t cur_match;
if (mf_avail(mf) < 4) { if (mf_avail(mf) < 4) {
move_pending(mf); move_pending(mf);
continue; continue;
} }
const uint8_t *cur = mf_ptr(mf); cur = mf_ptr(mf);
const uint32_t pos = mf->read_pos + mf->offset; pos = mf->read_pos + mf->offset;
hash_4_calc(); hash_4_calc();
const uint32_t cur_match cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value];
= mf->hash[FIX_4_HASH_SIZE + hash_value];
mf->hash[hash_2_value] = pos; mf->hash[hash_2_value] = pos;
mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos; mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos;
@ -469,6 +494,10 @@ bt_find_func(
uint32_t len1 = 0; uint32_t len1 = 0;
while (true) { while (true) {
uint32_t *pair;
const uint8_t *pb;
uint32_t len;
const uint32_t delta = pos - cur_match; const uint32_t delta = pos - cur_match;
if (depth-- == 0 || delta >= cyclic_size) { if (depth-- == 0 || delta >= cyclic_size) {
*ptr0 = EMPTY_HASH_VALUE; *ptr0 = EMPTY_HASH_VALUE;
@ -476,12 +505,12 @@ bt_find_func(
return matches; return matches;
} }
uint32_t *const pair = son + ((cyclic_pos - delta pair = son + ((cyclic_pos - delta
+ (delta > cyclic_pos ? cyclic_size : 0)) + (delta > cyclic_pos ? cyclic_size : 0))
<< 1); << 1);
const uint8_t *const pb = cur - delta; pb = cur - delta;
uint32_t len = my_min(len0, len1); len = my_min(len0, len1);
if (pb[len] == cur[len]) { if (pb[len] == cur[len]) {
while (++len != len_limit) while (++len != len_limit)
@ -535,6 +564,10 @@ bt_skip_func(
uint32_t len1 = 0; uint32_t len1 = 0;
while (true) { while (true) {
uint32_t *pair;
const uint8_t *pb;
uint32_t len;
const uint32_t delta = pos - cur_match; const uint32_t delta = pos - cur_match;
if (depth-- == 0 || delta >= cyclic_size) { if (depth-- == 0 || delta >= cyclic_size) {
*ptr0 = EMPTY_HASH_VALUE; *ptr0 = EMPTY_HASH_VALUE;
@ -542,11 +575,11 @@ bt_skip_func(
return; return;
} }
uint32_t *pair = son + ((cyclic_pos - delta pair = son + ((cyclic_pos - delta
+ (delta > cyclic_pos ? cyclic_size : 0)) + (delta > cyclic_pos ? cyclic_size : 0))
<< 1); << 1);
const uint8_t *pb = cur - delta; pb = cur - delta;
uint32_t len = my_min(len0, len1); len = my_min(len0, len1);
if (pb[len] == cur[len]) { if (pb[len] == cur[len]) {
while (++len != len_limit) while (++len != len_limit)
@ -593,11 +626,17 @@ do { \
extern uint32_t extern uint32_t
lzma_mf_bt2_find(lzma_mf *mf, lzma_match *matches) lzma_mf_bt2_find(lzma_mf *mf, lzma_match *matches)
{ {
const uint8_t *cur;
uint32_t pos;
uint32_t hash_value; /* hash_2_calc */
uint32_t cur_match;
uint32_t matches_count = 0;
header_find(true, 2); header_find(true, 2);
hash_2_calc(); hash_2_calc();
const uint32_t cur_match = mf->hash[hash_value]; cur_match = mf->hash[hash_value];
mf->hash[hash_value] = pos; mf->hash[hash_value] = pos;
bt_find(1); bt_find(1);
@ -608,11 +647,16 @@ extern void
lzma_mf_bt2_skip(lzma_mf *mf, uint32_t amount) lzma_mf_bt2_skip(lzma_mf *mf, uint32_t amount)
{ {
do { do {
const uint8_t *cur;
uint32_t pos;
uint32_t hash_value; /* hash_2_calc */
uint32_t cur_match;
header_skip(true, 2); header_skip(true, 2);
hash_2_calc(); hash_2_calc();
const uint32_t cur_match = mf->hash[hash_value]; cur_match = mf->hash[hash_value];
mf->hash[hash_value] = pos; mf->hash[hash_value] = pos;
bt_skip(); bt_skip();
@ -626,18 +670,23 @@ lzma_mf_bt2_skip(lzma_mf *mf, uint32_t amount)
extern uint32_t extern uint32_t
lzma_mf_bt3_find(lzma_mf *mf, lzma_match *matches) lzma_mf_bt3_find(lzma_mf *mf, lzma_match *matches)
{ {
const uint8_t *cur;
uint32_t pos;
uint32_t temp, hash_value, hash_2_value; /* hash_3_calc */
uint32_t delta2, cur_match;
uint32_t len_best = 2;
uint32_t matches_count = 0;
header_find(true, 3); header_find(true, 3);
hash_3_calc(); hash_3_calc();
const uint32_t delta2 = pos - mf->hash[hash_2_value]; delta2 = pos - mf->hash[hash_2_value];
const uint32_t cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value]; cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value];
mf->hash[hash_2_value] = pos; mf->hash[hash_2_value] = pos;
mf->hash[FIX_3_HASH_SIZE + hash_value] = pos; mf->hash[FIX_3_HASH_SIZE + hash_value] = pos;
uint32_t len_best = 2;
if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) { if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) {
for ( ; len_best != len_limit; ++len_best) for ( ; len_best != len_limit; ++len_best)
if (*(cur + len_best - delta2) != cur[len_best]) if (*(cur + len_best - delta2) != cur[len_best])
@ -661,12 +710,16 @@ extern void
lzma_mf_bt3_skip(lzma_mf *mf, uint32_t amount) lzma_mf_bt3_skip(lzma_mf *mf, uint32_t amount)
{ {
do { do {
const uint8_t *cur;
uint32_t pos;
uint32_t temp, hash_value, hash_2_value; /* hash_3_calc */
uint32_t cur_match;
header_skip(true, 3); header_skip(true, 3);
hash_3_calc(); hash_3_calc();
const uint32_t cur_match cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value];
= mf->hash[FIX_3_HASH_SIZE + hash_value];
mf->hash[hash_2_value] = pos; mf->hash[hash_2_value] = pos;
mf->hash[FIX_3_HASH_SIZE + hash_value] = pos; mf->hash[FIX_3_HASH_SIZE + hash_value] = pos;
@ -682,21 +735,25 @@ lzma_mf_bt3_skip(lzma_mf *mf, uint32_t amount)
extern uint32_t extern uint32_t
lzma_mf_bt4_find(lzma_mf *mf, lzma_match *matches) lzma_mf_bt4_find(lzma_mf *mf, lzma_match *matches)
{ {
const uint8_t *cur;
uint32_t pos;
uint32_t temp, hash_value, hash_2_value, hash_3_value; /* hash_4_calc */
uint32_t delta2, delta3, cur_match;
uint32_t len_best = 1;
uint32_t matches_count = 0;
header_find(true, 4); header_find(true, 4);
hash_4_calc(); hash_4_calc();
uint32_t delta2 = pos - mf->hash[hash_2_value]; delta2 = pos - mf->hash[hash_2_value];
const uint32_t delta3 delta3 = pos - mf->hash[FIX_3_HASH_SIZE + hash_3_value];
= pos - mf->hash[FIX_3_HASH_SIZE + hash_3_value]; cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value];
const uint32_t cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value];
mf->hash[hash_2_value] = pos; mf->hash[hash_2_value] = pos;
mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos; mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos;
mf->hash[FIX_4_HASH_SIZE + hash_value] = pos; mf->hash[FIX_4_HASH_SIZE + hash_value] = pos;
uint32_t len_best = 1;
if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) { if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) {
len_best = 2; len_best = 2;
matches[0].len = 2; matches[0].len = 2;
@ -735,12 +792,16 @@ extern void
lzma_mf_bt4_skip(lzma_mf *mf, uint32_t amount) lzma_mf_bt4_skip(lzma_mf *mf, uint32_t amount)
{ {
do { do {
const uint8_t *cur;
uint32_t pos;
uint32_t temp, hash_value, hash_2_value, hash_3_value; /* hash_4_calc */
uint32_t cur_match;
header_skip(true, 4); header_skip(true, 4);
hash_4_calc(); hash_4_calc();
const uint32_t cur_match cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value];
= mf->hash[FIX_4_HASH_SIZE + hash_value];
mf->hash[hash_2_value] = pos; mf->hash[hash_2_value] = pos;
mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos; mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos;
View File
@ -75,6 +75,8 @@
// on all systems I have tried. The size optimized version is sometimes // on all systems I have tried. The size optimized version is sometimes
// slightly faster, but sometimes it is a lot slower. // slightly faster, but sometimes it is a lot slower.
#include "config.h"
#ifdef HAVE_SMALL #ifdef HAVE_SMALL
# define get_pos_slot(pos) ((pos) <= 4 ? (pos) : get_pos_slot_2(pos)) # define get_pos_slot(pos) ((pos) <= 4 ? (pos) : get_pos_slot_2(pos))
View File
@ -224,6 +224,8 @@ static lzma_ret
lzma2_decoder_init(lzma_lz_decoder *lz, lzma_allocator *allocator, lzma2_decoder_init(lzma_lz_decoder *lz, lzma_allocator *allocator,
const void *opt, lzma_lz_options *lz_options) const void *opt, lzma_lz_options *lz_options)
{ {
const lzma_options_lzma *options = opt;
if (lz->coder == NULL) { if (lz->coder == NULL) {
lz->coder = lzma_alloc(sizeof(lzma_coder), allocator); lz->coder = lzma_alloc(sizeof(lzma_coder), allocator);
if (lz->coder == NULL) if (lz->coder == NULL)
@ -235,8 +237,6 @@ lzma2_decoder_init(lzma_lz_decoder *lz, lzma_allocator *allocator,
lz->coder->lzma = LZMA_LZ_DECODER_INIT; lz->coder->lzma = LZMA_LZ_DECODER_INIT;
} }
const lzma_options_lzma *options = opt;
lz->coder->sequence = SEQ_CONTROL; lz->coder->sequence = SEQ_CONTROL;
lz->coder->need_properties = true; lz->coder->need_properties = true;
lz->coder->need_dictionary_reset = options->preset_dict == NULL lz->coder->need_dictionary_reset = options->preset_dict == NULL
@ -272,6 +272,8 @@ extern lzma_ret
lzma_lzma2_props_decode(void **options, lzma_allocator *allocator, lzma_lzma2_props_decode(void **options, lzma_allocator *allocator,
const uint8_t *props, size_t props_size) const uint8_t *props, size_t props_size)
{ {
lzma_options_lzma *opt;
if (props_size != 1) if (props_size != 1)
return LZMA_OPTIONS_ERROR; return LZMA_OPTIONS_ERROR;
@ -283,8 +285,7 @@ lzma_lzma2_props_decode(void **options, lzma_allocator *allocator,
if (props[0] > 40) if (props[0] > 40)
return LZMA_OPTIONS_ERROR; return LZMA_OPTIONS_ERROR;
lzma_options_lzma *opt = lzma_alloc( opt = lzma_alloc(sizeof(lzma_options_lzma), allocator);
sizeof(lzma_options_lzma), allocator);
if (opt == NULL) if (opt == NULL)
return LZMA_MEM_ERROR; return LZMA_MEM_ERROR;
View File
@ -54,13 +54,14 @@ struct lzma_coder_s {
static void static void
lzma2_header_lzma(lzma_coder *coder) lzma2_header_lzma(lzma_coder *coder)
{ {
size_t pos;
size_t size;
assert(coder->uncompressed_size > 0); assert(coder->uncompressed_size > 0);
assert(coder->uncompressed_size <= LZMA2_UNCOMPRESSED_MAX); assert(coder->uncompressed_size <= LZMA2_UNCOMPRESSED_MAX);
assert(coder->compressed_size > 0); assert(coder->compressed_size > 0);
assert(coder->compressed_size <= LZMA2_CHUNK_MAX); assert(coder->compressed_size <= LZMA2_CHUNK_MAX);
size_t pos;
if (coder->need_properties) { if (coder->need_properties) {
pos = 0; pos = 0;
@ -81,7 +82,7 @@ lzma2_header_lzma(lzma_coder *coder)
coder->buf_pos = pos; coder->buf_pos = pos;
// Uncompressed size // Uncompressed size
size_t size = coder->uncompressed_size - 1; size = coder->uncompressed_size - 1;
coder->buf[pos++] += size >> 16; coder->buf[pos++] += size >> 16;
coder->buf[pos++] = (size >> 8) & 0xFF; coder->buf[pos++] = (size >> 8) & 0xFF;
coder->buf[pos++] = size & 0xFF; coder->buf[pos++] = size & 0xFF;
@ -162,6 +163,9 @@ lzma2_encode(lzma_coder *restrict coder, lzma_mf *restrict mf,
// Fall through // Fall through
case SEQ_LZMA_ENCODE: { case SEQ_LZMA_ENCODE: {
uint32_t read_start;
lzma_ret ret;
// Calculate how much more uncompressed data this chunk // Calculate how much more uncompressed data this chunk
// could accept. // could accept.
const uint32_t left = LZMA2_UNCOMPRESSED_MAX const uint32_t left = LZMA2_UNCOMPRESSED_MAX
@ -182,10 +186,10 @@ lzma2_encode(lzma_coder *restrict coder, lzma_mf *restrict mf,
// Save the start position so that we can update // Save the start position so that we can update
// coder->uncompressed_size. // coder->uncompressed_size.
const uint32_t read_start = mf->read_pos - mf->read_ahead; read_start = mf->read_pos - mf->read_ahead;
// Call the LZMA encoder until the chunk is finished. // Call the LZMA encoder until the chunk is finished.
const lzma_ret ret = lzma_lzma_encode(coder->lzma, mf, ret = lzma_lzma_encode(coder->lzma, mf,
coder->buf + LZMA2_HEADER_MAX, coder->buf + LZMA2_HEADER_MAX,
&coder->compressed_size, &coder->compressed_size,
LZMA2_CHUNK_MAX, limit); LZMA2_CHUNK_MAX, limit);
@ -273,6 +277,8 @@ lzma2_encoder_end(lzma_coder *coder, lzma_allocator *allocator)
static lzma_ret static lzma_ret
lzma2_encoder_options_update(lzma_coder *coder, const lzma_filter *filter) lzma2_encoder_options_update(lzma_coder *coder, const lzma_filter *filter)
{ {
lzma_options_lzma *opt;
// New options can be set only when there is no incomplete chunk. // New options can be set only when there is no incomplete chunk.
// This is the case at the beginning of the raw stream and right // This is the case at the beginning of the raw stream and right
// after LZMA_SYNC_FLUSH. // after LZMA_SYNC_FLUSH.
@ -281,7 +287,7 @@ lzma2_encoder_options_update(lzma_coder *coder, const lzma_filter *filter)
// Look if there are new options. At least for now, // Look if there are new options. At least for now,
// only lc/lp/pb can be changed. // only lc/lp/pb can be changed.
const lzma_options_lzma *opt = filter->options; opt = filter->options;
if (coder->opt_cur.lc != opt->lc || coder->opt_cur.lp != opt->lp if (coder->opt_cur.lc != opt->lc || coder->opt_cur.lp != opt->lp
|| coder->opt_cur.pb != opt->pb) { || coder->opt_cur.pb != opt->pb) {
// Validate the options. // Validate the options.
View File
@ -129,12 +129,15 @@ static inline void
literal_init(probability (*probs)[LITERAL_CODER_SIZE], literal_init(probability (*probs)[LITERAL_CODER_SIZE],
uint32_t lc, uint32_t lp) uint32_t lc, uint32_t lp)
{ {
uint32_t coders;
uint32_t i, j;
assert(lc + lp <= LZMA_LCLP_MAX); assert(lc + lp <= LZMA_LCLP_MAX);
const uint32_t coders = 1U << (lc + lp); coders = 1U << (lc + lp);
for (uint32_t i = 0; i < coders; ++i) for (i = 0; i < coders; ++i)
for (uint32_t j = 0; j < LITERAL_CODER_SIZE; ++j) for (j = 0; j < LITERAL_CODER_SIZE; ++j)
bit_reset(probs[i][j]); bit_reset(probs[i][j]);
return; return;
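literal_init() above also shows the other half of the declaration rule: C90 has no for-scope declarations, so "for (uint32_t i = 0; ...)" throughout these files becomes a counter declared at the top of the block plus a plain "for (i = 0; ...)". A minimal sketch with an illustrative reset helper (reset_probs() and PROB_INIT are made up for the example, not taken from liblzma):

#include <stdio.h>

#define NUM_PROBS 8
#define PROB_INIT 1024

static void
reset_probs(unsigned int *probs, unsigned int count)
{
	unsigned int i;	/* C90: the counter is declared before any statement */

	for (i = 0; i < count; ++i)
		probs[i] = PROB_INIT;
}

int main(void)
{
	unsigned int probs[NUM_PROBS];

	reset_probs(probs, NUM_PROBS);
	printf("%u\n", probs[NUM_PROBS - 1]);
	return 0;
}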
View File
@ -114,33 +114,33 @@ do { \
case seq ## _CHOICE: \ case seq ## _CHOICE: \
rc_if_0(ld.choice, seq ## _CHOICE) { \ rc_if_0(ld.choice, seq ## _CHOICE) { \
rc_update_0(ld.choice); \ rc_update_0(ld.choice); \
rc_bit_case(ld.low[pos_state][symbol], , , seq ## _LOW0); \ rc_bit_case(ld.low[pos_state][symbol], 0, 0, seq ## _LOW0); \
rc_bit_case(ld.low[pos_state][symbol], , , seq ## _LOW1); \ rc_bit_case(ld.low[pos_state][symbol], 0, 0, seq ## _LOW1); \
rc_bit_case(ld.low[pos_state][symbol], , , seq ## _LOW2); \ rc_bit_case(ld.low[pos_state][symbol], 0, 0, seq ## _LOW2); \
target = symbol - LEN_LOW_SYMBOLS + MATCH_LEN_MIN; \ target = symbol - LEN_LOW_SYMBOLS + MATCH_LEN_MIN; \
} else { \ } else { \
rc_update_1(ld.choice); \ rc_update_1(ld.choice); \
case seq ## _CHOICE2: \ case seq ## _CHOICE2: \
rc_if_0(ld.choice2, seq ## _CHOICE2) { \ rc_if_0(ld.choice2, seq ## _CHOICE2) { \
rc_update_0(ld.choice2); \ rc_update_0(ld.choice2); \
rc_bit_case(ld.mid[pos_state][symbol], , , \ rc_bit_case(ld.mid[pos_state][symbol], 0, 0, \
seq ## _MID0); \ seq ## _MID0); \
rc_bit_case(ld.mid[pos_state][symbol], , , \ rc_bit_case(ld.mid[pos_state][symbol], 0, 0, \
seq ## _MID1); \ seq ## _MID1); \
rc_bit_case(ld.mid[pos_state][symbol], , , \ rc_bit_case(ld.mid[pos_state][symbol], 0, 0, \
seq ## _MID2); \ seq ## _MID2); \
target = symbol - LEN_MID_SYMBOLS \ target = symbol - LEN_MID_SYMBOLS \
+ MATCH_LEN_MIN + LEN_LOW_SYMBOLS; \ + MATCH_LEN_MIN + LEN_LOW_SYMBOLS; \
} else { \ } else { \
rc_update_1(ld.choice2); \ rc_update_1(ld.choice2); \
rc_bit_case(ld.high[symbol], , , seq ## _HIGH0); \ rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH0); \
rc_bit_case(ld.high[symbol], , , seq ## _HIGH1); \ rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH1); \
rc_bit_case(ld.high[symbol], , , seq ## _HIGH2); \ rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH2); \
rc_bit_case(ld.high[symbol], , , seq ## _HIGH3); \ rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH3); \
rc_bit_case(ld.high[symbol], , , seq ## _HIGH4); \ rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH4); \
rc_bit_case(ld.high[symbol], , , seq ## _HIGH5); \ rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH5); \
rc_bit_case(ld.high[symbol], , , seq ## _HIGH6); \ rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH6); \
rc_bit_case(ld.high[symbol], , , seq ## _HIGH7); \ rc_bit_case(ld.high[symbol], 0, 0, seq ## _HIGH7); \
target = symbol - LEN_HIGH_SYMBOLS \ target = symbol - LEN_HIGH_SYMBOLS \
+ MATCH_LEN_MIN \ + MATCH_LEN_MIN \
+ LEN_LOW_SYMBOLS + LEN_MID_SYMBOLS; \ + LEN_LOW_SYMBOLS + LEN_MID_SYMBOLS; \
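The 0, 0 arguments above address a separate C90 limitation: passing an empty macro argument only became well defined in C99, so the previously empty action slots of these unrolled rc_bit_case() calls now receive a harmless constant that expands to a do-nothing statement. A toy macro (DO_BIT is made up, not the real range-decoder interface) shows the pattern:

#include <stdio.h>

#define DO_BIT(bit, action0, action1) \
	do { \
		if ((bit) == 0) { \
			action0; \
		} else { \
			action1; \
		} \
	} while (0)

int main(void)
{
	int hits = 0;

	/* C90-friendly: a 0 expression instead of an empty argument. */
	DO_BIT(1, 0, ++hits);

	/* The old form, DO_BIT(1, , ++hits), relies on empty macro
	 * arguments, which C90 does not define. */

	printf("%d\n", hits);	/* prints 1 */
	return 0;
}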
@ -285,13 +285,6 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
const uint8_t *restrict in, const uint8_t *restrict in,
size_t *restrict in_pos, size_t in_size) size_t *restrict in_pos, size_t in_size)
{ {
////////////////////
// Initialization //
////////////////////
if (!rc_read_init(&coder->rc, in, in_pos, in_size))
return LZMA_OK;
/////////////// ///////////////
// Variables // // Variables //
/////////////// ///////////////
@ -338,6 +331,16 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
if (no_eopm && coder->uncompressed_size < dict.limit - dict.pos) if (no_eopm && coder->uncompressed_size < dict.limit - dict.pos)
dict.limit = dict.pos + (size_t)(coder->uncompressed_size); dict.limit = dict.pos + (size_t)(coder->uncompressed_size);
////////////////////
// Initialization //
////////////////////
if (!rc_read_init(&coder->rc, in, in_pos, in_size))
return LZMA_OK;
rc = coder->rc;
rc_in_pos = *in_pos;
// The main decoder loop. The "switch" is used to restart the decoder at // The main decoder loop. The "switch" is used to restart the decoder at
// correct location. Once restarted, the "switch" is no longer used. // correct location. Once restarted, the "switch" is no longer used.
switch (coder->sequence) switch (coder->sequence)
@ -353,6 +356,21 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
break; break;
rc_if_0(coder->is_match[state][pos_state], SEQ_IS_MATCH) { rc_if_0(coder->is_match[state][pos_state], SEQ_IS_MATCH) {
static const lzma_lzma_state next_state[] = {
STATE_LIT_LIT,
STATE_LIT_LIT,
STATE_LIT_LIT,
STATE_LIT_LIT,
STATE_MATCH_LIT_LIT,
STATE_REP_LIT_LIT,
STATE_SHORTREP_LIT_LIT,
STATE_MATCH_LIT,
STATE_REP_LIT,
STATE_SHORTREP_LIT,
STATE_MATCH_LIT,
STATE_REP_LIT
};
rc_update_0(coder->is_match[state][pos_state]); rc_update_0(coder->is_match[state][pos_state]);
// It's a literal i.e. a single 8-bit byte. // It's a literal i.e. a single 8-bit byte.
@ -370,16 +388,21 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
rc_bit(probs[symbol], , , SEQ_LITERAL); rc_bit(probs[symbol], , , SEQ_LITERAL);
} while (symbol < (1 << 8)); } while (symbol < (1 << 8));
#else #else
rc_bit_case(probs[symbol], , , SEQ_LITERAL0); rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL0);
rc_bit_case(probs[symbol], , , SEQ_LITERAL1); rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL1);
rc_bit_case(probs[symbol], , , SEQ_LITERAL2); rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL2);
rc_bit_case(probs[symbol], , , SEQ_LITERAL3); rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL3);
rc_bit_case(probs[symbol], , , SEQ_LITERAL4); rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL4);
rc_bit_case(probs[symbol], , , SEQ_LITERAL5); rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL5);
rc_bit_case(probs[symbol], , , SEQ_LITERAL6); rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL6);
rc_bit_case(probs[symbol], , , SEQ_LITERAL7); rc_bit_case(probs[symbol], 0, 0, SEQ_LITERAL7);
#endif #endif
} else { } else {
#ifndef HAVE_SMALL
uint32_t match_bit;
uint32_t subcoder_index;
#endif
// Decode literal with match byte. // Decode literal with match byte.
// //
// We store the byte we compare against // We store the byte we compare against
@ -418,8 +441,6 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
} while (symbol < (1 << 8)); } while (symbol < (1 << 8));
#else #else
// Unroll the loop. // Unroll the loop.
uint32_t match_bit;
uint32_t subcoder_index;
# define d(seq) \ # define d(seq) \
case seq: \ case seq: \
@ -453,20 +474,6 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
// Use a lookup table to update to literal state, // Use a lookup table to update to literal state,
// since compared to other state updates, this would // since compared to other state updates, this would
// need two branches. // need two branches.
static const lzma_lzma_state next_state[] = {
STATE_LIT_LIT,
STATE_LIT_LIT,
STATE_LIT_LIT,
STATE_LIT_LIT,
STATE_MATCH_LIT_LIT,
STATE_REP_LIT_LIT,
STATE_SHORTREP_LIT_LIT,
STATE_MATCH_LIT,
STATE_REP_LIT,
STATE_SHORTREP_LIT,
STATE_MATCH_LIT,
STATE_REP_LIT
};
state = next_state[state]; state = next_state[state];
case SEQ_LITERAL_WRITE: case SEQ_LITERAL_WRITE:
@ -511,12 +518,12 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
rc_bit(probs[symbol], , , SEQ_POS_SLOT); rc_bit(probs[symbol], , , SEQ_POS_SLOT);
} while (symbol < POS_SLOTS); } while (symbol < POS_SLOTS);
#else #else
rc_bit_case(probs[symbol], , , SEQ_POS_SLOT0); rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT0);
rc_bit_case(probs[symbol], , , SEQ_POS_SLOT1); rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT1);
rc_bit_case(probs[symbol], , , SEQ_POS_SLOT2); rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT2);
rc_bit_case(probs[symbol], , , SEQ_POS_SLOT3); rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT3);
rc_bit_case(probs[symbol], , , SEQ_POS_SLOT4); rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT4);
rc_bit_case(probs[symbol], , , SEQ_POS_SLOT5); rc_bit_case(probs[symbol], 0, 0, SEQ_POS_SLOT5);
#endif #endif
// Get rid of the highest bit that was needed for // Get rid of the highest bit that was needed for
// indexing of the probability array. // indexing of the probability array.
@ -564,25 +571,25 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
switch (limit) { switch (limit) {
case 5: case 5:
assert(offset == 0); assert(offset == 0);
rc_bit(probs[symbol], , rc_bit(probs[symbol], 0,
rep0 += 1, rep0 += 1,
SEQ_POS_MODEL); SEQ_POS_MODEL);
++offset; ++offset;
--limit; --limit;
case 4: case 4:
rc_bit(probs[symbol], , rc_bit(probs[symbol], 0,
rep0 += 1 << offset, rep0 += 1 << offset,
SEQ_POS_MODEL); SEQ_POS_MODEL);
++offset; ++offset;
--limit; --limit;
case 3: case 3:
rc_bit(probs[symbol], , rc_bit(probs[symbol], 0,
rep0 += 1 << offset, rep0 += 1 << offset,
SEQ_POS_MODEL); SEQ_POS_MODEL);
++offset; ++offset;
--limit; --limit;
case 2: case 2:
rc_bit(probs[symbol], , rc_bit(probs[symbol], 0,
rep0 += 1 << offset, rep0 += 1 << offset,
SEQ_POS_MODEL); SEQ_POS_MODEL);
++offset; ++offset;
@ -594,7 +601,7 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
// rc_bit_last() here to omit // rc_bit_last() here to omit
// the unneeded updating of // the unneeded updating of
// "symbol". // "symbol".
rc_bit_last(probs[symbol], , rc_bit_last(probs[symbol], 0,
rep0 += 1 << offset, rep0 += 1 << offset,
SEQ_POS_MODEL); SEQ_POS_MODEL);
} }
@ -628,19 +635,19 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
} while (++offset < ALIGN_BITS); } while (++offset < ALIGN_BITS);
#else #else
case SEQ_ALIGN0: case SEQ_ALIGN0:
rc_bit(coder->pos_align[symbol], , rc_bit(coder->pos_align[symbol], 0,
rep0 += 1, SEQ_ALIGN0); rep0 += 1, SEQ_ALIGN0);
case SEQ_ALIGN1: case SEQ_ALIGN1:
rc_bit(coder->pos_align[symbol], , rc_bit(coder->pos_align[symbol], 0,
rep0 += 2, SEQ_ALIGN1); rep0 += 2, SEQ_ALIGN1);
case SEQ_ALIGN2: case SEQ_ALIGN2:
rc_bit(coder->pos_align[symbol], , rc_bit(coder->pos_align[symbol], 0,
rep0 += 4, SEQ_ALIGN2); rep0 += 4, SEQ_ALIGN2);
case SEQ_ALIGN3: case SEQ_ALIGN3:
// Like in SEQ_POS_MODEL, we don't // Like in SEQ_POS_MODEL, we don't
// need "symbol" for anything else // need "symbol" for anything else
// than indexing the probability array. // than indexing the probability array.
rc_bit_last(coder->pos_align[symbol], , rc_bit_last(coder->pos_align[symbol], 0,
rep0 += 8, SEQ_ALIGN3); rep0 += 8, SEQ_ALIGN3);
#endif #endif
@ -725,9 +732,11 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
// is stored to rep0 and rep1, rep2 and rep3 // is stored to rep0 and rep1, rep2 and rep3
// are updated accordingly. // are updated accordingly.
rc_if_0(coder->is_rep1[state], SEQ_IS_REP1) { rc_if_0(coder->is_rep1[state], SEQ_IS_REP1) {
uint32_t distance;
rc_update_0(coder->is_rep1[state]); rc_update_0(coder->is_rep1[state]);
const uint32_t distance = rep1; distance = rep1;
rep1 = rep0; rep1 = rep0;
rep0 = distance; rep0 = distance;
@ -736,19 +745,23 @@ lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
case SEQ_IS_REP2: case SEQ_IS_REP2:
rc_if_0(coder->is_rep2[state], rc_if_0(coder->is_rep2[state],
SEQ_IS_REP2) { SEQ_IS_REP2) {
uint32_t distance;
rc_update_0(coder->is_rep2[ rc_update_0(coder->is_rep2[
state]); state]);
const uint32_t distance = rep2; distance = rep2;
rep2 = rep1; rep2 = rep1;
rep1 = rep0; rep1 = rep0;
rep0 = distance; rep0 = distance;
} else { } else {
uint32_t distance;
rc_update_1(coder->is_rep2[ rc_update_1(coder->is_rep2[
state]); state]);
const uint32_t distance = rep3; distance = rep3;
rep3 = rep2; rep3 = rep2;
rep2 = rep1; rep2 = rep1;
rep1 = rep0; rep1 = rep0;
@ -853,6 +866,9 @@ lzma_lzma_decoder_uncompressed(void *coder_ptr, lzma_vli uncompressed_size)
static void static void
lzma_decoder_reset(lzma_coder *coder, const void *opt) lzma_decoder_reset(lzma_coder *coder, const void *opt)
{ {
uint32_t i, j, pos_state;
uint32_t num_pos_states;
const lzma_options_lzma *options = opt; const lzma_options_lzma *options = opt;
// NOTE: We assume that lc/lp/pb are valid since they were // NOTE: We assume that lc/lp/pb are valid since they were
@ -879,8 +895,8 @@ lzma_decoder_reset(lzma_coder *coder, const void *opt)
rc_reset(coder->rc); rc_reset(coder->rc);
// Bit and bittree decoders // Bit and bittree decoders
for (uint32_t i = 0; i < STATES; ++i) { for (i = 0; i < STATES; ++i) {
for (uint32_t j = 0; j <= coder->pos_mask; ++j) { for (j = 0; j <= coder->pos_mask; ++j) {
bit_reset(coder->is_match[i][j]); bit_reset(coder->is_match[i][j]);
bit_reset(coder->is_rep0_long[i][j]); bit_reset(coder->is_rep0_long[i][j]);
} }
@ -891,22 +907,22 @@ lzma_decoder_reset(lzma_coder *coder, const void *opt)
bit_reset(coder->is_rep2[i]); bit_reset(coder->is_rep2[i]);
} }
for (uint32_t i = 0; i < LEN_TO_POS_STATES; ++i) for (i = 0; i < LEN_TO_POS_STATES; ++i)
bittree_reset(coder->pos_slot[i], POS_SLOT_BITS); bittree_reset(coder->pos_slot[i], POS_SLOT_BITS);
for (uint32_t i = 0; i < FULL_DISTANCES - END_POS_MODEL_INDEX; ++i) for (i = 0; i < FULL_DISTANCES - END_POS_MODEL_INDEX; ++i)
bit_reset(coder->pos_special[i]); bit_reset(coder->pos_special[i]);
bittree_reset(coder->pos_align, ALIGN_BITS); bittree_reset(coder->pos_align, ALIGN_BITS);
// Len decoders (also bit/bittree) // Len decoders (also bit/bittree)
const uint32_t num_pos_states = 1U << options->pb; num_pos_states = 1U << options->pb;
bit_reset(coder->match_len_decoder.choice); bit_reset(coder->match_len_decoder.choice);
bit_reset(coder->match_len_decoder.choice2); bit_reset(coder->match_len_decoder.choice2);
bit_reset(coder->rep_len_decoder.choice); bit_reset(coder->rep_len_decoder.choice);
bit_reset(coder->rep_len_decoder.choice2); bit_reset(coder->rep_len_decoder.choice2);
for (uint32_t pos_state = 0; pos_state < num_pos_states; ++pos_state) { for (pos_state = 0; pos_state < num_pos_states; ++pos_state) {
bittree_reset(coder->match_len_decoder.low[pos_state], bittree_reset(coder->match_len_decoder.low[pos_state],
LEN_LOW_BITS); LEN_LOW_BITS);
bittree_reset(coder->match_len_decoder.mid[pos_state], bittree_reset(coder->match_len_decoder.mid[pos_state],
@ -936,6 +952,8 @@ extern lzma_ret
lzma_lzma_decoder_create(lzma_lz_decoder *lz, lzma_allocator *allocator, lzma_lzma_decoder_create(lzma_lz_decoder *lz, lzma_allocator *allocator,
const void *opt, lzma_lz_options *lz_options) const void *opt, lzma_lz_options *lz_options)
{ {
const lzma_options_lzma *options = opt;
if (lz->coder == NULL) { if (lz->coder == NULL) {
lz->coder = lzma_alloc(sizeof(lzma_coder), allocator); lz->coder = lzma_alloc(sizeof(lzma_coder), allocator);
if (lz->coder == NULL) if (lz->coder == NULL)
@ -948,7 +966,6 @@ lzma_lzma_decoder_create(lzma_lz_decoder *lz, lzma_allocator *allocator,
// All dictionary sizes are OK here. LZ decoder will take care of // All dictionary sizes are OK here. LZ decoder will take care of
// the special cases. // the special cases.
const lzma_options_lzma *options = opt;
lz_options->dict_size = options->dict_size; lz_options->dict_size = options->dict_size;
lz_options->preset_dict = options->preset_dict; lz_options->preset_dict = options->preset_dict;
lz_options->preset_dict_size = options->preset_dict_size; lz_options->preset_dict_size = options->preset_dict_size;
@ -1028,11 +1045,12 @@ extern lzma_ret
lzma_lzma_props_decode(void **options, lzma_allocator *allocator, lzma_lzma_props_decode(void **options, lzma_allocator *allocator,
const uint8_t *props, size_t props_size) const uint8_t *props, size_t props_size)
{ {
lzma_options_lzma *opt;
if (props_size != 5) if (props_size != 5)
return LZMA_OPTIONS_ERROR; return LZMA_OPTIONS_ERROR;
lzma_options_lzma *opt opt = lzma_alloc(sizeof(lzma_options_lzma), allocator);
= lzma_alloc(sizeof(lzma_options_lzma), allocator);
if (opt == NULL) if (opt == NULL)
return LZMA_MEM_ERROR; return LZMA_MEM_ERROR;
View File
@ -28,11 +28,14 @@ literal_matched(lzma_range_encoder *rc, probability *subcoder,
symbol += UINT32_C(1) << 8; symbol += UINT32_C(1) << 8;
do { do {
uint32_t match_bit;
uint32_t subcoder_index;
uint32_t bit;
match_byte <<= 1; match_byte <<= 1;
const uint32_t match_bit = match_byte & offset; match_bit = match_byte & offset;
const uint32_t subcoder_index subcoder_index = offset + match_bit + (symbol >> 8);
= offset + match_bit + (symbol >> 8); bit = (symbol >> 7) & 1;
const uint32_t bit = (symbol >> 7) & 1;
rc_bit(rc, &subcoder[subcoder_index], bit); rc_bit(rc, &subcoder[subcoder_index], bit);
symbol <<= 1; symbol <<= 1;
@ -77,16 +80,19 @@ literal(lzma_coder *coder, lzma_mf *mf, uint32_t position)
static void static void
length_update_prices(lzma_length_encoder *lc, const uint32_t pos_state) length_update_prices(lzma_length_encoder *lc, const uint32_t pos_state)
{ {
uint32_t a0, a1, b0, b1;
uint32_t *prices;
uint32_t i;
const uint32_t table_size = lc->table_size; const uint32_t table_size = lc->table_size;
lc->counters[pos_state] = table_size; lc->counters[pos_state] = table_size;
const uint32_t a0 = rc_bit_0_price(lc->choice); a0 = rc_bit_0_price(lc->choice);
const uint32_t a1 = rc_bit_1_price(lc->choice); a1 = rc_bit_1_price(lc->choice);
const uint32_t b0 = a1 + rc_bit_0_price(lc->choice2); b0 = a1 + rc_bit_0_price(lc->choice2);
const uint32_t b1 = a1 + rc_bit_1_price(lc->choice2); b1 = a1 + rc_bit_1_price(lc->choice2);
uint32_t *const prices = lc->prices[pos_state]; prices = lc->prices[pos_state];
uint32_t i;
for (i = 0; i < table_size && i < LEN_LOW_SYMBOLS; ++i) for (i = 0; i < table_size && i < LEN_LOW_SYMBOLS; ++i)
prices[i] = a0 + rc_bittree_price(lc->low[pos_state], prices[i] = a0 + rc_bittree_price(lc->low[pos_state],
LEN_LOW_BITS, i); LEN_LOW_BITS, i);
@ -143,13 +149,16 @@ static inline void
match(lzma_coder *coder, const uint32_t pos_state, match(lzma_coder *coder, const uint32_t pos_state,
const uint32_t distance, const uint32_t len) const uint32_t distance, const uint32_t len)
{ {
uint32_t pos_slot;
uint32_t len_to_pos_state;
update_match(coder->state); update_match(coder->state);
length(&coder->rc, &coder->match_len_encoder, pos_state, len, length(&coder->rc, &coder->match_len_encoder, pos_state, len,
coder->fast_mode); coder->fast_mode);
const uint32_t pos_slot = get_pos_slot(distance); pos_slot = get_pos_slot(distance);
const uint32_t len_to_pos_state = get_len_to_pos_state(len); len_to_pos_state = get_len_to_pos_state(len);
rc_bittree(&coder->rc, coder->pos_slot[len_to_pos_state], rc_bittree(&coder->rc, coder->pos_slot[len_to_pos_state],
POS_SLOT_BITS, pos_slot); POS_SLOT_BITS, pos_slot);
@ -313,14 +322,19 @@ lzma_lzma_encode(lzma_coder *restrict coder, lzma_mf *restrict mf,
uint8_t *restrict out, size_t *restrict out_pos, uint8_t *restrict out, size_t *restrict out_pos,
size_t out_size, uint32_t limit) size_t out_size, uint32_t limit)
{ {
uint32_t position;
// Initialize the stream if no data has been encoded yet. // Initialize the stream if no data has been encoded yet.
if (!coder->is_initialized && !encode_init(coder, mf)) if (!coder->is_initialized && !encode_init(coder, mf))
return LZMA_OK; return LZMA_OK;
// Get the lowest bits of the uncompressed offset from the LZ layer. // Get the lowest bits of the uncompressed offset from the LZ layer.
uint32_t position = mf_position(mf); position = mf_position(mf);
while (true) { while (true) {
uint32_t len;
uint32_t back;
// Encode pending bits, if any. Calling this before encoding // Encode pending bits, if any. Calling this before encoding
// the next symbol is needed only with plain LZMA, since // the next symbol is needed only with plain LZMA, since
// LZMA2 always provides big enough buffer to flush // LZMA2 always provides big enough buffer to flush
@ -359,8 +373,6 @@ lzma_lzma_encode(lzma_coder *restrict coder, lzma_mf *restrict mf,
// - UINT32_MAX: not a match but a literal // - UINT32_MAX: not a match but a literal
// Value ranges for len: // Value ranges for len:
// - [MATCH_LEN_MIN, MATCH_LEN_MAX] // - [MATCH_LEN_MIN, MATCH_LEN_MAX]
uint32_t len;
uint32_t back;
if (coder->fast_mode) if (coder->fast_mode)
lzma_lzma_optimum_fast(coder, mf, &back, &len); lzma_lzma_optimum_fast(coder, mf, &back, &len);
@ -453,10 +465,12 @@ static void
length_encoder_reset(lzma_length_encoder *lencoder, length_encoder_reset(lzma_length_encoder *lencoder,
const uint32_t num_pos_states, const bool fast_mode) const uint32_t num_pos_states, const bool fast_mode)
{ {
size_t pos_state;
bit_reset(lencoder->choice); bit_reset(lencoder->choice);
bit_reset(lencoder->choice2); bit_reset(lencoder->choice2);
for (size_t pos_state = 0; pos_state < num_pos_states; ++pos_state) { for (pos_state = 0; pos_state < num_pos_states; ++pos_state) {
bittree_reset(lencoder->low[pos_state], LEN_LOW_BITS); bittree_reset(lencoder->low[pos_state], LEN_LOW_BITS);
bittree_reset(lencoder->mid[pos_state], LEN_MID_BITS); bittree_reset(lencoder->mid[pos_state], LEN_MID_BITS);
} }
@ -464,7 +478,7 @@ length_encoder_reset(lzma_length_encoder *lencoder,
bittree_reset(lencoder->high, LEN_HIGH_BITS); bittree_reset(lencoder->high, LEN_HIGH_BITS);
if (!fast_mode) if (!fast_mode)
for (size_t pos_state = 0; pos_state < num_pos_states; for (pos_state = 0; pos_state < num_pos_states;
++pos_state) ++pos_state)
length_update_prices(lencoder, pos_state); length_update_prices(lencoder, pos_state);
@ -475,6 +489,8 @@ length_encoder_reset(lzma_length_encoder *lencoder,
extern lzma_ret extern lzma_ret
lzma_lzma_encoder_reset(lzma_coder *coder, const lzma_options_lzma *options) lzma_lzma_encoder_reset(lzma_coder *coder, const lzma_options_lzma *options)
{ {
size_t i, j;
if (!is_options_valid(options)) if (!is_options_valid(options))
return LZMA_OPTIONS_ERROR; return LZMA_OPTIONS_ERROR;
@ -487,14 +503,14 @@ lzma_lzma_encoder_reset(lzma_coder *coder, const lzma_options_lzma *options)
// State // State
coder->state = STATE_LIT_LIT; coder->state = STATE_LIT_LIT;
for (size_t i = 0; i < REP_DISTANCES; ++i) for (i = 0; i < REP_DISTANCES; ++i)
coder->reps[i] = 0; coder->reps[i] = 0;
literal_init(coder->literal, options->lc, options->lp); literal_init(coder->literal, options->lc, options->lp);
// Bit encoders // Bit encoders
for (size_t i = 0; i < STATES; ++i) { for (i = 0; i < STATES; ++i) {
for (size_t j = 0; j <= coder->pos_mask; ++j) { for (j = 0; j <= coder->pos_mask; ++j) {
bit_reset(coder->is_match[i][j]); bit_reset(coder->is_match[i][j]);
bit_reset(coder->is_rep0_long[i][j]); bit_reset(coder->is_rep0_long[i][j]);
} }
@ -505,11 +521,11 @@ lzma_lzma_encoder_reset(lzma_coder *coder, const lzma_options_lzma *options)
bit_reset(coder->is_rep2[i]); bit_reset(coder->is_rep2[i]);
} }
for (size_t i = 0; i < FULL_DISTANCES - END_POS_MODEL_INDEX; ++i) for (i = 0; i < FULL_DISTANCES - END_POS_MODEL_INDEX; ++i)
bit_reset(coder->pos_special[i]); bit_reset(coder->pos_special[i]);
// Bit tree encoders // Bit tree encoders
for (size_t i = 0; i < LEN_TO_POS_STATES; ++i) for (i = 0; i < LEN_TO_POS_STATES; ++i)
bittree_reset(coder->pos_slot[i], POS_SLOT_BITS); bittree_reset(coder->pos_slot[i], POS_SLOT_BITS);
bittree_reset(coder->pos_align, ALIGN_BITS); bittree_reset(coder->pos_align, ALIGN_BITS);
@ -548,6 +564,9 @@ extern lzma_ret
lzma_lzma_encoder_create(lzma_coder **coder_ptr, lzma_allocator *allocator, lzma_lzma_encoder_create(lzma_coder **coder_ptr, lzma_allocator *allocator,
const lzma_options_lzma *options, lzma_lz_options *lz_options) const lzma_options_lzma *options, lzma_lz_options *lz_options)
{ {
lzma_coder *coder;
uint32_t log_size = 0;
// Allocate lzma_coder if it wasn't already allocated. // Allocate lzma_coder if it wasn't already allocated.
if (*coder_ptr == NULL) { if (*coder_ptr == NULL) {
*coder_ptr = lzma_alloc(sizeof(lzma_coder), allocator); *coder_ptr = lzma_alloc(sizeof(lzma_coder), allocator);
@ -555,7 +574,7 @@ lzma_lzma_encoder_create(lzma_coder **coder_ptr, lzma_allocator *allocator,
return LZMA_MEM_ERROR; return LZMA_MEM_ERROR;
} }
lzma_coder *coder = *coder_ptr; coder = *coder_ptr;
// Set compression mode. We haven't validated the options yet, // Set compression mode. We haven't validated the options yet,
// but it's OK here, since nothing bad happens with invalid // but it's OK here, since nothing bad happens with invalid
@ -571,7 +590,6 @@ lzma_lzma_encoder_create(lzma_coder **coder_ptr, lzma_allocator *allocator,
// Set dist_table_size. // Set dist_table_size.
// Round the dictionary size up to next 2^n. // Round the dictionary size up to next 2^n.
uint32_t log_size = 0;
while ((UINT32_C(1) << log_size) < options->dict_size) while ((UINT32_C(1) << log_size) < options->dict_size)
++log_size; ++log_size;
@ -625,13 +643,15 @@ lzma_lzma_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
extern uint64_t extern uint64_t
lzma_lzma_encoder_memusage(const void *options) lzma_lzma_encoder_memusage(const void *options)
{ {
lzma_lz_options lz_options;
uint64_t lz_memusage;
if (!is_options_valid(options)) if (!is_options_valid(options))
return UINT64_MAX; return UINT64_MAX;
lzma_lz_options lz_options;
set_lz_options(&lz_options, options); set_lz_options(&lz_options, options);
const uint64_t lz_memusage = lzma_lz_encoder_memusage(&lz_options); lz_memusage = lzma_lz_encoder_memusage(&lz_options);
if (lz_memusage == UINT64_MAX) if (lz_memusage == UINT64_MAX)
return UINT64_MAX; return UINT64_MAX;
View File
@ -20,6 +20,14 @@ extern void
lzma_lzma_optimum_fast(lzma_coder *restrict coder, lzma_mf *restrict mf, lzma_lzma_optimum_fast(lzma_coder *restrict coder, lzma_mf *restrict mf,
uint32_t *restrict back_res, uint32_t *restrict len_res) uint32_t *restrict back_res, uint32_t *restrict len_res)
{ {
const uint8_t *buf;
uint32_t buf_avail;
uint32_t i;
uint32_t rep_len = 0;
uint32_t rep_index = 0;
uint32_t back_main = 0;
uint32_t limit;
const uint32_t nice_len = mf->nice_len; const uint32_t nice_len = mf->nice_len;
uint32_t len_main; uint32_t len_main;
@ -32,8 +40,8 @@ lzma_lzma_optimum_fast(lzma_coder *restrict coder, lzma_mf *restrict mf,
matches_count = coder->matches_count; matches_count = coder->matches_count;
} }
const uint8_t *buf = mf_ptr(mf) - 1; buf = mf_ptr(mf) - 1;
const uint32_t buf_avail = my_min(mf_avail(mf) + 1, MATCH_LEN_MAX); buf_avail = my_min(mf_avail(mf) + 1, MATCH_LEN_MAX);
if (buf_avail < 2) { if (buf_avail < 2) {
// There's not enough input left to encode a match. // There's not enough input left to encode a match.
@ -43,10 +51,9 @@ lzma_lzma_optimum_fast(lzma_coder *restrict coder, lzma_mf *restrict mf,
} }
// Look for repeated matches; scan the previous four match distances // Look for repeated matches; scan the previous four match distances
uint32_t rep_len = 0; for (i = 0; i < REP_DISTANCES; ++i) {
uint32_t rep_index = 0; uint32_t len;
for (uint32_t i = 0; i < REP_DISTANCES; ++i) {
// Pointer to the beginning of the match candidate // Pointer to the beginning of the match candidate
const uint8_t *const buf_back = buf - coder->reps[i] - 1; const uint8_t *const buf_back = buf - coder->reps[i] - 1;
@ -57,7 +64,6 @@ lzma_lzma_optimum_fast(lzma_coder *restrict coder, lzma_mf *restrict mf,
// The first two bytes matched. // The first two bytes matched.
// Calculate the length of the match. // Calculate the length of the match.
uint32_t len;
for (len = 2; len < buf_avail for (len = 2; len < buf_avail
&& buf[len] == buf_back[len]; ++len) ; && buf[len] == buf_back[len]; ++len) ;
@ -86,7 +92,6 @@ lzma_lzma_optimum_fast(lzma_coder *restrict coder, lzma_mf *restrict mf,
return; return;
} }
uint32_t back_main = 0;
if (len_main >= 2) { if (len_main >= 2) {
back_main = coder->matches[matches_count - 1].dist; back_main = coder->matches[matches_count - 1].dist;
@ -153,15 +158,16 @@ lzma_lzma_optimum_fast(lzma_coder *restrict coder, lzma_mf *restrict mf,
// the old buf pointer instead of recalculating it with mf_ptr(). // the old buf pointer instead of recalculating it with mf_ptr().
++buf; ++buf;
const uint32_t limit = len_main - 1; limit = len_main - 1;
for (i = 0; i < REP_DISTANCES; ++i) {
uint32_t len;
for (uint32_t i = 0; i < REP_DISTANCES; ++i) {
const uint8_t *const buf_back = buf - coder->reps[i] - 1; const uint8_t *const buf_back = buf - coder->reps[i] - 1;
if (not_equal_16(buf, buf_back)) if (not_equal_16(buf, buf_back))
continue; continue;
uint32_t len;
for (len = 2; len < limit for (len = 2; len < limit
&& buf[len] == buf_back[len]; ++len) ; && buf[len] == buf_back[len]; ++len) ;
View File
@ -35,12 +35,15 @@ get_literal_price(const lzma_coder *const coder, const uint32_t pos,
symbol += UINT32_C(1) << 8; symbol += UINT32_C(1) << 8;
do { do {
uint32_t match_bit;
uint32_t subcoder_index;
uint32_t bit;
match_byte <<= 1; match_byte <<= 1;
const uint32_t match_bit = match_byte & offset; match_bit = match_byte & offset;
const uint32_t subcoder_index subcoder_index = offset + match_bit + (symbol >> 8);
= offset + match_bit + (symbol >> 8); bit = (symbol >> 7) & 1;
const uint32_t bit = (symbol >> 7) & 1;
price += rc_bit_price(subcoder[subcoder_index], bit); price += rc_bit_price(subcoder[subcoder_index], bit);
symbol <<= 1; symbol <<= 1;
@ -131,7 +134,11 @@ get_pos_len_price(const lzma_coder *const coder, const uint32_t pos,
static void static void
fill_distances_prices(lzma_coder *coder) fill_distances_prices(lzma_coder *coder)
{ {
for (uint32_t len_to_pos_state = 0; uint32_t len_to_pos_state;
uint32_t pos_slot;
uint32_t i;
for (len_to_pos_state = 0;
len_to_pos_state < LEN_TO_POS_STATES; len_to_pos_state < LEN_TO_POS_STATES;
++len_to_pos_state) { ++len_to_pos_state) {
@ -139,7 +146,7 @@ fill_distances_prices(lzma_coder *coder)
= coder->pos_slot_prices[len_to_pos_state]; = coder->pos_slot_prices[len_to_pos_state];
// Price to encode the pos_slot. // Price to encode the pos_slot.
for (uint32_t pos_slot = 0; for (pos_slot = 0;
pos_slot < coder->dist_table_size; ++pos_slot) pos_slot < coder->dist_table_size; ++pos_slot)
pos_slot_prices[pos_slot] = rc_bittree_price( pos_slot_prices[pos_slot] = rc_bittree_price(
coder->pos_slot[len_to_pos_state], coder->pos_slot[len_to_pos_state],
@ -148,7 +155,7 @@ fill_distances_prices(lzma_coder *coder)
// For matches with distance >= FULL_DISTANCES, add the price // For matches with distance >= FULL_DISTANCES, add the price
// of the direct bits part of the match distance. (Align bits // of the direct bits part of the match distance. (Align bits
// are handled by fill_align_prices()). // are handled by fill_align_prices()).
for (uint32_t pos_slot = END_POS_MODEL_INDEX; for (pos_slot = END_POS_MODEL_INDEX;
pos_slot < coder->dist_table_size; ++pos_slot) pos_slot < coder->dist_table_size; ++pos_slot)
pos_slot_prices[pos_slot] += rc_direct_price( pos_slot_prices[pos_slot] += rc_direct_price(
((pos_slot >> 1) - 1) - ALIGN_BITS); ((pos_slot >> 1) - 1) - ALIGN_BITS);
@ -156,7 +163,7 @@ fill_distances_prices(lzma_coder *coder)
// Distances in the range [0, 3] are fully encoded with // Distances in the range [0, 3] are fully encoded with
// pos_slot, so they are used for coder->distances_prices // pos_slot, so they are used for coder->distances_prices
// as is. // as is.
for (uint32_t i = 0; i < START_POS_MODEL_INDEX; ++i) for (i = 0; i < START_POS_MODEL_INDEX; ++i)
coder->distances_prices[len_to_pos_state][i] coder->distances_prices[len_to_pos_state][i]
= pos_slot_prices[i]; = pos_slot_prices[i];
} }
@ -164,7 +171,7 @@ fill_distances_prices(lzma_coder *coder)
// Distances in the range [4, 127] depend on pos_slot and pos_special. // Distances in the range [4, 127] depend on pos_slot and pos_special.
// We do this in a loop separate from the above loop to avoid // We do this in a loop separate from the above loop to avoid
// redundant calls to get_pos_slot(). // redundant calls to get_pos_slot().
for (uint32_t i = START_POS_MODEL_INDEX; i < FULL_DISTANCES; ++i) { for (i = START_POS_MODEL_INDEX; i < FULL_DISTANCES; ++i) {
const uint32_t pos_slot = get_pos_slot(i); const uint32_t pos_slot = get_pos_slot(i);
const uint32_t footer_bits = ((pos_slot >> 1) - 1); const uint32_t footer_bits = ((pos_slot >> 1) - 1);
const uint32_t base = (2 | (pos_slot & 1)) << footer_bits; const uint32_t base = (2 | (pos_slot & 1)) << footer_bits;
@ -172,7 +179,7 @@ fill_distances_prices(lzma_coder *coder)
coder->pos_special + base - pos_slot - 1, coder->pos_special + base - pos_slot - 1,
footer_bits, i - base); footer_bits, i - base);
for (uint32_t len_to_pos_state = 0; for (len_to_pos_state = 0;
len_to_pos_state < LEN_TO_POS_STATES; len_to_pos_state < LEN_TO_POS_STATES;
++len_to_pos_state) ++len_to_pos_state)
coder->distances_prices[len_to_pos_state][i] coder->distances_prices[len_to_pos_state][i]
@ -188,7 +195,8 @@ fill_distances_prices(lzma_coder *coder)
static void static void
fill_align_prices(lzma_coder *coder) fill_align_prices(lzma_coder *coder)
{ {
for (uint32_t i = 0; i < ALIGN_TABLE_SIZE; ++i) uint32_t i;
for (i = 0; i < ALIGN_TABLE_SIZE; ++i)
coder->align_prices[i] = rc_bittree_reverse_price( coder->align_prices[i] = rc_bittree_reverse_price(
coder->pos_align, ALIGN_BITS, i); coder->pos_align, ALIGN_BITS, i);
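For reference, the two loops above price a match distance in pieces: a pos_slot, then footer bits that come from pos_special for the small slots, while for distances of FULL_DISTANCES and above the top footer bits are direct bits and the low ALIGN_BITS go through the align prices computed by fill_align_prices(). A small standalone sketch (not part of this commit) of that split; slot_of() is a hypothetical stand-in for get_pos_slot() and the names are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define DEMO_ALIGN_BITS 4

static uint32_t
slot_of(uint32_t dist)
{
	uint32_t n = 31;
	if (dist < 4)
		return dist;	/* distances 0-3 are the slot itself */
	while (((dist >> n) & 1) == 0)
		--n;
	return (n << 1) | ((dist >> (n - 1)) & 1);
}

int
main(void)
{
	static const uint32_t dists[2] = { 50, 1048576 };
	size_t k;
	for (k = 0; k < 2; ++k) {
		const uint32_t dist = dists[k];
		const uint32_t slot = slot_of(dist);
		const uint32_t footer_bits = (slot >> 1) - 1;
		const uint32_t base = (2 | (slot & 1)) << footer_bits;
		/* dist 50: slot 11, 4 footer bits, offset 2 (pos_special).
		 * dist 1048576: slot 40, 19 footer bits, offset 0, of which
		 * 19 - DEMO_ALIGN_BITS = 15 are direct bits and the low 4
		 * are priced by the align table. */
		printf("dist %lu -> slot %lu, footer bits %lu, offset %lu\n",
				(unsigned long)dist, (unsigned long)slot,
				(unsigned long)footer_bits,
				(unsigned long)(dist - base));
	}
	return 0;
}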
@ -225,12 +233,15 @@ static void
backward(lzma_coder *restrict coder, uint32_t *restrict len_res, backward(lzma_coder *restrict coder, uint32_t *restrict len_res,
uint32_t *restrict back_res, uint32_t cur) uint32_t *restrict back_res, uint32_t cur)
{ {
coder->opts_end_index = cur;
uint32_t pos_mem = coder->opts[cur].pos_prev; uint32_t pos_mem = coder->opts[cur].pos_prev;
uint32_t back_mem = coder->opts[cur].back_prev; uint32_t back_mem = coder->opts[cur].back_prev;
coder->opts_end_index = cur;
do { do {
const uint32_t pos_prev = pos_mem;
const uint32_t back_cur = back_mem;
if (coder->opts[cur].prev_1_is_literal) { if (coder->opts[cur].prev_1_is_literal) {
make_literal(&coder->opts[pos_mem]); make_literal(&coder->opts[pos_mem]);
coder->opts[pos_mem].pos_prev = pos_mem - 1; coder->opts[pos_mem].pos_prev = pos_mem - 1;
@ -245,9 +256,6 @@ backward(lzma_coder *restrict coder, uint32_t *restrict len_res,
} }
} }
const uint32_t pos_prev = pos_mem;
const uint32_t back_cur = back_mem;
back_mem = coder->opts[pos_prev].back_prev; back_mem = coder->opts[pos_prev].back_prev;
pos_mem = coder->opts[pos_prev].pos_prev; pos_mem = coder->opts[pos_prev].pos_prev;
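For reference, a toy version (not from this commit) of the link reversal backward() performs above: each node records how the optimizer reached it (pos_prev, back_prev), the loop walks back from cur and flips those links to point forward, and a forward replay then yields the chosen (length, distance) pairs. The toy_opt struct is hypothetical and the prev_1_is_literal / prev_2 bookkeeping is left out.

#include <stdint.h>
#include <stdio.h>

struct toy_opt {
	uint32_t pos_prev;	/* index this node was reached from */
	uint32_t back_prev;	/* distance code chosen on that step */
};

int
main(void)
{
	/* Chosen path: 0 --(len 2, back 7)--> 2 --(len 3, back 0)--> 5 */
	struct toy_opt opts[6] = {
		{0, 0}, {0, 0}, {0, 7}, {0, 0}, {0, 0}, {2, 0}
	};
	uint32_t cur = 5;
	uint32_t pos_mem = opts[cur].pos_prev;
	uint32_t back_mem = opts[cur].back_prev;
	uint32_t pos;

	/* Same shape as the do-while in backward(): turn the
	 * "reached from" links into "go to next" links in place. */
	do {
		const uint32_t pos_prev = pos_mem;
		const uint32_t back_cur = back_mem;

		back_mem = opts[pos_prev].back_prev;
		pos_mem = opts[pos_prev].pos_prev;

		opts[pos_prev].back_prev = back_cur;
		opts[pos_prev].pos_prev = cur;

		cur = pos_prev;
	} while (cur != 0);

	/* Replay forward: prints "len 2 back 7" then "len 3 back 0". */
	for (pos = 0; pos != 5; pos = opts[pos].pos_prev)
		printf("len %u back %u\n",
				(unsigned)(opts[pos].pos_prev - pos),
				(unsigned)opts[pos].back_prev);
	return 0;
}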
@ -274,6 +282,23 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
uint32_t *restrict back_res, uint32_t *restrict len_res, uint32_t *restrict back_res, uint32_t *restrict len_res,
uint32_t position) uint32_t position)
{ {
uint32_t buf_avail;
const uint8_t *buf;
uint32_t rep_lens[REP_DISTANCES];
uint32_t rep_max_index = 0;
uint32_t i;
uint8_t current_byte;
uint8_t match_byte;
uint32_t pos_state;
uint32_t match_price;
uint32_t rep_match_price;
uint32_t len_end;
uint32_t len;
uint32_t normal_match_price;
const uint32_t nice_len = mf->nice_len; const uint32_t nice_len = mf->nice_len;
uint32_t len_main; uint32_t len_main;
@ -287,19 +312,18 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
matches_count = coder->matches_count; matches_count = coder->matches_count;
} }
const uint32_t buf_avail = my_min(mf_avail(mf) + 1, MATCH_LEN_MAX); buf_avail = my_min(mf_avail(mf) + 1, MATCH_LEN_MAX);
if (buf_avail < 2) { if (buf_avail < 2) {
*back_res = UINT32_MAX; *back_res = UINT32_MAX;
*len_res = 1; *len_res = 1;
return UINT32_MAX; return UINT32_MAX;
} }
const uint8_t *const buf = mf_ptr(mf) - 1; buf = mf_ptr(mf) - 1;
uint32_t rep_lens[REP_DISTANCES];
uint32_t rep_max_index = 0;
for (uint32_t i = 0; i < REP_DISTANCES; ++i) { for (i = 0; i < REP_DISTANCES; ++i) {
uint32_t len_test;
const uint8_t *const buf_back = buf - coder->reps[i] - 1; const uint8_t *const buf_back = buf - coder->reps[i] - 1;
if (not_equal_16(buf, buf_back)) { if (not_equal_16(buf, buf_back)) {
@ -307,7 +331,6 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
continue; continue;
} }
uint32_t len_test;
for (len_test = 2; len_test < buf_avail for (len_test = 2; len_test < buf_avail
&& buf[len_test] == buf_back[len_test]; && buf[len_test] == buf_back[len_test];
++len_test) ; ++len_test) ;
@ -333,8 +356,8 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
return UINT32_MAX; return UINT32_MAX;
} }
const uint8_t current_byte = *buf; current_byte = *buf;
const uint8_t match_byte = *(buf - coder->reps[0] - 1); match_byte = *(buf - coder->reps[0] - 1);
if (len_main < 2 && current_byte != match_byte if (len_main < 2 && current_byte != match_byte
&& rep_lens[rep_max_index] < 2) { && rep_lens[rep_max_index] < 2) {
@ -345,7 +368,7 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
coder->opts[0].state = coder->state; coder->opts[0].state = coder->state;
const uint32_t pos_state = position & coder->pos_mask; pos_state = position & coder->pos_mask;
coder->opts[1].price = rc_bit_0_price( coder->opts[1].price = rc_bit_0_price(
coder->is_match[coder->state][pos_state]) coder->is_match[coder->state][pos_state])
@ -355,9 +378,9 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
make_literal(&coder->opts[1]); make_literal(&coder->opts[1]);
const uint32_t match_price = rc_bit_1_price( match_price = rc_bit_1_price(
coder->is_match[coder->state][pos_state]); coder->is_match[coder->state][pos_state]);
const uint32_t rep_match_price = match_price rep_match_price = match_price
+ rc_bit_1_price(coder->is_rep[coder->state]); + rc_bit_1_price(coder->is_rep[coder->state]);
if (match_byte == current_byte) { if (match_byte == current_byte) {
@ -371,7 +394,7 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
} }
} }
const uint32_t len_end = my_max(len_main, rep_lens[rep_max_index]); len_end = my_max(len_main, rep_lens[rep_max_index]);
if (len_end < 2) { if (len_end < 2) {
*back_res = coder->opts[1].back_prev; *back_res = coder->opts[1].back_prev;
@ -381,21 +404,23 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
coder->opts[1].pos_prev = 0; coder->opts[1].pos_prev = 0;
for (uint32_t i = 0; i < REP_DISTANCES; ++i) for (i = 0; i < REP_DISTANCES; ++i)
coder->opts[0].backs[i] = coder->reps[i]; coder->opts[0].backs[i] = coder->reps[i];
uint32_t len = len_end; len = len_end;
do { do {
coder->opts[len].price = RC_INFINITY_PRICE; coder->opts[len].price = RC_INFINITY_PRICE;
} while (--len >= 2); } while (--len >= 2);
for (uint32_t i = 0; i < REP_DISTANCES; ++i) { for (i = 0; i < REP_DISTANCES; ++i) {
uint32_t price;
uint32_t rep_len = rep_lens[i]; uint32_t rep_len = rep_lens[i];
if (rep_len < 2) if (rep_len < 2)
continue; continue;
const uint32_t price = rep_match_price + get_pure_rep_price( price = rep_match_price + get_pure_rep_price(
coder, i, coder->state, pos_state); coder, i, coder->state, pos_state);
do { do {
@ -414,7 +439,7 @@ helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
} }
const uint32_t normal_match_price = match_price normal_match_price = match_price
+ rc_bit_0_price(coder->is_rep[coder->state]); + rc_bit_0_price(coder->is_rep[coder->state]);
len = rep_lens[0] >= 2 ? rep_lens[0] + 1 : 2; len = rep_lens[0] >= 2 ? rep_lens[0] + 1 : 2;
@ -456,6 +481,19 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
uint32_t new_len = coder->longest_match_length; uint32_t new_len = coder->longest_match_length;
uint32_t pos_prev = coder->opts[cur].pos_prev; uint32_t pos_prev = coder->opts[cur].pos_prev;
lzma_lzma_state state; lzma_lzma_state state;
uint32_t buf_avail;
uint32_t rep_index;
uint32_t i;
uint32_t cur_price;
uint8_t current_byte;
uint8_t match_byte;
uint32_t pos_state;
uint32_t cur_and_1_price;
bool next_is_literal = false;
uint32_t match_price;
uint32_t rep_match_price;
uint32_t start_len = 2;
if (coder->opts[cur].prev_1_is_literal) { if (coder->opts[cur].prev_1_is_literal) {
--pos_prev; --pos_prev;
@ -499,9 +537,10 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
} }
if (pos < REP_DISTANCES) { if (pos < REP_DISTANCES) {
uint32_t i;
reps[0] = coder->opts[pos_prev].backs[pos]; reps[0] = coder->opts[pos_prev].backs[pos];
uint32_t i;
for (i = 1; i <= pos; ++i) for (i = 1; i <= pos; ++i)
reps[i] = coder->opts[pos_prev].backs[i - 1]; reps[i] = coder->opts[pos_prev].backs[i - 1];
@ -511,30 +550,28 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
} else { } else {
reps[0] = pos - REP_DISTANCES; reps[0] = pos - REP_DISTANCES;
for (uint32_t i = 1; i < REP_DISTANCES; ++i) for (i = 1; i < REP_DISTANCES; ++i)
reps[i] = coder->opts[pos_prev].backs[i - 1]; reps[i] = coder->opts[pos_prev].backs[i - 1];
} }
} }
coder->opts[cur].state = state; coder->opts[cur].state = state;
for (uint32_t i = 0; i < REP_DISTANCES; ++i) for (i = 0; i < REP_DISTANCES; ++i)
coder->opts[cur].backs[i] = reps[i]; coder->opts[cur].backs[i] = reps[i];
const uint32_t cur_price = coder->opts[cur].price; cur_price = coder->opts[cur].price;
const uint8_t current_byte = *buf; current_byte = *buf;
const uint8_t match_byte = *(buf - reps[0] - 1); match_byte = *(buf - reps[0] - 1);
const uint32_t pos_state = position & coder->pos_mask; pos_state = position & coder->pos_mask;
const uint32_t cur_and_1_price = cur_price cur_and_1_price = cur_price
+ rc_bit_0_price(coder->is_match[state][pos_state]) + rc_bit_0_price(coder->is_match[state][pos_state])
+ get_literal_price(coder, position, buf[-1], + get_literal_price(coder, position, buf[-1],
!is_literal_state(state), match_byte, current_byte); !is_literal_state(state), match_byte, current_byte);
bool next_is_literal = false;
if (cur_and_1_price < coder->opts[cur + 1].price) { if (cur_and_1_price < coder->opts[cur + 1].price) {
coder->opts[cur + 1].price = cur_and_1_price; coder->opts[cur + 1].price = cur_and_1_price;
coder->opts[cur + 1].pos_prev = cur; coder->opts[cur + 1].pos_prev = cur;
@ -542,9 +579,9 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
next_is_literal = true; next_is_literal = true;
} }
const uint32_t match_price = cur_price match_price = cur_price
+ rc_bit_1_price(coder->is_match[state][pos_state]); + rc_bit_1_price(coder->is_match[state][pos_state]);
const uint32_t rep_match_price = match_price rep_match_price = match_price
+ rc_bit_1_price(coder->is_rep[state]); + rc_bit_1_price(coder->is_rep[state]);
if (match_byte == current_byte if (match_byte == current_byte
@ -565,7 +602,7 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
if (buf_avail_full < 2) if (buf_avail_full < 2)
return len_end; return len_end;
const uint32_t buf_avail = my_min(buf_avail_full, nice_len); buf_avail = my_min(buf_avail_full, nice_len);
if (!next_is_literal && match_byte != current_byte) { // speed optimization if (!next_is_literal && match_byte != current_byte) { // speed optimization
// try literal + rep0 // try literal + rep0
@ -579,21 +616,26 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
--len_test; --len_test;
if (len_test >= 2) { if (len_test >= 2) {
uint32_t pos_state_next;
uint32_t next_rep_match_price;
uint32_t offset;
uint32_t cur_and_len_price;
lzma_lzma_state state_2 = state; lzma_lzma_state state_2 = state;
update_literal(state_2); update_literal(state_2);
const uint32_t pos_state_next = (position + 1) & coder->pos_mask; pos_state_next = (position + 1) & coder->pos_mask;
const uint32_t next_rep_match_price = cur_and_1_price next_rep_match_price = cur_and_1_price
+ rc_bit_1_price(coder->is_match[state_2][pos_state_next]) + rc_bit_1_price(coder->is_match[state_2][pos_state_next])
+ rc_bit_1_price(coder->is_rep[state_2]); + rc_bit_1_price(coder->is_rep[state_2]);
//for (; len_test >= 2; --len_test) { //for (; len_test >= 2; --len_test) {
const uint32_t offset = cur + 1 + len_test; offset = cur + 1 + len_test;
while (len_end < offset) while (len_end < offset)
coder->opts[++len_end].price = RC_INFINITY_PRICE; coder->opts[++len_end].price = RC_INFINITY_PRICE;
const uint32_t cur_and_len_price = next_rep_match_price cur_and_len_price = next_rep_match_price
+ get_rep_price(coder, 0, len_test, + get_rep_price(coder, 0, len_test,
state_2, pos_state_next); state_2, pos_state_next);
@ -609,14 +651,14 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
} }
uint32_t start_len = 2; // speed optimization
for (uint32_t rep_index = 0; rep_index < REP_DISTANCES; ++rep_index) { for (rep_index = 0; rep_index < REP_DISTANCES; ++rep_index) {
uint32_t len_test, len_test_2, len_test_temp;
uint32_t price, limit;
const uint8_t *const buf_back = buf - reps[rep_index] - 1; const uint8_t *const buf_back = buf - reps[rep_index] - 1;
if (not_equal_16(buf, buf_back)) if (not_equal_16(buf, buf_back))
continue; continue;
uint32_t len_test;
for (len_test = 2; len_test < buf_avail for (len_test = 2; len_test < buf_avail
&& buf[len_test] == buf_back[len_test]; && buf[len_test] == buf_back[len_test];
++len_test) ; ++len_test) ;
@ -624,8 +666,8 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
while (len_end < cur + len_test) while (len_end < cur + len_test)
coder->opts[++len_end].price = RC_INFINITY_PRICE; coder->opts[++len_end].price = RC_INFINITY_PRICE;
const uint32_t len_test_temp = len_test; len_test_temp = len_test;
const uint32_t price = rep_match_price + get_pure_rep_price( price = rep_match_price + get_pure_rep_price(
coder, rep_index, state, pos_state); coder, rep_index, state, pos_state);
do { do {
@ -647,8 +689,8 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
start_len = len_test + 1; start_len = len_test + 1;
uint32_t len_test_2 = len_test + 1; len_test_2 = len_test + 1;
const uint32_t limit = my_min(buf_avail_full, limit = my_min(buf_avail_full,
len_test_2 + nice_len); len_test_2 + nice_len);
for (; len_test_2 < limit for (; len_test_2 < limit
&& buf[len_test_2] == buf_back[len_test_2]; && buf[len_test_2] == buf_back[len_test_2];
@ -657,12 +699,18 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
len_test_2 -= len_test + 1; len_test_2 -= len_test + 1;
if (len_test_2 >= 2) { if (len_test_2 >= 2) {
uint32_t pos_state_next;
uint32_t cur_and_len_literal_price;
uint32_t next_rep_match_price;
uint32_t offset;
uint32_t cur_and_len_price;
lzma_lzma_state state_2 = state; lzma_lzma_state state_2 = state;
update_long_rep(state_2); update_long_rep(state_2);
uint32_t pos_state_next = (position + len_test) & coder->pos_mask; pos_state_next = (position + len_test) & coder->pos_mask;
const uint32_t cur_and_len_literal_price = price cur_and_len_literal_price = price
+ get_len_price(&coder->rep_len_encoder, + get_len_price(&coder->rep_len_encoder,
len_test, pos_state) len_test, pos_state)
+ rc_bit_0_price(coder->is_match[state_2][pos_state_next]) + rc_bit_0_price(coder->is_match[state_2][pos_state_next])
@ -674,17 +722,17 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
pos_state_next = (position + len_test + 1) & coder->pos_mask; pos_state_next = (position + len_test + 1) & coder->pos_mask;
const uint32_t next_rep_match_price = cur_and_len_literal_price next_rep_match_price = cur_and_len_literal_price
+ rc_bit_1_price(coder->is_match[state_2][pos_state_next]) + rc_bit_1_price(coder->is_match[state_2][pos_state_next])
+ rc_bit_1_price(coder->is_rep[state_2]); + rc_bit_1_price(coder->is_rep[state_2]);
//for(; len_test_2 >= 2; len_test_2--) { //for(; len_test_2 >= 2; len_test_2--) {
const uint32_t offset = cur + len_test + 1 + len_test_2; offset = cur + len_test + 1 + len_test_2;
while (len_end < offset) while (len_end < offset)
coder->opts[++len_end].price = RC_INFINITY_PRICE; coder->opts[++len_end].price = RC_INFINITY_PRICE;
const uint32_t cur_and_len_price = next_rep_match_price cur_and_len_price = next_rep_match_price
+ get_rep_price(coder, 0, len_test_2, + get_rep_price(coder, 0, len_test_2,
state_2, pos_state_next); state_2, pos_state_next);
@ -715,17 +763,19 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
if (new_len >= start_len) { if (new_len >= start_len) {
uint32_t len_test;
uint32_t i = 0;
const uint32_t normal_match_price = match_price const uint32_t normal_match_price = match_price
+ rc_bit_0_price(coder->is_rep[state]); + rc_bit_0_price(coder->is_rep[state]);
while (len_end < cur + new_len) while (len_end < cur + new_len)
coder->opts[++len_end].price = RC_INFINITY_PRICE; coder->opts[++len_end].price = RC_INFINITY_PRICE;
uint32_t i = 0;
while (start_len > coder->matches[i].len) while (start_len > coder->matches[i].len)
++i; ++i;
for (uint32_t len_test = start_len; ; ++len_test) { for (len_test = start_len; ; ++len_test) {
const uint32_t cur_back = coder->matches[i].dist; const uint32_t cur_back = coder->matches[i].dist;
uint32_t cur_and_len_price = normal_match_price uint32_t cur_and_len_price = normal_match_price
+ get_pos_len_price(coder, + get_pos_len_price(coder,
@ -753,12 +803,16 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
len_test_2 -= len_test + 1; len_test_2 -= len_test + 1;
if (len_test_2 >= 2) { if (len_test_2 >= 2) {
uint32_t pos_state_next;
uint32_t cur_and_len_literal_price;
uint32_t next_rep_match_price;
uint32_t offset;
lzma_lzma_state state_2 = state; lzma_lzma_state state_2 = state;
update_match(state_2); update_match(state_2);
uint32_t pos_state_next pos_state_next = (position + len_test) & coder->pos_mask;
= (position + len_test) & coder->pos_mask;
const uint32_t cur_and_len_literal_price = cur_and_len_price cur_and_len_literal_price = cur_and_len_price
+ rc_bit_0_price( + rc_bit_0_price(
coder->is_match[state_2][pos_state_next]) coder->is_match[state_2][pos_state_next])
+ get_literal_price(coder, + get_literal_price(coder,
@ -771,14 +825,14 @@ helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
update_literal(state_2); update_literal(state_2);
pos_state_next = (pos_state_next + 1) & coder->pos_mask; pos_state_next = (pos_state_next + 1) & coder->pos_mask;
const uint32_t next_rep_match_price next_rep_match_price
= cur_and_len_literal_price = cur_and_len_literal_price
+ rc_bit_1_price( + rc_bit_1_price(
coder->is_match[state_2][pos_state_next]) coder->is_match[state_2][pos_state_next])
+ rc_bit_1_price(coder->is_rep[state_2]); + rc_bit_1_price(coder->is_rep[state_2]);
// for(; len_test_2 >= 2; --len_test_2) { // for(; len_test_2 >= 2; --len_test_2) {
const uint32_t offset = cur + len_test + 1 + len_test_2; offset = cur + len_test + 1 + len_test_2;
while (len_end < offset) while (len_end < offset)
coder->opts[++len_end].price = RC_INFINITY_PRICE; coder->opts[++len_end].price = RC_INFINITY_PRICE;
@ -815,6 +869,10 @@ lzma_lzma_optimum_normal(lzma_coder *restrict coder, lzma_mf *restrict mf,
uint32_t *restrict back_res, uint32_t *restrict len_res, uint32_t *restrict back_res, uint32_t *restrict len_res,
uint32_t position) uint32_t position)
{ {
uint32_t reps[REP_DISTANCES];
uint32_t len_end;
uint32_t cur;
// If we have symbols pending, return the next pending symbol. // If we have symbols pending, return the next pending symbol.
if (coder->opts_end_index != coder->opts_current_index) { if (coder->opts_end_index != coder->opts_current_index) {
assert(mf->read_ahead > 0); assert(mf->read_ahead > 0);
@ -841,14 +899,13 @@ lzma_lzma_optimum_normal(lzma_coder *restrict coder, lzma_mf *restrict mf,
// the original function into two pieces makes it at least a little // the original function into two pieces makes it at least a little
// more readable, since those two parts don't share many variables. // more readable, since those two parts don't share many variables.
uint32_t len_end = helper1(coder, mf, back_res, len_res, position); len_end = helper1(coder, mf, back_res, len_res, position);
if (len_end == UINT32_MAX) if (len_end == UINT32_MAX)
return; return;
uint32_t reps[REP_DISTANCES];
memcpy(reps, coder->reps, sizeof(reps)); memcpy(reps, coder->reps, sizeof(reps));
uint32_t cur;
for (cur = 1; cur < len_end; ++cur) { for (cur = 1; cur < len_end; ++cur) {
assert(cur < OPTS); assert(cur < OPTS);

View File

@ -16,6 +16,9 @@
extern LZMA_API(lzma_bool) extern LZMA_API(lzma_bool)
lzma_lzma_preset(lzma_options_lzma *options, uint32_t preset) lzma_lzma_preset(lzma_options_lzma *options, uint32_t preset)
{ {
static const uint8_t dict_size_values[] = { 18, 20, 21, 22, 22, 23, 23, 24, 25, 26 };
static const uint8_t depth_values[] = { 4, 8, 24, 48 };
const uint32_t level = preset & LZMA_PRESET_LEVEL_MASK; const uint32_t level = preset & LZMA_PRESET_LEVEL_MASK;
const uint32_t flags = preset & ~LZMA_PRESET_LEVEL_MASK; const uint32_t flags = preset & ~LZMA_PRESET_LEVEL_MASK;
const uint32_t supported_flags = LZMA_PRESET_EXTREME; const uint32_t supported_flags = LZMA_PRESET_EXTREME;
@ -30,14 +33,13 @@ lzma_lzma_preset(lzma_options_lzma *options, uint32_t preset)
options->lp = LZMA_LP_DEFAULT; options->lp = LZMA_LP_DEFAULT;
options->pb = LZMA_PB_DEFAULT; options->pb = LZMA_PB_DEFAULT;
options->dict_size = UINT32_C(1) << (uint8_t []){ options->dict_size = UINT32_C(1) << dict_size_values[level];
18, 20, 21, 22, 22, 23, 23, 24, 25, 26 }[level];
if (level <= 3) { if (level <= 3) {
options->mode = LZMA_MODE_FAST; options->mode = LZMA_MODE_FAST;
options->mf = level == 0 ? LZMA_MF_HC3 : LZMA_MF_HC4; options->mf = level == 0 ? LZMA_MF_HC3 : LZMA_MF_HC4;
options->nice_len = level <= 1 ? 128 : 273; options->nice_len = level <= 1 ? 128 : 273;
options->depth = (uint8_t []){ 4, 8, 24, 48 }[level]; options->depth = depth_values[level];
} else { } else {
options->mode = LZMA_MODE_NORMAL; options->mode = LZMA_MODE_NORMAL;
options->mf = LZMA_MF_BT4; options->mf = LZMA_MF_BT4;
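The hunk above is where this file used C99 compound literals; they become named static const tables indexed by the preset level. A minimal standalone sketch of the same rewrite outside liblzma (the demo_ name is made up; the values are the ones from the hunk):

#include <stdint.h>
#include <stdio.h>

/* C99 form being removed (kept here only as a comment):
 *     options->dict_size = UINT32_C(1) << (uint8_t []){
 *             18, 20, 21, 22, 22, 23, 23, 24, 25, 26 }[level];
 * C89 form the commit switches to: a named table plus a plain index. */
static const uint8_t demo_dict_size_values[]
		= { 18, 20, 21, 22, 22, 23, 23, 24, 25, 26 };

int
main(void)
{
	unsigned level;
	for (level = 0; level < 10; ++level)
		printf("preset %u -> dict_size %lu\n", level,
				(unsigned long)1 << demo_dict_size_values[level]);
	return 0;
}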

View File

@ -40,8 +40,11 @@
// This does the same for a complete bit tree. // This does the same for a complete bit tree.
// (A tree represented as an array.) // (A tree represented as an array.)
#define bittree_reset(probs, bit_levels) \ #define bittree_reset(probs, bit_levels) \
for (uint32_t bt_i = 0; bt_i < (1 << (bit_levels)); ++bt_i) \ do { \
bit_reset((probs)[bt_i]) uint32_t bt_i; \
for (bt_i = 0; bt_i < (1 << (bit_levels)); ++bt_i) \
bit_reset((probs)[bt_i]); \
} while (0)
////////////////////// //////////////////////
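Because C89 does not allow the loop variable to be declared in the for header, the macro above now needs a block of its own, and do { } while (0) keeps that block usable as a single statement (so a trailing semicolon still works after if/else). A minimal sketch of the same idiom with a hypothetical macro and reset action, not taken from the commit:

#include <stdio.h>

#define demo_array_reset(arr, count) \
do { \
	unsigned ar_i; \
	for (ar_i = 0; ar_i < (count); ++ar_i) \
		(arr)[ar_i] = 0; \
} while (0)

int
main(void)
{
	int probs[8];
	int reset_wanted = 1;

	/* Expands to one statement, so the if/else parses as expected;
	 * a bare { ... } block here would break on the semicolon. */
	if (reset_wanted)
		demo_array_reset(probs, 8);
	else
		probs[0] = 1;

	printf("%d %d\n", probs[0], probs[7]);	/* 0 0 */
	return 0;
}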

View File

@ -115,7 +115,8 @@ rc_direct(lzma_range_encoder *rc,
static inline void static inline void
rc_flush(lzma_range_encoder *rc) rc_flush(lzma_range_encoder *rc)
{ {
for (size_t i = 0; i < 5; ++i) size_t i;
for (i = 0; i < 5; ++i)
rc->symbols[rc->count++] = RC_FLUSH; rc->symbols[rc->count++] = RC_FLUSH;
} }
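Most hunks in this commit are this same mechanical rewrite: locals move to the top of their block, for-loop counters are declared ahead of the loop, and const temporaries become plain assignments. A minimal before/after sketch with a made-up function, not taken from the commit:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* C99 original (shown as a comment only):
 *     uint32_t sum = 0;
 *     for (size_t i = 0; i < size; ++i)
 *             sum += buf[i];
 *     const uint32_t rounded = (sum + 7) & ~(uint32_t)(7);
 *     return rounded;
 */

/* C89 rewrite in the style of the hunks above: declarations first,
 * the loop counter hoisted, the const temporary turned into an
 * assignment. */
static uint32_t
demo_sum_rounded(const uint8_t *buf, size_t size)
{
	uint32_t sum = 0;
	uint32_t rounded;
	size_t i;

	for (i = 0; i < size; ++i)
		sum += buf[i];

	rounded = (sum + 7) & ~(uint32_t)(7);
	return rounded;
}

int
main(void)
{
	static const uint8_t data[4] = { 1, 2, 3, 4 };
	printf("%lu\n", (unsigned long)demo_sum_rounded(data, 4));	/* 16 */
	return 0;
}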

View File

@ -22,12 +22,12 @@ arm_code(lzma_simple *simple lzma_attribute((__unused__)),
size_t i; size_t i;
for (i = 0; i + 4 <= size; i += 4) { for (i = 0; i + 4 <= size; i += 4) {
if (buffer[i + 3] == 0xEB) { if (buffer[i + 3] == 0xEB) {
uint32_t dest;
uint32_t src = (buffer[i + 2] << 16) uint32_t src = (buffer[i + 2] << 16)
| (buffer[i + 1] << 8) | (buffer[i + 1] << 8)
| (buffer[i + 0]); | (buffer[i + 0]);
src <<= 2; src <<= 2;
uint32_t dest;
if (is_encoder) if (is_encoder)
dest = now_pos + (uint32_t)(i) + 8 + src; dest = now_pos + (uint32_t)(i) + 8 + src;
else else

View File

@ -23,6 +23,7 @@ armthumb_code(lzma_simple *simple lzma_attribute((__unused__)),
for (i = 0; i + 4 <= size; i += 2) { for (i = 0; i + 4 <= size; i += 2) {
if ((buffer[i + 1] & 0xF8) == 0xF0 if ((buffer[i + 1] & 0xF8) == 0xF0
&& (buffer[i + 3] & 0xF8) == 0xF8) { && (buffer[i + 3] & 0xF8) == 0xF8) {
uint32_t dest;
uint32_t src = ((buffer[i + 1] & 0x7) << 19) uint32_t src = ((buffer[i + 1] & 0x7) << 19)
| (buffer[i + 0] << 11) | (buffer[i + 0] << 11)
| ((buffer[i + 3] & 0x7) << 8) | ((buffer[i + 3] & 0x7) << 8)
@ -30,7 +31,6 @@ armthumb_code(lzma_simple *simple lzma_attribute((__unused__)),
src <<= 1; src <<= 1;
uint32_t dest;
if (is_encoder) if (is_encoder)
dest = now_pos + (uint32_t)(i) + 4 + src; dest = now_pos + (uint32_t)(i) + 4 + src;
else else

View File

@ -28,36 +28,42 @@ ia64_code(lzma_simple *simple lzma_attribute((__unused__)),
size_t i; size_t i;
for (i = 0; i + 16 <= size; i += 16) { for (i = 0; i + 16 <= size; i += 16) {
size_t slot;
const uint32_t instr_template = buffer[i] & 0x1F; const uint32_t instr_template = buffer[i] & 0x1F;
const uint32_t mask = BRANCH_TABLE[instr_template]; const uint32_t mask = BRANCH_TABLE[instr_template];
uint32_t bit_pos = 5; uint32_t bit_pos = 5;
for (size_t slot = 0; slot < 3; ++slot, bit_pos += 41) { for (slot = 0; slot < 3; ++slot, bit_pos += 41) {
if (((mask >> slot) & 1) == 0)
continue;
const size_t byte_pos = (bit_pos >> 3); const size_t byte_pos = (bit_pos >> 3);
const uint32_t bit_res = bit_pos & 0x7; const uint32_t bit_res = bit_pos & 0x7;
uint64_t instruction = 0; uint64_t instruction = 0;
uint64_t inst_norm;
size_t j;
if (((mask >> slot) & 1) == 0)
continue;
for (size_t j = 0; j < 6; ++j) for (j = 0; j < 6; ++j)
instruction += (uint64_t)( instruction += (uint64_t)(
buffer[i + j + byte_pos]) buffer[i + j + byte_pos])
<< (8 * j); << (8 * j);
uint64_t inst_norm = instruction >> bit_res; inst_norm = instruction >> bit_res;
if (((inst_norm >> 37) & 0xF) == 0x5 if (((inst_norm >> 37) & 0xF) == 0x5
&& ((inst_norm >> 9) & 0x7) == 0 && ((inst_norm >> 9) & 0x7) == 0
/* && (inst_norm & 0x3F)== 0 */ /* && (inst_norm & 0x3F)== 0 */
) { ) {
uint32_t dest;
size_t j;
uint32_t src = (uint32_t)( uint32_t src = (uint32_t)(
(inst_norm >> 13) & 0xFFFFF); (inst_norm >> 13) & 0xFFFFF);
src |= ((inst_norm >> 36) & 1) << 20; src |= ((inst_norm >> 36) & 1) << 20;
src <<= 4; src <<= 4;
uint32_t dest;
if (is_encoder) if (is_encoder)
dest = now_pos + (uint32_t)(i) + src; dest = now_pos + (uint32_t)(i) + src;
else else
@ -73,7 +79,7 @@ ia64_code(lzma_simple *simple lzma_attribute((__unused__)),
instruction &= (1 << bit_res) - 1; instruction &= (1 << bit_res) - 1;
instruction |= (inst_norm << bit_res); instruction |= (inst_norm << bit_res);
for (size_t j = 0; j < 6; j++) for (j = 0; j < 6; j++)
buffer[i + j + byte_pos] = (uint8_t)( buffer[i + j + byte_pos] = (uint8_t)(
instruction instruction
>> (8 * j)); >> (8 * j));

View File

@ -71,6 +71,9 @@ simple_code(lzma_coder *coder, lzma_allocator *allocator,
size_t in_size, uint8_t *restrict out, size_t in_size, uint8_t *restrict out,
size_t *restrict out_pos, size_t out_size, lzma_action action) size_t *restrict out_pos, size_t out_size, lzma_action action)
{ {
size_t out_avail;
size_t buf_avail;
// TODO: Add partial support for LZMA_SYNC_FLUSH. We can support it // TODO: Add partial support for LZMA_SYNC_FLUSH. We can support it
// in cases when the filter is able to filter everything. With most // in cases when the filter is able to filter everything. With most
// simple filters it can be done at offset that is a multiple of 2, // simple filters it can be done at offset that is a multiple of 2,
@ -105,9 +108,13 @@ simple_code(lzma_coder *coder, lzma_allocator *allocator,
// more data to out[] hopefully filling it completely. Then filter // more data to out[] hopefully filling it completely. Then filter
// the data in out[]. This step is where most of the data gets // the data in out[]. This step is where most of the data gets
// filtered if the buffer sizes used by the application are reasonable. // filtered if the buffer sizes used by the application are reasonable.
const size_t out_avail = out_size - *out_pos; out_avail = out_size - *out_pos;
const size_t buf_avail = coder->size - coder->pos; buf_avail = coder->size - coder->pos;
if (out_avail > buf_avail || buf_avail == 0) { if (out_avail > buf_avail || buf_avail == 0) {
size_t size;
size_t filtered;
size_t unfiltered;
// Store the old position so that we know from which byte // Store the old position so that we know from which byte
// to start filtering. // to start filtering.
const size_t out_start = *out_pos; const size_t out_start = *out_pos;
@ -130,11 +137,10 @@ simple_code(lzma_coder *coder, lzma_allocator *allocator,
} }
// Filter out[]. // Filter out[].
const size_t size = *out_pos - out_start; size = *out_pos - out_start;
const size_t filtered = call_filter( filtered = call_filter(coder, out + out_start, size);
coder, out + out_start, size);
const size_t unfiltered = size - filtered; unfiltered = size - filtered;
assert(unfiltered <= coder->allocated / 2); assert(unfiltered <= coder->allocated / 2);
// Now we can update coder->pos and coder->size, because // Now we can update coder->pos and coder->size, because

View File

@ -17,14 +17,15 @@ extern lzma_ret
lzma_simple_props_decode(void **options, lzma_allocator *allocator, lzma_simple_props_decode(void **options, lzma_allocator *allocator,
const uint8_t *props, size_t props_size) const uint8_t *props, size_t props_size)
{ {
lzma_options_bcj *opt;
if (props_size == 0) if (props_size == 0)
return LZMA_OK; return LZMA_OK;
if (props_size != 4) if (props_size != 4)
return LZMA_OPTIONS_ERROR; return LZMA_OPTIONS_ERROR;
lzma_options_bcj *opt = lzma_alloc( opt = lzma_alloc(sizeof(lzma_options_bcj), allocator);
sizeof(lzma_options_bcj), allocator);
if (opt == NULL) if (opt == NULL)
return LZMA_MEM_ERROR; return LZMA_MEM_ERROR;

View File

@ -26,6 +26,8 @@ sparc_code(lzma_simple *simple lzma_attribute((__unused__)),
|| (buffer[i] == 0x7F || (buffer[i] == 0x7F
&& (buffer[i + 1] & 0xC0) == 0xC0)) { && (buffer[i + 1] & 0xC0) == 0xC0)) {
uint32_t dest;
uint32_t src = ((uint32_t)buffer[i + 0] << 24) uint32_t src = ((uint32_t)buffer[i + 0] << 24)
| ((uint32_t)buffer[i + 1] << 16) | ((uint32_t)buffer[i + 1] << 16)
| ((uint32_t)buffer[i + 2] << 8) | ((uint32_t)buffer[i + 2] << 8)
@ -33,7 +35,6 @@ sparc_code(lzma_simple *simple lzma_attribute((__unused__)),
src <<= 2; src <<= 2;
uint32_t dest;
if (is_encoder) if (is_encoder)
dest = now_pos + (uint32_t)(i) + src; dest = now_pos + (uint32_t)(i) + src;
else else

View File

@ -36,30 +36,36 @@ x86_code(lzma_simple *simple, uint32_t now_pos, bool is_encoder,
uint32_t prev_mask = simple->prev_mask; uint32_t prev_mask = simple->prev_mask;
uint32_t prev_pos = simple->prev_pos; uint32_t prev_pos = simple->prev_pos;
size_t limit;
size_t buffer_pos;
if (size < 5) if (size < 5)
return 0; return 0;
if (now_pos - prev_pos > 5) if (now_pos - prev_pos > 5)
prev_pos = now_pos - 5; prev_pos = now_pos - 5;
const size_t limit = size - 5; limit = size - 5;
size_t buffer_pos = 0; buffer_pos = 0;
while (buffer_pos <= limit) { while (buffer_pos <= limit) {
uint32_t offset;
uint32_t i;
uint8_t b = buffer[buffer_pos]; uint8_t b = buffer[buffer_pos];
if (b != 0xE8 && b != 0xE9) { if (b != 0xE8 && b != 0xE9) {
++buffer_pos; ++buffer_pos;
continue; continue;
} }
const uint32_t offset = now_pos + (uint32_t)(buffer_pos) offset = now_pos + (uint32_t)(buffer_pos)
- prev_pos; - prev_pos;
prev_pos = now_pos + (uint32_t)(buffer_pos); prev_pos = now_pos + (uint32_t)(buffer_pos);
if (offset > 5) { if (offset > 5) {
prev_mask = 0; prev_mask = 0;
} else { } else {
for (uint32_t i = 0; i < offset; ++i) { for (i = 0; i < offset; ++i) {
prev_mask &= 0x77; prev_mask &= 0x77;
prev_mask <<= 1; prev_mask <<= 1;
} }
@ -78,6 +84,8 @@ x86_code(lzma_simple *simple, uint32_t now_pos, bool is_encoder,
uint32_t dest; uint32_t dest;
while (true) { while (true) {
uint32_t i;
if (is_encoder) if (is_encoder)
dest = src + (now_pos + (uint32_t)( dest = src + (now_pos + (uint32_t)(
buffer_pos) + 5); buffer_pos) + 5);
@ -88,8 +96,7 @@ x86_code(lzma_simple *simple, uint32_t now_pos, bool is_encoder,
if (prev_mask == 0) if (prev_mask == 0)
break; break;
const uint32_t i = MASK_TO_BIT_NUMBER[ i = MASK_TO_BIT_NUMBER[prev_mask >> 1];
prev_mask >> 1];
b = (uint8_t)(dest >> (24 - i * 8)); b = (uint8_t)(dest >> (24 - i * 8));