Mirror of https://github.com/odin-lang/Odin.git, synced 2026-02-15 23:54:07 +00:00
ZLIB: cleanup.
@@ -132,17 +132,16 @@ Context_Memory_Input :: struct #packed {
    output: ^bytes.Buffer,
    bytes_written: i64,

    code_buffer: u64,
    num_bits: u64,
    code_buffer: u64,
    num_bits: u64,

    /*
        If we know the data size, we can optimize the reads and writes.
    */
    size_packed: i64,
    size_unpacked: i64,

    padding: [1]u8,
    size_packed: i64,
    size_unpacked: i64,
}
#assert(size_of(Context_Memory_Input) == 64);

Context_Stream_Input :: struct #packed {
    input_data: []u8,
@@ -150,14 +149,14 @@ Context_Stream_Input :: struct #packed {
    output: ^bytes.Buffer,
    bytes_written: i64,

    code_buffer: u64,
    num_bits: u64,
    code_buffer: u64,
    num_bits: u64,

    /*
        If we know the data size, we can optimize the reads and writes.
    */
    size_packed: i64,
    size_unpacked: i64,
    size_packed: i64,
    size_unpacked: i64,

    /*
        Flags:
@@ -170,11 +169,8 @@ Context_Stream_Input :: struct #packed {
    padding: [1]u8,
}

// Stream helpers
/*
    TODO: These need to be optimized.

    Streams should really only check if a certain method is available once, perhaps even during setup.
    TODO: The stream versions should really only check if a certain method is available once, perhaps even during setup.

    Bit and byte readers may be merged so that reading bytes will grab them from the bit buffer first.
    This simplifies end-of-stream handling where bits may be left in the bit buffer.
@@ -256,6 +252,25 @@ read_u8_from_stream :: #force_inline proc(z: ^Context_Stream_Input) -> (res: u8,

read_u8 :: proc{read_u8_from_memory, read_u8_from_stream};

/*
    You would typically only use this at the end of Inflate, to drain bits from the code buffer
    preferentially.
*/
@(optimization_mode="speed")
read_u8_prefer_code_buffer_lsb :: #force_inline proc(z: ^$C) -> (res: u8, err: io.Error) {
    if z.num_bits >= 8 {
        res = u8(read_bits_no_refill_lsb(z, 8));
    } else {
        size, _ := input_size(z);
        if size > 0 {
            res, err = read_u8(z);
        } else {
            err = .EOF;
        }
    }
    return;
}

@(optimization_mode="speed")
peek_data_from_memory :: #force_inline proc(z: ^Context_Memory_Input, $T: typeid) -> (res: T, err: io.Error) {
    size :: size_of(T);
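A minimal usage sketch of the new helper, mirroring the footer reads added further down in this commit; z is assumed to be a context that has just finished inflating:

    // Drain a 4-byte little-endian footer, preferring bits still left in the code buffer.
    footer_b: [4]u8;
    for _, i in footer_b {
        footer_b[i], _ = read_u8_prefer_code_buffer_lsb(z);
    }
    footer := transmute(u32le)footer_b;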
@@ -314,28 +329,27 @@ peek_back_byte :: #force_inline proc(z: ^$C, offset: i64) -> (res: u8, err: io.E

// Generalized bit reader LSB
@(optimization_mode="speed")
refill_lsb_from_memory :: proc(z: ^Context_Memory_Input, width := i8(24)) {
refill_lsb_from_memory :: #force_inline proc(z: ^Context_Memory_Input, width := i8(48)) {
    refill := u64(width);
    b := u64(0);

    if z.num_bits > refill {
        return;
    }

    for {
        if len(z.input_data) != 0 {
            b = u64(z.input_data[0]);
            z.input_data = z.input_data[1:];
        } else {
            b = 0;
        }

        z.code_buffer |= b << u8(z.num_bits);
        z.num_bits += 8;
        if z.num_bits > refill {
            break;
        }
        if z.code_buffer == 0 && z.num_bits > 63 {
            z.num_bits = 0;
        }
        if z.code_buffer >= 1 << uint(z.num_bits) {
            // Code buffer is malformed.
            z.num_bits = max(u64);
            return;
        }
        b, err := read_u8(z);
        if err != .None {
            // This is fine at the end of the file.
            return;
        }
        z.code_buffer |= (u64(b) << u8(z.num_bits));
        z.num_bits += 8;
    }
}
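As a rough illustration of the LSB refill (byte values assumed, not taken from the diff): each new byte is shifted up by the number of bits already buffered and OR-ed in, so earlier bits stay at the low end of code_buffer:

    // num_bits = 0, code_buffer = 0
    // refill 0xAB: code_buffer |= 0xAB << 0  -> 0x00AB, num_bits = 8
    // refill 0xCD: code_buffer |= 0xCD << 8  -> 0xCDAB, num_bits = 16
    // peek_bits_lsb(z, 4) now yields 0xB, the low 4 bits of the first byte read.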
@@ -417,8 +431,8 @@ peek_bits_no_refill_lsb :: proc{peek_bits_no_refill_lsb_from_memory, peek_bits_n

@(optimization_mode="speed")
read_bits_lsb_from_memory :: #force_inline proc(z: ^Context_Memory_Input, width: u8) -> u32 {
    k := peek_bits_lsb(z, width);
    consume_bits_lsb(z, width);
    k := #force_inline peek_bits_lsb(z, width);
    #force_inline consume_bits_lsb(z, width);
    return k;
}
@@ -433,8 +447,8 @@ read_bits_lsb :: proc{read_bits_lsb_from_memory, read_bits_lsb_from_stream};

@(optimization_mode="speed")
read_bits_no_refill_lsb_from_memory :: #force_inline proc(z: ^Context_Memory_Input, width: u8) -> u32 {
    k := peek_bits_no_refill_lsb(z, width);
    consume_bits_lsb(z, width);
    k := #force_inline peek_bits_no_refill_lsb(z, width);
    #force_inline consume_bits_lsb(z, width);
    return k;
}
@@ -451,7 +465,7 @@ read_bits_no_refill_lsb :: proc{read_bits_no_refill_lsb_from_memory, read_bits_n
@(optimization_mode="speed")
discard_to_next_byte_lsb_from_memory :: proc(z: ^Context_Memory_Input) {
    discard := u8(z.num_bits & 7);
    consume_bits_lsb(z, discard);
    #force_inline consume_bits_lsb(z, discard);
}
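A small worked example of the byte-boundary discard (the starting bit count is assumed):

    // num_bits = 13: 13 & 7 = 5, so 5 bits are consumed,
    // leaving num_bits = 8 and the reader aligned to the next byte boundary.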
@@ -21,8 +21,6 @@ import "core:io"
import "core:bytes"
import "core:hash"

// import "core:fmt"

Magic :: enum u16le {
    GZIP = 0x8b << 8 | 0x1f,
}
@@ -346,26 +344,23 @@ load_from_context :: proc(z: ^$C, buf: ^bytes.Buffer, known_gzip_size := -1, exp

    payload_crc_b: [4]u8;
    for _, i in payload_crc_b {
        if z.num_bits >= 8 {
            payload_crc_b[i] = u8(compress.read_bits_lsb(z, 8));
        } else {
            payload_crc_b[i], footer_error = compress.read_u8(z);
        }
        payload_crc_b[i], footer_error = compress.read_u8_prefer_code_buffer_lsb(z);
    }
    payload_crc := transmute(u32le)payload_crc_b;
    payload_u32le, footer_error = compress.read_data(z, u32le);

    payload := bytes.buffer_to_bytes(buf);

    // fmt.printf("GZIP payload: %v\n", string(payload));

    crc32 := u32le(hash.crc32(payload));

    crc32 := u32le(hash.crc32(payload));
    if crc32 != payload_crc {
        return E_GZIP.Payload_CRC_Invalid;
    }

    if len(payload) != int(payload_u32le) {
    payload_len_b: [4]u8;
    for _, i in payload_len_b {
        payload_len_b[i], footer_error = compress.read_u8_prefer_code_buffer_lsb(z);
    }
    payload_len := transmute(u32le)payload_len_b;

    if len(payload) != int(payload_len) {
        return E_GZIP.Payload_Length_Invalid;
    }
    return nil;
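For reference, the footer being validated here is the fixed 8-byte GZIP trailer (RFC 1952); both fields are stored little-endian, which is why each 4-byte group is transmuted to u32le:

    // GZIP trailer, 8 bytes, little-endian:
    //   CRC32 - CRC-32 of the uncompressed payload
    //   ISIZE - length of the uncompressed payload modulo 2^32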
@@ -13,7 +13,6 @@ package zlib

import "core:bytes"
import "core:fmt"
import "core:compress"

main :: proc() {
@@ -38,8 +37,6 @@ main :: proc() {
    };
    OUTPUT_SIZE :: 438;

    fmt.printf("size_of(Context): %v\n", size_of(compress.Context_Memory_Input));

    buf: bytes.Buffer;

    // We can pass ", true" to inflate a raw DEFLATE stream instead of a ZLIB wrapped one.
@@ -15,7 +15,6 @@ import "core:mem"
import "core:io"
import "core:hash"
import "core:bytes"
// import "core:fmt"

/*
    zlib.inflate decompresses a ZLIB stream passed in as a []u8 or io.Stream.
@@ -147,7 +146,6 @@ grow_buffer :: proc(buf: ^[dynamic]u8) -> (err: compress.Error) {
        Double until we reach the maximum allowed.
    */
    new_size := min(len(buf) << 1, compress.COMPRESS_OUTPUT_ALLOCATE_MAX);

    resize(buf, new_size);
    if len(buf) != new_size {
        /*
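A quick worked example of the doubling, with an assumed starting size and cap (the actual value of COMPRESS_OUTPUT_ALLOCATE_MAX is not shown in this diff):

    // len(buf) = 1 MiB, cap = 4 MiB (assumed):
    //   1st grow: min(2 MiB, 4 MiB) = 2 MiB
    //   2nd grow: min(4 MiB, 4 MiB) = 4 MiB
    //   3rd grow: min(8 MiB, 4 MiB) = 4 MiB, so the buffer never grows past the cap.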
@@ -482,11 +480,16 @@ inflate_from_context :: proc(using ctx: ^compress.Context_Memory_Input, raw := f

    if !raw {
        compress.discard_to_next_byte_lsb(ctx);
        adler32 := compress.read_bits_lsb(ctx, 8) << 24 | compress.read_bits_lsb(ctx, 8) << 16 | compress.read_bits_lsb(ctx, 8) << 8 | compress.read_bits_lsb(ctx, 8);

        adler_b: [4]u8;
        for _, i in adler_b {
            adler_b[i], _ = compress.read_u8_prefer_code_buffer_lsb(ctx);
        }
        adler := transmute(u32be)adler_b;

        output_hash := hash.adler32(ctx.output.buf[:]);

        if output_hash != u32(adler32) {
        if output_hash != u32(adler) {
            return E_General.Checksum_Failed;
        }
    }
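Worth noting for the transmute above: a ZLIB stream stores its Adler-32 trailer most significant byte first (RFC 1950), so the four drained bytes are reinterpreted as u32be before comparing against hash.adler32 of the decompressed output:

    // ZLIB trailer (RFC 1950): ADLER32, 4 bytes, big-endian,
    // computed over the uncompressed data.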
@@ -684,7 +687,6 @@ inflate_raw :: proc(z: ^$C, expected_output_size := -1, allocator := context.all
        }
    }

    // fmt.printf("ZLIB: Bytes written: %v\n", z.bytes_written);
    if int(z.bytes_written) != len(z.output.buf) {
        resize(&z.output.buf, int(z.bytes_written));
    }
@@ -41,7 +41,7 @@ main :: proc() {
demo :: proc() {
    file: string;

    options := image.Options{.return_metadata};
    options := image.Options{}; // {.return_metadata};
    err: compress.Error;
    img: ^image.Image;
@@ -56,9 +56,9 @@ demo :: proc() {
    v: ^Info;

    fmt.printf("Image: %vx%vx%v, %v-bit.\n", img.width, img.height, img.channels, img.depth);

    if img.metadata_ptr != nil && img.metadata_type == Info {
        v = (^Info)(img.metadata_ptr);

        // Handle ancillary chunks as you wish.
        // We provide helper functions for a few types.
        for c in v.chunks {