mirror of
https://github.com/odin-lang/Odin.git
synced 2026-01-08 22:13:17 +00:00
Add compress and image to core.
This commit is contained in:
203
core/compress/common.odin
Normal file
203
core/compress/common.odin
Normal file
@@ -0,0 +1,203 @@
|
||||
package compress
|
||||
|
||||
import "core:io"
|
||||
import "core:image"
|
||||
|
||||
// Error helper, e.g. is_kind(err, General_Error.OK);
// Reports whether union value `u` currently holds variant type V AND that
// variant equals `x`. Returns false for a nil union or a different variant.
is_kind :: proc(u: $U, x: $V) -> bool {
	v, ok := u.(V);
	return ok && v == x;
}
|
||||
|
||||
// Unified error union shared by the compress family of packages.
// NOTE: a nil union value is distinct from General_Error.OK — test with is_kind.
Error :: union {
	General_Error,
	Deflate_Error,
	ZLIB_Error,
	GZIP_Error,
	ZIP_Error,
	/*
		This is here because png.load will return this type of error union,
		as it may involve an I/O error, a Deflate error, etc.
	*/
	image.PNG_Error,
}

// Errors common to all compression formats (I/O, sizing, checksums).
General_Error :: enum {
	OK = 0,
	File_Not_Found,
	Cannot_Open_File,
	File_Too_Short,
	Stream_Too_Short,
	Output_Too_Short,
	Unknown_Compression_Method,
	Checksum_Failed,
	Incompatible_Options,
	Unimplemented,
}

// GZIP (RFC 1952) specific errors.
GZIP_Error :: enum {
	Invalid_GZIP_Signature,
	Reserved_Flag_Set,
	Invalid_Extra_Data,
	Original_Name_Too_Long,
	Comment_Too_Long,
	Payload_Length_Invalid,
	Payload_CRC_Invalid,
}

// ZIP archive specific errors.
ZIP_Error :: enum {
	Invalid_ZIP_File_Signature,
	Unexpected_Signature,
	Insert_Next_Disk,
	Expected_End_of_Central_Directory_Record,
}

// ZLIB (RFC 1950) wrapper specific errors.
ZLIB_Error :: enum {
	Unsupported_Window_Size,
	FDICT_Unsupported,
	Unsupported_Compression_Level,
	Code_Buffer_Malformed,
}

// DEFLATE (RFC 1951) bit-stream specific errors.
Deflate_Error :: enum {
	Huffman_Bad_Sizes,
	Huffman_Bad_Code_Lengths,
	Inflate_Error,
	Bad_Distance,
	Bad_Huffman_Code,
	Len_Nlen_Mismatch,
	BType_3,
}

// General context for ZLIB, LZW, etc.
// Holds the bit-reader state, the input/output streams, the rolling
// checksum, and the sliding window used for back-references.
Context :: struct {
	// LSB-first bit buffer; filled by refill_lsb.
	code_buffer: u32,
	num_bits: i8,
	/*
		num_bits will be set to -100 if the buffer is malformed
		(and to -42 when the input stream runs out; see refill_lsb).
	*/
	eof: b8,

	input: io.Stream,
	output: io.Stream,
	bytes_written: i64,
	// Used to update hash as we write instead of all at once
	rolling_hash: u32,

	// Sliding window buffer. Size must be a power of two.
	window_size: i64,
	last: ^[dynamic]byte,
}
|
||||
|
||||
// Stream helpers
|
||||
/*
|
||||
TODO: These need to be optimized.
|
||||
|
||||
Streams should really only check if a certain method is available once, perhaps even during setup.
|
||||
|
||||
Bit and byte readers may be merged so that reading bytes will grab them from the bit buffer first.
|
||||
This simplifies end-of-stream handling where bits may be left in the bit buffer.
|
||||
*/
|
||||
|
||||
// Reads size_of(T) bytes from the context's input stream and returns them
// reinterpreted as a value of T. No byte-swapping is performed; use
// endian-specific types (e.g. u16le) where on-disk layout matters.
//
// Returns .Empty when the input stream does not support reading, or the
// underlying read error otherwise.
read_data :: #force_inline proc(c: ^Context, $T: typeid) -> (res: T, err: io.Error) {
	b := make([]u8, size_of(T), context.temp_allocator);

	r, e1 := io.to_reader(c.input);
	if !e1 {
		// Bug fix: previously `!e1 || e2 != .None` returned `e2`, whose zero
		// value is .None — silently signalling success when the stream has
		// no `read` implementation at all.
		return T{}, .Empty;
	}
	_, e2 := io.read(r, b);
	if e2 != .None {
		return T{}, e2;
	}

	res = (^T)(raw_data(b))^;
	return res, .None;
}
|
||||
|
||||
// Reads a single byte from the context's input stream.
// Thin convenience wrapper over read_data(z, u8).
read_u8 :: #force_inline proc(z: ^Context) -> (res: u8, err: io.Error) {
	return read_data(z, u8);
}
|
||||
|
||||
// Reads size_of(T) bytes at the current stream position WITHOUT advancing it,
// using the stream's read_at implementation. Requires the input stream to
// support `seek` (to discover the position) and `read_at`.
//
// Returns .Empty when the stream lacks read_at, otherwise propagates the
// seek/read error.
peek_data :: #force_inline proc(c: ^Context, $T: typeid) -> (res: T, err: io.Error) {
	// Get current position to read from.
	curr, e1 := c.input->impl_seek(0, .Current);
	if e1 != .None {
		return T{}, e1;
	}
	r, e2 := io.to_reader_at(c.input);
	if !e2 {
		return T{}, .Empty;
	}
	b := make([]u8, size_of(T), context.temp_allocator);
	_, e3 := io.read_at(r, b, curr);
	if e3 != .None {
		// Bug fix: propagate the actual read error instead of masking every
		// failure as .Empty — callers could not tell a short read from a
		// stream that lacks read_at.
		return T{}, e3;
	}

	res = (^T)(raw_data(b))^;
	return res, .None;
}
|
||||
|
||||
// Sliding window read back.
// Returns the byte at absolute output offset `offset`, looked up in the
// circular window buffer `c.last` (window_size must be a power of two).
// The caller is responsible for ensuring the offset is within the window
// of recently written bytes.
peek_back_byte :: proc(c: ^Context, offset: i64) -> (res: u8, err: io.Error) {
	if offset < 0 {
		// Robustness: a negative offset would index the window with a
		// negative remainder; report it instead of faulting.
		return 0, .Invalid_Offset;
	}
	// Look back into the sliding window.
	return c.last[offset % c.window_size], .None;
}
|
||||
|
||||
// Generalized bit reader LSB
// Tops up the LSB-first bit buffer one input byte at a time until it holds
// more than `width` bits. On error, sentinels are left in num_bits:
//   -100  the code buffer is malformed (bits set above num_bits),
//   -42   the input ran out (also sets z.eof; fine at end of file).
refill_lsb :: proc(z: ^Context, width := i8(24)) {
	for {
		if z.num_bits > width {
			break;
		}
		if z.code_buffer == 0 && z.num_bits == -1 {
			// Recover the "empty" state after a -1 sentinel.
			z.num_bits = 0;
		}
		if z.code_buffer >= 1 << uint(z.num_bits) {
			// Code buffer is malformed.
			z.num_bits = -100;
			return;
		}
		c, err := read_u8(z);
		if err != .None {
			// This is fine at the end of the file.
			z.num_bits = -42;
			z.eof = true;
			return;
		}
		// Append the new byte above the bits already buffered.
		z.code_buffer |= (u32(c) << u8(z.num_bits));
		z.num_bits += 8;
	}
}
|
||||
|
||||
// Drops `width` bits from the bottom of the LSB bit buffer.
// The caller must ensure at least `width` bits are buffered.
consume_bits_lsb :: #force_inline proc(z: ^Context, width: u8) {
	z.code_buffer >>= width;
	z.num_bits -= i8(width);
}
|
||||
|
||||
// Returns the bottom `width` bits of the bit buffer without consuming them,
// refilling the buffer from the input stream when it holds too few bits.
peek_bits_lsb :: #force_inline proc(z: ^Context, width: u8) -> u32 {
	if z.num_bits < i8(width) {
		refill_lsb(z);
	}
	mask := ~(~u32(0) << width);
	return z.code_buffer & mask;
}
|
||||
|
||||
// Returns the bottom `width` bits of the bit buffer without consuming them.
// Unlike peek_bits_lsb, does NOT refill: the caller must have called
// refill_lsb beforehand with at least `width` bits available.
peek_bits_no_refill_lsb :: #force_inline proc(z: ^Context, width: u8) -> u32 {
	assert(z.num_bits >= i8(width));
	return z.code_buffer & ~(~u32(0) << width);
}
|
||||
|
||||
// Reads and consumes `width` bits (LSB-first), refilling as required.
read_bits_lsb :: #force_inline proc(z: ^Context, width: u8) -> u32 {
	bits := peek_bits_lsb(z, width);
	consume_bits_lsb(z, width);
	return bits;
}
|
||||
|
||||
// Reads and consumes `width` bits (LSB-first) without refilling; the caller
// must have topped up the buffer beforehand (e.g. via refill_lsb).
read_bits_no_refill_lsb :: #force_inline proc(z: ^Context, width: u8) -> u32 {
	bits := peek_bits_no_refill_lsb(z, width);
	consume_bits_lsb(z, width);
	return bits;
}
|
||||
|
||||
// Discards buffered bits up to the next byte boundary, so subsequent reads
// are byte-aligned (used before reading trailing checksums/lengths).
discard_to_next_byte_lsb :: proc(z: ^Context) {
	discard := u8(z.num_bits & 7);
	consume_bits_lsb(z, discard);
}
|
||||
70
core/compress/gzip/example.odin
Normal file
70
core/compress/gzip/example.odin
Normal file
@@ -0,0 +1,70 @@
|
||||
//+ignore
|
||||
package gzip
|
||||
|
||||
import "core:compress/gzip"
|
||||
import "core:bytes"
|
||||
import "core:os"
|
||||
|
||||
// Small GZIP file with fextra, fname and fcomment present.
// Used as a built-in demo payload when no input file is given to main.
@private
TEST: []u8 = {
	0x1f, 0x8b, 0x08, 0x1c, 0xcb, 0x3b, 0x3a, 0x5a,
	0x02, 0x03, 0x07, 0x00, 0x61, 0x62, 0x03, 0x00,
	0x63, 0x64, 0x65, 0x66, 0x69, 0x6c, 0x65, 0x6e,
	0x61, 0x6d, 0x65, 0x00, 0x54, 0x68, 0x69, 0x73,
	0x20, 0x69, 0x73, 0x20, 0x61, 0x20, 0x63, 0x6f,
	0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x00, 0x2b, 0x48,
	0xac, 0xcc, 0xc9, 0x4f, 0x4c, 0x01, 0x00, 0x15,
	0x6a, 0x2c, 0x42, 0x07, 0x00, 0x00, 0x00,
};
|
||||
|
||||
// Example driver: decompresses each file named on the command line (or stdin
// for "-") with gzip.load, printing the result to stdout. With no arguments
// it decompresses the built-in TEST vector instead.
// Exit codes: 1 = file not found, 2 = any other GZIP error, 0 = success.
main :: proc() {
	// Set up output buffer.
	buf: bytes.Buffer;
	defer bytes.buffer_destroy(&buf);

	stdout :: proc(s: string) {
		os.write_string(os.stdout, s);
	}
	stderr :: proc(s: string) {
		os.write_string(os.stderr, s);
	}

	args := os.args;

	if len(args) < 2 {
		stderr("No input file specified.\n");
		// Fall back to the embedded test vector.
		err := gzip.load(&TEST, &buf);
		if gzip.is_kind(err, gzip.E_General.OK) {
			stdout("Displaying test vector: ");
			stdout(bytes.buffer_to_string(&buf));
			stdout("\n");
		}
	}

	// The rest are all files.
	args = args[1:];
	err: gzip.Error;

	for file in args {
		if file == "-" {
			// Read from stdin
			s := os.stream_from_handle(os.stdin);
			err = gzip.load(&s, &buf);
		} else {
			err = gzip.load(file, &buf);
		}
		if !gzip.is_kind(err, gzip.E_General.OK) {
			if gzip.is_kind(err, gzip.E_General.File_Not_Found) {
				stderr("File not found: ");
				stderr(file);
				stderr("\n");
				os.exit(1);
			}
			stderr("GZIP returned an error.\n");
			os.exit(2);
		}
		stdout(bytes.buffer_to_string(&buf));
	}
	os.exit(0);
}
|
||||
314
core/compress/gzip/gzip.odin
Normal file
314
core/compress/gzip/gzip.odin
Normal file
@@ -0,0 +1,314 @@
|
||||
package gzip
|
||||
|
||||
import "core:compress/zlib"
|
||||
import "core:compress"
|
||||
import "core:os"
|
||||
import "core:io"
|
||||
import "core:bytes"
|
||||
import "core:hash"
|
||||
|
||||
/*
|
||||
|
||||
This package implements support for the GZIP file format v4.3,
|
||||
as specified in RFC 1952.
|
||||
|
||||
It is implemented in such a way that it lends itself naturally
|
||||
to be the input to a complementary TAR implementation.
|
||||
|
||||
*/
|
||||
|
||||
// GZIP signature as a little-endian u16: on disk bytes are 0x1f, 0x8b.
Magic :: enum u16le {
	GZIP = 0x8b << 8 | 0x1f,
}

// Fixed 10-byte GZIP member header as laid out on disk (RFC 1952, 2.3).
Header :: struct #packed {
	magic: Magic,
	compression_method: Compression,
	flags: Header_Flags,
	modification_time: u32le,
	xfl: Compression_Flags,
	os: OS,
}
#assert(size_of(Header) == 10);

// FLG bit positions from RFC 1952.
Header_Flag :: enum u8 {
	// Order is important
	text = 0,
	header_crc = 1,
	extra = 2,
	name = 3,
	comment = 4,
	reserved_1 = 5,
	reserved_2 = 6,
	reserved_3 = 7,
}
Header_Flags :: distinct bit_set[Header_Flag; u8];

// Originating operating system / filesystem, per RFC 1952.
// Values >= _Unknown are normalized to Unknown when loading.
OS :: enum u8 {
	FAT = 0,
	Amiga = 1,
	VMS = 2,
	Unix = 3,
	VM_CMS = 4,
	Atari_TOS = 5,
	HPFS = 6,
	Macintosh = 7,
	Z_System = 8,
	CP_M = 9,
	TOPS_20 = 10,
	NTFS = 11,
	QDOS = 12,
	Acorn_RISCOS = 13,
	_Unknown = 14,
	Unknown = 255,
}
// Human-readable names for the OS enum (indexed by OS value).
OS_Name :: #partial [OS]string{
	.FAT = "FAT",
	.Amiga = "Amiga",
	.VMS = "VMS/OpenVMS",
	.Unix = "Unix",
	.VM_CMS = "VM/CMS",
	.Atari_TOS = "Atari TOS",
	.HPFS = "HPFS",
	.Macintosh = "Macintosh",
	.Z_System = "Z-System",
	.CP_M = "CP/M",
	.TOPS_20 = "TOPS-20",
	.NTFS = "NTFS",
	.QDOS = "QDOS",
	.Acorn_RISCOS = "Acorn RISCOS",
	.Unknown = "Unknown",
};

// CM field: DEFLATE (8) is the only method defined by RFC 1952.
Compression :: enum u8 {
	DEFLATE = 8,
}

// XFL field hints set by the compressor.
Compression_Flags :: enum u8 {
	Maximum_Compression = 2,
	Fastest_Compression = 4,
}

// Short aliases so callers can write gzip.E_General, gzip.is_kind, etc.
Error :: compress.Error;
E_General :: compress.General_Error;
E_GZIP :: compress.GZIP_Error;
E_ZLIB :: compress.ZLIB_Error;
E_Deflate :: compress.Deflate_Error;
is_kind :: compress.is_kind;
||||
|
||||
// Decompresses a GZIP payload held entirely in memory, by wrapping the slice
// in a bytes.Reader-backed stream and delegating to load_from_stream.
// The decompressed output is appended to `buf`.
load_from_slice :: proc(slice: ^[]u8, buf: ^bytes.Buffer, allocator := context.allocator) -> (err: Error) {
	reader := bytes.Reader{};
	bytes.reader_init(&reader, slice^);
	stream := bytes.reader_to_stream(&reader);
	return load_from_stream(&stream, buf, allocator);
}
|
||||
|
||||
// Reads `filename` into temporary memory and decompresses its GZIP contents
// into `buf`. Returns E_General.File_Not_Found when the file cannot be read.
load_from_file :: proc(filename: string, buf: ^bytes.Buffer, allocator := context.allocator) -> (err: Error) {
	data, ok := os.read_entire_file(filename, context.temp_allocator);
	if !ok {
		return E_General.File_Not_Found;
	}
	return load_from_slice(&data, buf, allocator);
}
|
||||
|
||||
// Decompresses a GZIP member read from `stream` into `buf`.
// Parses and validates the fixed header, then skips/validates the optional
// FEXTRA, FNAME, FCOMMENT and FHCRC fields, inflates the DEFLATE payload,
// and finally verifies the trailing CRC32 and ISIZE fields.
load_from_stream :: proc(stream: ^io.Stream, buf: ^bytes.Buffer, allocator := context.allocator) -> (err: Error) {

	ctx := compress.Context{
		input = stream^,
	};
	buf := buf;
	ws := bytes.buffer_to_stream(buf);
	ctx.output = ws;

	header, e := compress.read_data(&ctx, Header);
	if e != .None {
		return E_General.File_Too_Short;
	}

	if header.magic != .GZIP {
		return E_GZIP.Invalid_GZIP_Signature;
	}
	if header.compression_method != .DEFLATE {
		return E_General.Unknown_Compression_Method;
	}

	// Normalize out-of-range OS values.
	if header.os >= ._Unknown {
		header.os = .Unknown;
	}

	if .reserved_1 in header.flags || .reserved_2 in header.flags || .reserved_3 in header.flags {
		return E_GZIP.Reserved_Flag_Set;
	}

	// printf("signature: %v\n", header.magic);
	// printf("compression: %v\n", header.compression_method);
	// printf("flags: %v\n", header.flags);
	// printf("modification time: %v\n", time.unix(i64(header.modification_time), 0));
	// printf("xfl: %v (%v)\n", header.xfl, int(header.xfl));
	// printf("os: %v\n", OS_Name[header.os]);

	// FEXTRA: length-prefixed list of (2-byte ID, 2-byte length, data) fields.
	if .extra in header.flags {
		xlen, e_extra := compress.read_data(&ctx, u16le);
		if e_extra != .None {
			return E_General.Stream_Too_Short;
		}
		// printf("Extra data present (%v bytes)\n", xlen);
		if xlen < 4 {
			// Minimum length is 2 for ID + 2 for a field length, if set to zero.
			return E_GZIP.Invalid_Extra_Data;
		}

		field_id: [2]u8;
		field_length: u16le;
		field_error: io.Error;

		for xlen >= 4 {
			// println("Parsing Extra field(s).");
			field_id, field_error = compress.read_data(&ctx, [2]u8);
			if field_error != .None {
				// printf("Parsing Extra returned: %v\n", field_error);
				return E_General.Stream_Too_Short;
			}
			xlen -= 2;

			field_length, field_error = compress.read_data(&ctx, u16le);
			if field_error != .None {
				// printf("Parsing Extra returned: %v\n", field_error);
				return E_General.Stream_Too_Short;
			}
			xlen -= 2;

			if xlen <= 0 {
				// We're not going to try and recover by scanning for a ZLIB header.
				// Who knows what else is wrong with this file.
				return E_GZIP.Invalid_Extra_Data;
			}

			// printf(" Field \"%v\" of length %v found: ", string(field_id[:]), field_length);
			if field_length > 0 {
				field_data := make([]u8, field_length, context.temp_allocator);
				_, field_error = ctx.input->impl_read(field_data);
				if field_error != .None {
					// printf("Parsing Extra returned: %v\n", field_error);
					return E_General.Stream_Too_Short;
				}
				xlen -= field_length;

				// printf("%v\n", string(field_data));
			}

			if xlen != 0 {
				return E_GZIP.Invalid_Extra_Data;
			}
		}
	}

	// FNAME: zero-terminated original filename; bounded at 1024 bytes here.
	if .name in header.flags {
		// Should be enough.
		name: [1024]u8;
		b: [1]u8;
		i := 0;
		name_error: io.Error;

		for i < len(name) {
			_, name_error = ctx.input->impl_read(b[:]);
			if name_error != .None {
				return E_General.Stream_Too_Short;
			}
			// NOTE(review): `b` is a [1]u8 compared against a scalar; assumes
			// Odin's element-wise array comparison — confirm equivalent to b[0] == 0.
			if b == 0 {
				break;
			}
			name[i] = b[0];
			i += 1;
			if i >= len(name) {
				return E_GZIP.Original_Name_Too_Long;
			}
		}
		// printf("Original filename: %v\n", string(name[:i]));
	}

	// FCOMMENT: zero-terminated comment; same bounded scan as FNAME.
	if .comment in header.flags {
		// Should be enough.
		comment: [1024]u8;
		b: [1]u8;
		i := 0;
		comment_error: io.Error;

		for i < len(comment) {
			_, comment_error = ctx.input->impl_read(b[:]);
			if comment_error != .None {
				return E_General.Stream_Too_Short;
			}
			if b == 0 {
				break;
			}
			comment[i] = b[0];
			i += 1;
			if i >= len(comment) {
				return E_GZIP.Comment_Too_Long;
			}
		}
		// printf("Comment: %v\n", string(comment[:i]));
	}

	// FHCRC: 2-byte header CRC — read (to stay in sync) but not validated.
	if .header_crc in header.flags {
		crc16: [2]u8;
		crc_error: io.Error;
		_, crc_error = ctx.input->impl_read(crc16[:]);
		if crc_error != .None {
			return E_General.Stream_Too_Short;
		}
		/*
			We don't actually check the CRC16 (lower 2 bytes of CRC32 of header data until the CRC field).
			If we find a gzip file in the wild that sets this field, we can add proper support for it.
		*/
	}

	/*
		We should have arrived at the ZLIB payload.
	*/

	zlib_error := zlib.inflate_raw(&ctx);

	// fmt.printf("ZLIB returned: %v\n", zlib_error);

	// NOTE(review): the `|| zlib_error == nil` arm returns a nil union as the
	// error — verify callers treat nil as failure, not success.
	if !is_kind(zlib_error, E_General.OK) || zlib_error == nil {
		return zlib_error;
	}

	/*
		Read CRC32 using the ctx bit reader because zlib may leave bytes in there.
	*/
	compress.discard_to_next_byte_lsb(&ctx);

	payload_crc_b: [4]u8;
	payload_len_b: [4]u8;
	for i in 0..3 {
		payload_crc_b[i] = u8(compress.read_bits_lsb(&ctx, 8));
	}
	payload_crc := transmute(u32le)payload_crc_b;
	for i in 0..3 {
		payload_len_b[i] = u8(compress.read_bits_lsb(&ctx, 8));
	}
	payload_len := int(transmute(u32le)payload_len_b);

	// Verify the decompressed payload against the trailing CRC32 and ISIZE.
	payload := bytes.buffer_to_bytes(buf);
	crc32 := u32le(hash.crc32(payload));

	if crc32 != payload_crc {
		return E_GZIP.Payload_CRC_Invalid;
	}

	if len(payload) != payload_len {
		return E_GZIP.Payload_Length_Invalid;
	}
	return E_General.OK;
}
|
||||
|
||||
load :: proc{load_from_file, load_from_slice, load_from_stream};
|
||||
42
core/compress/zlib/example.odin
Normal file
42
core/compress/zlib/example.odin
Normal file
@@ -0,0 +1,42 @@
|
||||
//+ignore
|
||||
package zlib
|
||||
|
||||
import "core:compress/zlib"
|
||||
import "core:bytes"
|
||||
import "core:fmt"
|
||||
|
||||
// Example: inflates an embedded ZLIB-wrapped DEFLATE stream and prints the
// result. The embedded vector decompresses to 438 bytes (asserted below).
main :: proc() {

	// ZLIB-compressed test vector.
	ODIN_DEMO: []u8 = {
		120, 156, 101, 144, 77, 110, 131, 48, 16, 133, 215, 204, 41, 158, 44,
		69, 73, 32, 148, 182, 75, 35, 14, 208, 125, 47, 96, 185, 195, 143,
		130, 13, 50, 38, 81, 84, 101, 213, 75, 116, 215, 43, 246, 8, 53,
		82, 126, 8, 181, 188, 152, 153, 111, 222, 147, 159, 123, 165, 247, 170,
		98, 24, 213, 88, 162, 198, 244, 157, 243, 16, 186, 115, 44, 75, 227,
		5, 77, 115, 72, 137, 222, 117, 122, 179, 197, 39, 69, 161, 170, 156,
		50, 144, 5, 68, 130, 4, 49, 126, 127, 190, 191, 144, 34, 19, 57,
		69, 74, 235, 209, 140, 173, 242, 157, 155, 54, 158, 115, 162, 168, 12,
		181, 239, 246, 108, 17, 188, 174, 242, 224, 20, 13, 199, 198, 235, 250,
		194, 166, 129, 86, 3, 99, 157, 172, 37, 230, 62, 73, 129, 151, 252,
		70, 211, 5, 77, 31, 104, 188, 160, 113, 129, 215, 59, 205, 22, 52,
		123, 160, 83, 142, 255, 242, 89, 123, 93, 149, 200, 50, 188, 85, 54,
		252, 18, 248, 192, 238, 228, 235, 198, 86, 224, 118, 224, 176, 113, 166,
		112, 67, 106, 227, 159, 122, 215, 88, 95, 110, 196, 123, 205, 183, 224,
		98, 53, 8, 104, 213, 234, 201, 147, 7, 248, 192, 14, 170, 29, 25,
		171, 15, 18, 59, 138, 112, 63, 23, 205, 110, 254, 136, 109, 78, 231,
		63, 234, 138, 133, 204,
	};

	buf: bytes.Buffer;

	// We can pass ", true" to inflate a raw DEFLATE stream instead of a ZLIB wrapped one.
	err := zlib.inflate(&ODIN_DEMO, &buf);
	defer bytes.buffer_destroy(&buf);

	if !zlib.is_kind(err, zlib.E_General.OK) {
		fmt.printf("\nError: %v\n", err);
	}
	s := bytes.buffer_to_string(&buf);
	fmt.printf("Input: %v bytes, output (%v bytes):\n%v\n", len(ODIN_DEMO), len(s), s);
	assert(len(s) == 438);
}
|
||||
602
core/compress/zlib/zlib.odin
Normal file
602
core/compress/zlib/zlib.odin
Normal file
@@ -0,0 +1,602 @@
|
||||
package zlib
|
||||
|
||||
import "core:compress"
|
||||
|
||||
import "core:mem"
|
||||
import "core:io"
|
||||
import "core:bytes"
|
||||
import "core:hash"
|
||||
/*
|
||||
zlib.inflate decompresses a ZLIB stream passed in as a []u8 or io.Stream.
|
||||
Returns: Error. You can use zlib.is_kind or compress.is_kind to easily test for OK.
|
||||
*/
|
||||
|
||||
// Re-export the shared decompression context.
Context :: compress.Context;

// CM nibble of the ZLIB CMF byte (RFC 1950).
Compression_Method :: enum u8 {
	DEFLATE = 8,
	Reserved = 15,
}

// FLEVEL hint bits of the ZLIB FLG byte.
Compression_Level :: enum u8 {
	Fastest = 0,
	Fast = 1,
	Default = 2,
	Maximum = 3,
}

Options :: struct {
	window_size: u16,
	level: u8,
}

// Short aliases so callers can write zlib.E_General, zlib.is_kind, etc.
Error :: compress.Error;
E_General :: compress.General_Error;
E_ZLIB :: compress.ZLIB_Error;
E_Deflate :: compress.Deflate_Error;
is_kind :: compress.is_kind;

// DEFLATE format limits (RFC 1951).
DEFLATE_MAX_CHUNK_SIZE :: 65535;
DEFLATE_MAX_LITERAL_SIZE :: 65535;
DEFLATE_MAX_DISTANCE :: 32768;
DEFLATE_MAX_LENGTH :: 258;

HUFFMAN_MAX_BITS :: 16;
HUFFMAN_FAST_BITS :: 9;
HUFFMAN_FAST_MASK :: ((1 << HUFFMAN_FAST_BITS) - 1);

// Base length for length codes 257..285 (RFC 1951, 3.2.5).
Z_LENGTH_BASE := [31]u16{
	3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,
	67,83,99,115,131,163,195,227,258,0,0,
};

// Extra bits to read for each length code.
Z_LENGTH_EXTRA := [31]u8{
	0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0,
};

// Base distance for distance codes 0..29.
Z_DIST_BASE := [32]u16{
	1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,
	257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0,
};

// Extra bits to read for each distance code.
Z_DIST_EXTRA := [32]u8{
	0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13,0,0,
};

// Order in which code-length code lengths are stored (RFC 1951, 3.2.7).
Z_LENGTH_DEZIGZAG := []u8{
	16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15,
};

// Fixed literal/length code lengths for BTYPE=1 blocks (RFC 1951, 3.2.6).
Z_FIXED_LENGTH := [288]u8{
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
	9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
	9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
	9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
	7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8,
};

// Fixed distance code lengths for BTYPE=1 blocks.
Z_FIXED_DIST := [32]u8{
	5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
};

/*
	Accelerate all cases in default tables.
*/
ZFAST_BITS :: 9;
ZFAST_MASK :: ((1 << ZFAST_BITS) - 1);

/*
	ZLIB-style Huffman encoding.
	JPEG packs from left, ZLIB from right. We can't share code.
*/
Huffman_Table :: struct {
	// fast[bits] packs (code length << 9 | symbol); 0 means "use slow path".
	fast: [1 << ZFAST_BITS]u16,
	firstcode: [16]u16,
	// Pre-shifted upper bound per code length; [16] is a sentinel.
	maxcode: [17]int,
	firstsymbol: [16]u16,
	size: [288]u8,
	value: [288]u16,
};
|
||||
|
||||
// Implementation starts here
|
||||
|
||||
// Reverses the low `bits` bits of n (bits <= 16).
z_bit_reverse :: #force_inline proc(n: u16, bits: u8) -> (r: u16) {
	assert(bits <= 16);
	// NOTE: Can optimize with llvm.bitreverse.i64 or some bit twiddling
	// by reversing all of the bits and masking out the unneeded ones.
	// Classic mask-and-swap ladder: reverse all 16 bits, then shift down
	// so only the requested `bits` remain.
	v := n;
	v = ((v & 0xAAAA) >> 1) | ((v & 0x5555) << 1);
	v = ((v & 0xCCCC) >> 2) | ((v & 0x3333) << 2);
	v = ((v & 0xF0F0) >> 4) | ((v & 0x0F0F) << 4);
	v = ((v & 0xFF00) >> 8) | ((v & 0x00FF) << 8);
	r = v >> (16 - bits);
	return;
}
|
||||
|
||||
// Writes one decompressed byte to the output stream, folds it into the
// rolling Adler-32 checksum, and records it in the circular sliding window
// so later back-references can read it.
write_byte :: #force_inline proc(z: ^Context, c: u8) -> (err: io.Error) #no_bounds_check {
	c := c;
	// View the local byte as a 1-element slice without allocating.
	buf := transmute([]u8)mem.Raw_Slice{data=&c, len=1};
	z.rolling_hash = hash.adler32(buf, z.rolling_hash);

	_, e := z.output->impl_write(buf);
	if e != .None {
		return e;
	}
	// window_size is a power of two; the modulo keeps the window circular.
	z.last[z.bytes_written % z.window_size] = c;

	z.bytes_written += 1;
	return .None;
}
|
||||
|
||||
// Allocates a zeroed Huffman_Table on the given allocator.
// The caller owns the returned table and must free() it.
allocate_huffman_table :: proc(allocator := context.allocator) -> (z: ^Huffman_Table, err: Error) {

	z = new(Huffman_Table, allocator);
	return z, E_General.OK;
}
|
||||
|
||||
// Builds a canonical Huffman decoding table from per-symbol code lengths,
// including the 9-bit fast-lookup table for short codes.
// Returns Huffman_Bad_Sizes / Huffman_Bad_Code_Lengths on invalid input.
build_huffman :: proc(z: ^Huffman_Table, code_lengths: []u8) -> (err: Error) {
	sizes: [HUFFMAN_MAX_BITS+1]int;
	next_code: [HUFFMAN_MAX_BITS]int;

	k := int(0);

	mem.zero_slice(sizes[:]);
	mem.zero_slice(z.fast[:]);

	// Histogram: count how many symbols use each code length.
	for v, _ in code_lengths {
		sizes[v] += 1;
	}
	sizes[0] = 0;

	// No length may have more codes than fit in that many bits.
	for i in 1..16 {
		if sizes[i] > (1 << uint(i)) {
			return E_Deflate.Huffman_Bad_Sizes;
		}
	}
	code := int(0);

	// Assign canonical first code / first symbol per length,
	// and pre-shift maxcode for the fast comparison in the decoder.
	for i in 1..<16 {
		next_code[i] = code;
		z.firstcode[i] = u16(code);
		z.firstsymbol[i] = u16(k);
		code = code + sizes[i];
		if sizes[i] != 0 {
			if (code - 1 >= (1 << u16(i))) {
				return E_Deflate.Huffman_Bad_Code_Lengths;
			}
		}
		z.maxcode[i] = code << (16 - uint(i));
		code <<= 1;
		k += int(sizes[i]);
	}

	z.maxcode[16] = 0x10000; // Sentinel
	c: int;

	// Record each symbol's size/value and populate the fast table for codes
	// short enough to fit in ZFAST_BITS (bit-reversed, replicated).
	for v, ci in code_lengths {
		if v != 0 {
			c = next_code[v] - int(z.firstcode[v]) + int(z.firstsymbol[v]);
			fastv := u16((u16(v) << 9) | u16(ci));
			z.size[c] = u8(v);
			z.value[c] = u16(ci);
			if (v <= ZFAST_BITS) {
				j := z_bit_reverse(u16(next_code[v]), v);
				for j < (1 << ZFAST_BITS) {
					z.fast[j] = fastv;
					j += (1 << v);
				}
			}
			next_code[v] += 1;
		}
	}
	return E_General.OK;
}
|
||||
|
||||
// Decodes one Huffman symbol for codes longer than the fast table covers.
// Peeks 16 bits, bit-reverses them (ZLIB codes are MSB-first inside an
// LSB-first bit stream), finds the code length via the maxcode table, then
// consumes exactly that many bits.
decode_huffman_slowpath :: proc(z: ^Context, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {

	r = 0;
	err = E_General.OK;

	k: int;
	s: u8;

	code := u16(compress.peek_bits_lsb(z, 16));

	k = int(z_bit_reverse(code, 16));

	// Scan lengths above the fast-path width until k fits under maxcode[s].
	#no_bounds_check for s = HUFFMAN_FAST_BITS+1; ; {
		if k < t.maxcode[s] {
			break;
		}
		s += 1;
	}
	if (s >= 16) {
		return 0, E_Deflate.Bad_Huffman_Code;
	}
	// code size is s, so:
	b := (k >> (16-s)) - int(t.firstcode[s]) + int(t.firstsymbol[s]);
	// NOTE(review): size_of(t.size) equals len(t.size) only because the element
	// type is u8; a len() bound would be more intention-revealing.
	if b >= size_of(t.size) {
		return 0, E_Deflate.Bad_Huffman_Code;
	}
	if t.size[b] != s {
		return 0, E_Deflate.Bad_Huffman_Code;
	}

	compress.consume_bits_lsb(z, s);

	r = t.value[b];
	return r, E_General.OK;
}
|
||||
|
||||
// Decodes one Huffman symbol: tries the 9-bit fast lookup first and falls
// back to decode_huffman_slowpath for longer codes.
decode_huffman :: proc(z: ^Context, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {

	if z.num_bits < 16 {
		if z.num_bits == -100 {
			// Sentinel set by refill_lsb when the code buffer is malformed.
			return 0, E_ZLIB.Code_Buffer_Malformed;
		}
		compress.refill_lsb(z);
		if z.eof {
			return 0, E_General.Stream_Too_Short;
		}
	}
	// Fast table packs (length << 9 | symbol); 0 means "not a short code".
	#no_bounds_check b := t.fast[z.code_buffer & ZFAST_MASK];
	if b != 0 {
		s := u8(b >> ZFAST_BITS);
		compress.consume_bits_lsb(z, s);
		return b & 511, E_General.OK;
	}
	return decode_huffman_slowpath(z, t);
}
|
||||
|
||||
// Decodes one DEFLATE compressed block using the given literal/length and
// distance Huffman tables, writing decompressed bytes to z's output.
// Returns E_General.OK at the end-of-block marker (symbol 256).
parse_huffman_block :: proc(z: ^Context, z_repeat, z_offset: ^Huffman_Table) -> (err: Error) #no_bounds_check {

	#no_bounds_check for {
		value, e := decode_huffman(z, z_repeat);
		if !is_kind(e, E_General.OK) {
			// Bug fix: previously returned `err`, which is still its zero
			// value (nil) here — decode failures were silently dropped.
			// Propagate the actual decode error instead.
			return e;
		}
		if value < 256 {
			// Literal byte.
			we := write_byte(z, u8(value));
			if we != .None {
				return E_General.Output_Too_Short;
			}
		} else {
			if value == 256 {
				// End of block
				return E_General.OK;
			}

			// Length/distance pair. Decode the match length first...
			value -= 257;
			length := Z_LENGTH_BASE[value];
			if Z_LENGTH_EXTRA[value] > 0 {
				length += u16(compress.read_bits_lsb(z, Z_LENGTH_EXTRA[value]));
			}

			// ...then the backwards distance.
			value, e = decode_huffman(z, z_offset);
			if !is_kind(e, E_General.OK) {
				return E_Deflate.Bad_Huffman_Code;
			}

			distance := Z_DIST_BASE[value];
			if Z_DIST_EXTRA[value] > 0 {
				distance += u16(compress.read_bits_lsb(z, Z_DIST_EXTRA[value]));
			}

			if z.bytes_written < i64(distance) {
				// Distance is longer than we've decoded so far.
				return E_Deflate.Bad_Distance;
			}

			offset := i64(z.bytes_written - i64(distance));
			/*
				These might be sped up with a repl_byte call that copies
				from the already written output more directly, and that
				update the Adler checksum once after.

				That way we'd suffer less Stream vtable overhead.
			*/
			if distance == 1 {
				/*
					Replicate the last outputted byte, length times.
				*/
				if length > 0 {
					b, pe := compress.peek_back_byte(z, offset);
					if pe != .None {
						return E_General.Output_Too_Short;
					}
					#no_bounds_check for _ in 0..<length {
						write_byte(z, b);
					}
				}
			} else {
				// Copy `length` bytes from the sliding window; the copy may
				// overlap the bytes it is writing, hence byte-at-a-time.
				if length > 0 {
					#no_bounds_check for _ in 0..<length {
						b, pe := compress.peek_back_byte(z, offset);
						if pe != .None {
							return E_General.Output_Too_Short;
						}
						write_byte(z, b);
						offset += 1;
					}
				}
			}
		}
	}
}
|
||||
|
||||
// Inflates a ZLIB stream (or, with raw=true, a bare DEFLATE stream) from
// ctx.input to ctx.output. For ZLIB input this validates the CMF/FLG header
// per RFC 1950 and verifies the trailing Adler-32 checksum.
inflate_from_stream :: proc(using ctx: ^Context, raw := false, allocator := context.allocator) -> (err: Error) #no_bounds_check {
	/*
		ctx.input must be an io.Stream backed by an implementation that supports:
		- read
		- size

		ctx.output must be an io.Stream backed by an implementation that supports:
		- write

		raw determines whether the ZLIB header is processed, or we're inflating a raw
		DEFLATE stream.
	*/

	if !raw {
		data_size := io.size(ctx.input);
		if data_size < 6 {
			return E_General.Stream_Too_Short;
		}

		cmf, _ := compress.read_u8(ctx);

		method := Compression_Method(cmf & 0xf);
		if method != .DEFLATE {
			return E_General.Unknown_Compression_Method;
		}

		cinfo := (cmf >> 4) & 0xf;
		if cinfo > 7 {
			return E_ZLIB.Unsupported_Window_Size;
		}
		ctx.window_size = 1 << (cinfo + 8);

		flg, _ := compress.read_u8(ctx);

		/*
			RFC 1950: the FCHECK bits of FLG are valid iff CMF*256 + FLG is a
			multiple of 31.

			Bug fix: the old check compared `flg & 0x1f` against
			`(cmf << 8 | flg) & 0x1f`, but `cmf << 8` truncates to zero in u8
			arithmetic, so both sides were always equal and the test always
			passed. Widen to u16 and check divisibility by 31 as the spec requires.
		*/
		if (u16(cmf) << 8 | u16(flg)) % 31 != 0 {
			return E_General.Checksum_Failed;
		}

		fdict := (flg >> 5) & 1;
		/*
			We don't handle built-in dictionaries for now.
			They're application specific and PNG doesn't use them.
		*/
		if fdict != 0 {
			return E_ZLIB.FDICT_Unsupported;
		}

		// flevel := Compression_Level((flg >> 6) & 3);
		/*
			Inflate can consume bits belonging to the Adler checksum.
			We pass the entire stream to Inflate and will unget bytes if we need to
			at the end to compare checksums.
		*/

		// Seed the Adler32 rolling checksum.
		ctx.rolling_hash = 1;
	}

	// Parse ZLIB stream without header.
	err = inflate_raw(ctx);
	if !is_kind(err, E_General.OK) {
		return err;
	}

	if !raw {
		// The Adler-32 trailer is byte-aligned and big-endian.
		compress.discard_to_next_byte_lsb(ctx);

		adler32 := compress.read_bits_lsb(ctx, 8) << 24 | compress.read_bits_lsb(ctx, 8) << 16 | compress.read_bits_lsb(ctx, 8) << 8 | compress.read_bits_lsb(ctx, 8);
		if ctx.rolling_hash != u32(adler32) {
			return E_General.Checksum_Failed;
		}
	}
	return E_General.OK;
}
|
||||
|
||||
// @(optimization_mode="speed")

/*
	Decompress a raw DEFLATE stream (RFC 1951) from z.input into z.output.

	Expects any ZLIB/GZIP wrapper to have been consumed already by the caller.
	Allocates three Huffman tables and a rolling window of z.window_size bytes
	(DEFLATE_MAX_DISTANCE if unset); all are freed before returning.

	Returns E_General.OK on success, or a Deflate/General error on malformed input.
*/
inflate_from_stream_raw :: proc(z: ^Context, allocator := context.allocator) -> (err: Error) #no_bounds_check {
	final := u32(0);
	type  := u32(0);

	z.num_bits    = 0;
	z.code_buffer = 0;

	z_repeat:      ^Huffman_Table; // Literal/length alphabet.
	z_offset:      ^Huffman_Table; // Distance alphabet.
	codelength_ht: ^Huffman_Table; // Code lengths of the dynamic tables themselves.

	/*
		NOTE(review): these allocations pass `context.allocator` and ignore the
		`allocator` parameter. Left as-is because the matching `free` calls below
		also use the context allocator; change both together if this is unintended.
	*/
	z_repeat, err = allocate_huffman_table(allocator=context.allocator);
	if !is_kind(err, E_General.OK) {
		return err;
	}
	z_offset, err = allocate_huffman_table(allocator=context.allocator);
	if !is_kind(err, E_General.OK) {
		return err;
	}
	codelength_ht, err = allocate_huffman_table(allocator=context.allocator);
	if !is_kind(err, E_General.OK) {
		return err;
	}
	defer free(z_repeat);
	defer free(z_offset);
	defer free(codelength_ht);

	if z.window_size == 0 {
		z.window_size = DEFLATE_MAX_DISTANCE;
	}

	// Allocate rolling window buffer used to resolve back-references.
	last_b := mem.make_dynamic_array_len_cap([dynamic]u8, z.window_size, z.window_size, allocator);
	z.last = &last_b;
	defer delete(last_b);

	for {
		final = compress.read_bits_lsb(z, 1);
		type  = compress.read_bits_lsb(z, 2);

		if type == 0 {
			/*
				Stored (uncompressed) block: align to the next byte boundary,
				then read LEN and NLEN, where NLEN is the one's complement of LEN.
			*/
			compress.discard_to_next_byte_lsb(z);

			uncompressed_len := int(compress.read_bits_lsb(z, 16));
			length_check     := int(compress.read_bits_lsb(z, 16));
			/*
				FIX: this check used to read `uncompressed_len != ~length_check`.
				Both values live in full-width ints, so the unary complement flips
				all bits and always produces a negative number that can never equal
				LEN (0..65535) — every valid stored block was rejected.
				Compare against the low 16 bits of the complement instead.
			*/
			if uncompressed_len != (~length_check & 0xffff) {
				return E_Deflate.Len_Nlen_Mismatch;
			}

			/*
				TODO: Maybe speed this up with a stream-to-stream copy (read_from)
				and a single Adler32 update after.
			*/
			#no_bounds_check for uncompressed_len > 0 {
				compress.refill_lsb(z);
				lit := compress.read_bits_lsb(z, 8);
				write_byte(z, u8(lit));
				uncompressed_len -= 1;
			}
		} else if type == 3 {
			// Block type 3 is reserved by the spec and always an error.
			return E_Deflate.BType_3;
		} else {
			if type == 1 {
				// Use the fixed code lengths defined by RFC 1951 §3.2.6.
				err = build_huffman(z_repeat, Z_FIXED_LENGTH[:]);
				if !is_kind(err, E_General.OK) {
					return err;
				}
				err = build_huffman(z_offset, Z_FIXED_DIST[:]);
				if !is_kind(err, E_General.OK) {
					return err;
				}
			} else {
				// Dynamic tables: the code lengths are themselves Huffman-coded.
				lencodes: [286+32+137]u8;
				codelength_sizes: [19]u8;

				n: u32;

				compress.refill_lsb(z, 14);
				hlit  := compress.read_bits_no_refill_lsb(z, 5) + 257;
				hdist := compress.read_bits_no_refill_lsb(z, 5) + 1;
				hclen := compress.read_bits_no_refill_lsb(z, 4) + 4;
				ntot  := hlit + hdist;

				#no_bounds_check for i in 0..<hclen {
					s := compress.read_bits_lsb(z, 3);
					codelength_sizes[Z_LENGTH_DEZIGZAG[i]] = u8(s);
				}
				err = build_huffman(codelength_ht, codelength_sizes[:]);
				if !is_kind(err, E_General.OK) {
					return err;
				}

				n = 0;
				c: u16;

				for n < ntot {
					c, err = decode_huffman(z, codelength_ht);
					if !is_kind(err, E_General.OK) {
						return err;
					}

					// `c` is unsigned, so only the upper bound needs checking
					// (the old `c < 0 ||` clause was dead code).
					if c >= 19 {
						return E_Deflate.Huffman_Bad_Code_Lengths;
					}
					if c < 16 {
						lencodes[n] = u8(c);
						n += 1;
					} else {
						fill := u8(0);
						compress.refill_lsb(z, 7);
						if c == 16 {
							// Repeat previous code length 3..6 times.
							c = u16(compress.read_bits_no_refill_lsb(z, 2) + 3);
							if n == 0 {
								return E_Deflate.Huffman_Bad_Code_Lengths;
							}
							fill = lencodes[n - 1];
						} else if c == 17 {
							// Repeat zero 3..10 times.
							c = u16(compress.read_bits_no_refill_lsb(z, 3) + 3);
						} else if c == 18 {
							// Repeat zero 11..138 times.
							c = u16(compress.read_bits_no_refill_lsb(z, 7) + 11);
						} else {
							return E_Deflate.Huffman_Bad_Code_Lengths;
						}

						if ntot - n < u32(c) {
							return E_Deflate.Huffman_Bad_Code_Lengths;
						}

						nc := n + u32(c);
						#no_bounds_check for ; n < nc; n += 1 {
							lencodes[n] = fill;
						}
					}
				}

				if n != ntot {
					return E_Deflate.Huffman_Bad_Code_Lengths;
				}

				err = build_huffman(z_repeat, lencodes[:hlit]);
				if !is_kind(err, E_General.OK) {
					return err;
				}

				err = build_huffman(z_offset, lencodes[hlit:ntot]);
				if !is_kind(err, E_General.OK) {
					return err;
				}
			}
			err = parse_huffman_block(z, z_repeat, z_offset);
			if !is_kind(err, E_General.OK) {
				return err;
			}
		}
		if final == 1 {
			break;
		}
	}
	return E_General.OK;
}
|
||||
|
||||
/*
	Inflate a ZLIB stream (or a raw DEFLATE stream when `raw` is true)
	held in a byte slice, writing the decompressed payload into `buf`.
*/
inflate_from_byte_array :: proc(input: ^[]u8, buf: ^bytes.Buffer, raw := false) -> (err: Error) {
	ctx := Context{};

	// Wrap the input slice in a reader stream.
	reader := bytes.Reader{};
	bytes.reader_init(&reader, input^);
	ctx.input = bytes.reader_to_stream(&reader);

	// Decompressed output accumulates in the caller's buffer.
	ctx.output = bytes.buffer_to_stream(buf);

	return inflate_from_stream(&ctx, raw);
}
|
||||
|
||||
/*
	Raw variant of `inflate_from_byte_array`.
	NOTE(review): the `raw` parameter exists only so this signature matches the
	`inflate_raw` proc group below; its value is ignored and `true` is always
	passed down.
*/
inflate_from_byte_array_raw :: proc(input: ^[]u8, buf: ^bytes.Buffer, raw := false) -> (err: Error) {
	return inflate_from_byte_array(input, buf, true);
}
|
||||
|
||||
// Overload sets: dispatch to the stream or byte-array implementation by argument type.
inflate     :: proc{inflate_from_stream, inflate_from_byte_array};
inflate_raw :: proc{inflate_from_stream_raw, inflate_from_byte_array_raw};
|
||||
107
core/image/common.odin
Normal file
107
core/image/common.odin
Normal file
@@ -0,0 +1,107 @@
|
||||
package image
|
||||
|
||||
import "core:bytes"
|
||||
|
||||
// A decoded image and its metadata, independent of the source format.
Image :: struct {
	width:    int,
	height:   int,
	channels: int,          // Channel count as stored in `pixels`.
	depth:    u8,           // Bits per channel (e.g. 8 or 16).
	pixels:   bytes.Buffer, // Raw pixel data.
	/*
		Some image loaders/writers can return/take an optional background color.
		For convenience, we return them as u16 so we don't need to switch on the type
		in our viewer, and can just test against nil.
	*/
	background: Maybe([3]u16),
	// Format-specific extra data, e.g. a png.Info with ancillary chunks.
	sidecar: any,
}
|
||||
|
||||
/*
|
||||
Image_Option:
|
||||
`.info`
|
||||
This option behaves as `return_ihdr` and `do_not_decompress_image` and can be used
|
||||
to gather an image's dimensions and color information.
|
||||
|
||||
`.return_header`
|
||||
Fill out img.sidecar.header with the image's format-specific header struct.
|
||||
If we only care about the image specs, we can set `return_header` +
|
||||
`do_not_decompress_image`, or `.info`, which works as if both of these were set.
|
||||
|
||||
`.return_metadata`
|
||||
Returns all chunks not needed to decode the data.
|
||||
It also returns the header as if `.return_header` is set.
|
||||
|
||||
`do_not_decompress_image`
|
||||
Skip decompressing IDAT chunk, defiltering and the rest.
|
||||
|
||||
`alpha_add_if_missing`
|
||||
If the image has no alpha channel, it'll add one set to max(type).
|
||||
Turns RGB into RGBA and Gray into Gray+Alpha
|
||||
|
||||
`alpha_drop_if_present`
|
||||
If the image has an alpha channel, drop it.
|
||||
You may want to use `alpha_premultiply` in this case.
|
||||
|
||||
NOTE: For PNG, this also skips handling of the tRNS chunk, if present,
|
||||
unless you select `alpha_premultiply`.
|
||||
In this case it'll premultiply the specified pixels in question only,
|
||||
as the others are implicitly fully opaque.
|
||||
|
||||
`alpha_premultiply`
|
||||
If the image has an alpha channel, returns image data as follows:
|
||||
RGB *= A, Gray = Gray *= A
|
||||
|
||||
`blend_background`
|
||||
If a bKGD chunk is present in a PNG, we normally just set `img.background`
|
||||
with its value and leave it up to the application to decide how to display the image,
|
||||
as per the PNG specification.
|
||||
|
||||
With `blend_background` selected, we blend the image against the background
|
||||
color. As this negates the use for an alpha channel, we'll drop it _unless_
|
||||
you also specify `alpha_add_if_missing`.
|
||||
|
||||
Options that don't apply to an image format will be ignored by their loader.
|
||||
*/
|
||||
|
||||
// Loader options; detailed semantics are described in the block comment above.
Option :: enum {
	info = 0,                // Acts as `return_header` + `do_not_decompress_image`.
	do_not_decompress_image, // Stop after header/metadata; skip pixel decoding.
	return_header,           // Fill img.sidecar with the format-specific header.
	return_metadata,         // Also return ancillary chunks; implies the header too.
	alpha_add_if_missing,    // Add an opaque alpha channel when absent.
	alpha_drop_if_present,   // Drop the alpha channel when present.
	alpha_premultiply,       // Premultiply color channels by alpha.
	blend_background,        // Blend against the file's background color (PNG bKGD).
}
Options :: distinct bit_set[Option];
|
||||
|
||||
// PNG-specific decode errors; surfaced through the compress.Error union.
PNG_Error :: enum {
	Invalid_PNG_Signature,
	IHDR_Not_First_Chunk,
	IHDR_Corrupt,
	IDAT_Missing,
	IDAT_Must_Be_Contiguous,
	IDAT_Corrupt,
	PNG_Does_Not_Adhere_to_Spec,
	PLTE_Encountered_Unexpectedly,
	PLTE_Invalid_Length,
	TRNS_Encountered_Unexpectedly,
	BKGD_Invalid_Length,
	Invalid_Image_Dimensions,
	Unknown_Color_Type,
	Invalid_Color_Bit_Depth_Combo,
	Unknown_Filter_Method,
	Unknown_Interlace_Method,
}
|
||||
|
||||
|
||||
/*
|
||||
Functions to help with image buffer calculations
|
||||
*/
|
||||
|
||||
/*
	Compute the byte size of an image buffer: bits per row are rounded up
	to whole bytes, `extra_row_bytes` (e.g. PNG filter bytes) is added per
	row, and the per-row total is multiplied by the row count.
*/
compute_buffer_size :: proc(width, height, channels, depth: int, extra_row_bytes := int(0)) -> (size: int) {
	bits_per_row  := channels * width * depth;
	bytes_per_row := (bits_per_row + 7) >> 3;
	size = (bytes_per_row + extra_row_bytes) * height;
	return;
}
|
||||
327
core/image/png/example.odin
Normal file
327
core/image/png/example.odin
Normal file
@@ -0,0 +1,327 @@
|
||||
//+ignore
|
||||
package png
|
||||
|
||||
import "core:compress"
|
||||
import "core:image"
|
||||
import "core:image/png"
|
||||
import "core:bytes"
|
||||
import "core:fmt"
|
||||
|
||||
// For PPM writer
|
||||
import "core:mem"
|
||||
import "core:os"
|
||||
|
||||
/*
	Example: load a PNG, dump its ancillary chunks using the helper decoders,
	then (unless decoding was skipped via options) save the pixels as a PPM.
*/
main :: proc() {
	file: string;

	options := image.Options{};
	err: compress.Error;
	img: ^image.Image;

	file = "../../../misc/logo-slim.png";

	img, err = png.load(file, options);
	defer png.destroy(img);

	if !png.is_kind(err, png.E_General.OK) {
		fmt.printf("Trying to read PNG file %v returned %v\n", file, err);
	} else {
		v: png.Info;
		ok: bool;

		fmt.printf("Image: %vx%vx%v, %v-bit.\n", img.width, img.height, img.channels, img.depth);

		// The PNG loader stashes its Info (ancillary chunks) in the sidecar.
		if v, ok = img.sidecar.(png.Info); ok {
			// Handle ancillary chunks as you wish.
			// We provide helper functions for a few types.
			for c in v.chunks {
				#partial switch (c.header.type) {
				case .tIME:
					t, _ := png.core_time(c);
					fmt.printf("[tIME]: %v\n", t);
				case .gAMA:
					fmt.printf("[gAMA]: %v\n", png.gamma(c));
				case .pHYs:
					phys := png.phys(c);
					if phys.unit == .Meter {
						xm := f32(img.width)  / f32(phys.ppu_x);
						ym := f32(img.height) / f32(phys.ppu_y);
						dpi_x, dpi_y := png.phys_to_dpi(phys);
						fmt.printf("[pHYs] Image resolution is %v x %v pixels per meter.\n", phys.ppu_x, phys.ppu_y);
						fmt.printf("[pHYs] Image resolution is %v x %v DPI.\n", dpi_x, dpi_y);
						fmt.printf("[pHYs] Image dimensions are %v x %v meters.\n", xm, ym);
					} else {
						fmt.printf("[pHYs] x: %v, y: %v pixels per unknown unit.\n", phys.ppu_x, phys.ppu_y);
					}
				case .iTXt, .zTXt, .tEXt:
					res, ok_text := png.text(c);
					if ok_text {
						if c.header.type == .iTXt {
							fmt.printf("[iTXt] %v (%v:%v): %v\n", res.keyword, res.language, res.keyword_localized, res.text);
						} else {
							fmt.printf("[tEXt/zTXt] %v: %v\n", res.keyword, res.text);
						}
					}
					// NOTE(review): this defer fires at the end of the case scope,
					// freeing `res` whether or not ok_text was true — confirm intended.
					defer png.text_destroy(res);
				case .bKGD:
					fmt.printf("[bKGD] %v\n", img.background);
				case .eXIf:
					res, ok_exif := png.exif(c);
					if ok_exif {
						/*
							Other than checking the signature and byte order, we don't handle Exif data.
							If you wish to interpret it, pass it to an Exif parser.
						*/
						fmt.printf("[eXIf] %v\n", res);
					}
				case .PLTE:
					plte, plte_ok := png.plte(c);
					if plte_ok {
						fmt.printf("[PLTE] %v\n", plte);
					} else {
						fmt.printf("[PLTE] Error\n");
					}
				case .hIST:
					res, ok_hist := png.hist(c);
					if ok_hist {
						fmt.printf("[hIST] %v\n", res);
					}
				case .cHRM:
					res, ok_chrm := png.chrm(c);
					if ok_chrm {
						fmt.printf("[cHRM] %v\n", res);
					}
				case .sPLT:
					res, ok_splt := png.splt(c);
					if ok_splt {
						fmt.printf("[sPLT] %v\n", res);
					}
					png.splt_destroy(res);
				case .sBIT:
					if res, ok_sbit := png.sbit(c); ok_sbit {
						fmt.printf("[sBIT] %v\n", res);
					}
				case .iCCP:
					res, ok_iccp := png.iccp(c);
					if ok_iccp {
						fmt.printf("[iCCP] %v\n", res);
					}
					png.iccp_destroy(res);
				case .sRGB:
					if res, ok_srgb := png.srgb(c); ok_srgb {
						fmt.printf("[sRGB] Rendering intent: %v\n", res);
					}
				case:
					// Unhandled chunk types: print name and raw payload.
					type := c.header.type;
					name := png.chunk_type_to_name(&type);
					fmt.printf("[%v]: %v\n", name, c.data);
				}
			}
		}
	}

	// Only write a PPM when pixels were actually decoded.
	if is_kind(err, E_General.OK) && .do_not_decompress_image not_in options && .info not_in options {
		if ok := write_image_as_ppm("out.ppm", img); ok {
			fmt.println("Saved decoded image.");
		} else {
			fmt.println("Error saving out.ppm.");
			fmt.println(img);
		}
	}
}
|
||||
|
||||
// Crappy PPM writer used during testing. Don't use in production.
//
// Writes the image as binary PPM ("P6"). Gray / Gray+Alpha / RGBA inputs are
// expanded or blended down to RGB; alpha is blended against img.background
// (or a generated checkerboard for 8-bit RGBA) where applicable.
write_image_as_ppm :: proc(filename: string, image: ^image.Image) -> (success: bool) {

	// Background fallback: the image's bKGD color if present, else a checkerboard
	// (magenta-ish for 16-bit `high` mode, gray for 8-bit).
	_bg :: proc(bg: Maybe([3]u16), x, y: int, high := true) -> (res: [3]u16) {
		if v, ok := bg.?; ok {
			res = v;
		} else {
			if high {
				l := u16(30 * 256 + 30);

				if (x & 4 == 0) ~ (y & 4 == 0) {
					res = [3]u16{l, 0, l};
				} else {
					res = [3]u16{l >> 1, 0, l >> 1};
				}
			} else {
				if (x & 4 == 0) ~ (y & 4 == 0) {
					res = [3]u16{30, 30, 30};
				} else {
					res = [3]u16{15, 15, 15};
				}
			}
		}
		return;
	}

	// profiler.timed_proc();
	using image;
	using os;

	flags: int = O_WRONLY|O_CREATE|O_TRUNC;

	img := image;

	// PPM 16-bit sample values are big-endian per the Netpbm format.
	when ODIN_ENDIAN == "little" {
		if img.depth == 16 {
			// The pixel components are in Big Endian. Let's byteswap back.
			// NOTE(review): this mutates img.pixels in place.
			input  := mem.slice_data_cast([]u16,   img.pixels.buf[:]);
			output := mem.slice_data_cast([]u16be, img.pixels.buf[:]);
			#no_bounds_check for v, i in input {
				output[i] = u16be(v);
			}
		}
	}

	pix := bytes.buffer_to_bytes(&img.pixels);

	if len(pix) == 0 || len(pix) < image.width * image.height * int(image.channels) {
		return false;
	}

	mode: int = 0;
	when ODIN_OS == "linux" || ODIN_OS == "darwin" {
		// NOTE(justasd): 644 (owner read, write; group read; others read)
		mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;
	}

	fd, err := open(filename, flags, mode);
	if err != 0 {
		return false;
	}
	defer close(fd);

	// Maxval is (1 << depth) - 1; `<<` binds tighter than `-` in Odin.
	write_string(fd,
		fmt.tprintf("P6\n%v %v\n%v\n", width, height, (1 << depth -1)),
	);

	if channels == 3 {
		// Already RGB: write pixel data verbatim.
		// We don't handle transparency here...
		write_ptr(fd, raw_data(pix), len(pix));
	} else {
		// Expand/blend to a 3-channel scratch buffer, then write that.
		bpp := depth == 16 ? 2 : 1;
		bytes_needed := width * height * 3 * bpp;

		op := bytes.Buffer{};
		bytes.buffer_init_allocator(&op, bytes_needed, bytes_needed);
		defer bytes.buffer_destroy(&op);

		if channels == 1 {
			// Grayscale: replicate the single sample into R, G and B.
			if depth == 16 {
				assert(len(pix) == width * height * 2);
				p16 := mem.slice_data_cast([]u16, pix);
				o16 := mem.slice_data_cast([]u16, op.buf[:]);
				#no_bounds_check for len(p16) != 0 {
					r := u16(p16[0]);
					o16[0] = r;
					o16[1] = r;
					o16[2] = r;
					p16 = p16[1:];
					o16 = o16[3:];
				}
			} else {
				o := 0;
				for i := 0; i < len(pix); i += 1 {
					r := pix[i];
					op.buf[o  ] = r;
					op.buf[o+1] = r;
					op.buf[o+2] = r;
					o += 3;
				}
			}
			write_ptr(fd, raw_data(op.buf), len(op.buf));
		} else if channels == 2 {
			// Gray + Alpha.
			if depth == 16 {
				// 16-bit: blend against img.background (0 when unset).
				p16 := mem.slice_data_cast([]u16, pix);
				o16 := mem.slice_data_cast([]u16, op.buf[:]);

				bgcol := img.background;

				#no_bounds_check for len(p16) != 0 {
					r := f64(u16(p16[0]));
					bg: f64;
					if bgcol != nil {
						v := bgcol.([3]u16)[0];
						bg = f64(v);
					}
					a := f64(u16(p16[1])) / 65535.0;
					l := (a * r) + (1 - a) * bg;

					o16[0] = u16(l);
					o16[1] = u16(l);
					o16[2] = u16(l);

					p16 = p16[2:];
					o16 = o16[3:];
				}
			} else {
				// 8-bit: premultiply only; no background blend in this path.
				o := 0;
				for i := 0; i < len(pix); i += 2 {
					r := pix[i]; a := pix[i+1]; a1 := f32(a) / 255.0;
					c := u8(f32(r) * a1);
					op.buf[o  ] = c;
					op.buf[o+1] = c;
					op.buf[o+2] = c;
					o += 3;
				}
			}
			write_ptr(fd, raw_data(op.buf), len(op.buf));
		} else if channels == 4 {
			// RGBA: blend against the _bg helper's background color.
			if depth == 16 {
				p16 := mem.slice_data_cast([]u16be, pix);
				o16 := mem.slice_data_cast([]u16be, op.buf[:]);

				#no_bounds_check for len(p16) != 0 {

					bg := _bg(img.background, 0, 0);
					r := f32(p16[0]);
					g := f32(p16[1]);
					b := f32(p16[2]);
					a := f32(p16[3]) / 65535.0;

					lr := (a * r) + (1 - a) * f32(bg[0]);
					lg := (a * g) + (1 - a) * f32(bg[1]);
					lb := (a * b) + (1 - a) * f32(bg[2]);

					o16[0] = u16be(lr);
					o16[1] = u16be(lg);
					o16[2] = u16be(lb);

					p16 = p16[4:];
					o16 = o16[3:];
				}
			} else {
				o := 0;

				for i := 0; i < len(pix); i += 4 {

					// Pixel coordinates drive the checkerboard fallback background.
					x := (i / 4) % width;
					y := i / width / 4;

					_b := _bg(img.background, x, y, false);
					bgcol := [3]u8{u8(_b[0]), u8(_b[1]), u8(_b[2])};

					r := f32(pix[i]);
					g := f32(pix[i+1]);
					b := f32(pix[i+2]);
					a := f32(pix[i+3]) / 255.0;

					lr := u8(f32(r) * a + (1 - a) * f32(bgcol[0]));
					lg := u8(f32(g) * a + (1 - a) * f32(bgcol[1]));
					lb := u8(f32(b) * a + (1 - a) * f32(bgcol[2]));
					op.buf[o  ] = lr;
					op.buf[o+1] = lg;
					op.buf[o+2] = lb;
					o += 3;
				}
			}
			write_ptr(fd, raw_data(op.buf), len(op.buf));
		} else {
			// Unsupported channel count.
			return false;
		}
	}
	return true;
}
|
||||
521
core/image/png/helpers.odin
Normal file
521
core/image/png/helpers.odin
Normal file
@@ -0,0 +1,521 @@
|
||||
package png
|
||||
|
||||
import "core:image"
|
||||
import "core:compress/zlib"
|
||||
import coretime "core:time"
|
||||
import "core:strings"
|
||||
import "core:bytes"
|
||||
import "core:mem"
|
||||
|
||||
/*
|
||||
These are a few useful utility functions to work with PNG images.
|
||||
*/
|
||||
|
||||
/*
	Cleanup of image-specific data.
	There are other helpers for cleanup of PNG-specific data.
	Those are named *_destroy, where * is the name of the helper.
*/

destroy :: proc(img: ^Image) {
	// A nil image means load() returned with an error; nothing to free.
	if img == nil {
		return;
	}

	bytes.buffer_destroy(&img.pixels);
	/*
		The individual chunks (and info.chunks) are allocated on the temp
		allocator — see read_chunk — so only the Image itself and its pixel
		buffer need freeing here.
	*/
	free(img);
}
|
||||
|
||||
/*
|
||||
Chunk helpers
|
||||
*/
|
||||
|
||||
// Decode a gAMA chunk: the stored value is gamma scaled by 100,000.
gamma :: proc(c: Chunk) -> f32 {
	assert(c.header.type == .gAMA);
	res := (^gAMA)(raw_data(c.data))^;
	// The disabled `else` branch is a workaround kept for the old backend;
	// the active branch is correct with -llvm-api.
	when true {
		// Returns the wrong result on old backend
		// Fixed for -llvm-api
		return f32(res.gamma_100k) / 100_000.0;
	} else {
		return f32(u32(res.gamma_100k)) / 100_000.0;
	}
}
|
||||
|
||||
// Inches per meter (1000 mm / 25.4 mm-per-inch), for pHYs DPI conversion.
INCHES_PER_METER :: 1000.0 / 25.4;

// Reinterpret a pHYs chunk's payload as the pHYs struct.
phys :: proc(c: Chunk) -> pHYs {
	assert(c.header.type == .pHYs);
	return (^pHYs)(raw_data(c.data))^;
}

// Convert a pHYs chunk's pixels-per-meter values into dots-per-inch.
phys_to_dpi :: proc(p: pHYs) -> (x_dpi, y_dpi: f32) {
	x_dpi = f32(p.ppu_x) / INCHES_PER_METER;
	y_dpi = f32(p.ppu_y) / INCHES_PER_METER;
	return;
}
|
||||
|
||||
// Reinterpret a tIME chunk's payload as the tIME struct.
time :: proc(c: Chunk) -> tIME {
	assert(c.header.type == .tIME);
	return (^tIME)(raw_data(c.data))^;
}
|
||||
|
||||
// Convert a tIME chunk into a core:time Time value.
// `ok` is false when the chunk's fields don't form a valid date/time.
core_time :: proc(c: Chunk) -> (t: coretime.Time, ok: bool) {
	png_time := time(c);
	return coretime.datetime_to_time(
		int(png_time.year),
		int(png_time.month),
		int(png_time.day),
		int(png_time.hour),
		int(png_time.minute),
		int(png_time.second),
	);
}
|
||||
|
||||
/*
	Decode a textual chunk (tEXt, zTXt or iTXt) into a Text struct.
	zTXt and compressed iTXt payloads are inflated with core:compress/zlib.
	All returned strings are freshly cloned; free them with `text_destroy`.
*/
text :: proc(c: Chunk) -> (res: Text, ok: bool) {
	#partial switch c.header.type {
	case .tEXt:
		ok = true;

		// tEXt layout: keyword \0 text.
		fields := bytes.split(s=c.data, sep=[]u8{0}, allocator=context.temp_allocator);
		if len(fields) == 2 {
			res.keyword = strings.clone(string(fields[0]));
			res.text = strings.clone(string(fields[1]));
		} else {
			ok = false;
		}
		return;
	case .zTXt:
		ok = true;

		// zTXt layout: keyword \0 compression-method(0) compressed-text.
		fields := bytes.split_n(s=c.data, sep=[]u8{0}, n=3, allocator=context.temp_allocator);
		if len(fields) != 3 || len(fields[1]) != 0 {
			// Compression method must be 0=Deflate, which thanks to the split above turns
			// into an empty slice
			ok = false; return;
		}

		// Set up ZLIB context and decompress text payload.
		buf: bytes.Buffer;
		zlib_error := zlib.inflate_from_byte_array(&fields[2], &buf);
		defer bytes.buffer_destroy(&buf);
		if !is_kind(zlib_error, E_General.OK) {
			ok = false; return;
		}

		res.keyword = strings.clone(string(fields[0]));
		res.text = strings.clone(bytes.buffer_to_string(&buf));
		return;
	case .iTXt:
		ok = true;

		// iTXt layout: keyword \0 flag method language \0 translated-keyword \0 text.
		s := string(c.data);
		null := strings.index_byte(s, 0);
		if null == -1 {
			ok = false; return;
		}
		// NOTE(review): the comment below says 5 more bytes are required, but the
		// check only rejects < null + 4. The later slicing is still guarded by the
		// index_byte checks, so this can't overrun — confirm the intended minimum.
		if len(c.data) < null + 4 {
			// At a minimum, including the \0 following the keyword, we require 5 more bytes.
			ok = false; return;
		}
		res.keyword = strings.clone(string(c.data[:null]));
		rest := c.data[null+1:];

		compression_flag := rest[:1][0];
		if compression_flag > 1 {
			ok = false; return;
		}
		compression_method := rest[1:2][0];
		if compression_flag == 1 && compression_method > 0 {
			// Only Deflate is supported
			ok = false; return;
		}
		rest = rest[2:];

		// We now expect an optional language keyword and translated keyword, both followed by a \0
		null = strings.index_byte(string(rest), 0);
		if null == -1 {
			ok = false; return;
		}
		res.language = strings.clone(string(rest[:null]));
		rest = rest[null+1:];

		null = strings.index_byte(string(rest), 0);
		if null == -1 {
			ok = false; return;
		}
		res.keyword_localized = strings.clone(string(rest[:null]));
		rest = rest[null+1:];
		if compression_flag == 0 {
			res.text = strings.clone(string(rest));
		} else {
			// Set up ZLIB context and decompress text payload.
			buf: bytes.Buffer;
			zlib_error := zlib.inflate_from_byte_array(&rest, &buf);
			defer bytes.buffer_destroy(&buf);
			if !is_kind(zlib_error, E_General.OK) {
				ok = false; return;
			}

			res.text = strings.clone(bytes.buffer_to_string(&buf));
		}
		return;
	case:
		// PNG text helper called with an unrecognized chunk type.
		ok = false; return;
	}
}
|
||||
|
||||
// Free the strings cloned by `text`.
text_destroy :: proc(text: Text) {
	delete(text.keyword);
	delete(text.keyword_localized);
	delete(text.language);
	delete(text.text);
}
|
||||
|
||||
/*
	Decode an iCCP chunk: "profile-name \0 compression-method(0) compressed-profile".
	Returns the profile name and the inflated ICC profile bytes.
	Free the result with `iccp_destroy`.
*/
iccp :: proc(c: Chunk) -> (res: iCCP, ok: bool) {
	ok = true;

	fields := bytes.split_n(s=c.data, sep=[]u8{0}, n=3, allocator=context.temp_allocator);
	// FIX: a malformed chunk with fewer than two NUL separators previously led
	// to out-of-bounds indexing of fields[1]/fields[2] below; reject it instead.
	if len(fields) != 3 {
		ok = false; return;
	}

	if len(fields[0]) < 1 || len(fields[0]) > 79 {
		// Invalid profile name
		ok = false; return;
	}

	if len(fields[1]) != 0 {
		// Compression method should be a zero, which the split turned into an empty slice.
		ok = false; return;
	}

	// Set up ZLIB context and decompress iCCP payload
	buf: bytes.Buffer;
	zlib_error := zlib.inflate_from_byte_array(&fields[2], &buf);
	if !is_kind(zlib_error, E_General.OK) {
		bytes.buffer_destroy(&buf);
		ok = false; return;
	}

	res.name = strings.clone(string(fields[0]));
	res.profile = bytes.buffer_to_bytes(&buf);

	return;
}
|
||||
|
||||
// Free the name string and profile buffer allocated by `iccp`.
iccp_destroy :: proc(i: iCCP) {
	delete(i.name);
	delete(i.profile);
}
|
||||
|
||||
// Decode an sRGB chunk into its rendering intent.
// The payload must be exactly one byte holding a valid intent value.
srgb :: proc(c: Chunk) -> (res: sRGB, ok: bool) {
	if c.header.type != .sRGB || len(c.data) != 1 {
		return {}, false;
	}

	res.intent = sRGB_Rendering_Intent(c.data[0]);
	ok = res.intent <= max(sRGB_Rendering_Intent);
	return;
}
|
||||
|
||||
/*
	Decode a PLTE chunk into a palette.
	Per the PNG specification the payload is 1..256 RGB triplets, so its
	length must be a positive multiple of 3 no greater than 768.
*/
plte :: proc(c: Chunk) -> (res: PLTE, ok: bool) {
	if c.header.type != .PLTE {
		return {}, false;
	}

	// FIX: validate the length before copying. Previously a malformed chunk
	// could read past c.data and write past the palette's entry array.
	length := int(c.header.length);
	if length % 3 != 0 || length > 768 {
		return {}, false;
	}

	i := 0; j := 0; ok = true;
	for j < length {
		res.entries[i] = {c.data[j], c.data[j+1], c.data[j+2]};
		i += 1; j += 3;
	}
	res.used = u16(i);
	return;
}
|
||||
|
||||
/*
	Decode an sPLT (suggested palette) chunk:
	"palette-name \0 sample-depth(1 byte) entries…", where entries are
	6 bytes each at depth 8 and 10 bytes each at depth 16.
	Free the result with `splt_destroy`.
*/
splt :: proc(c: Chunk) -> (res: sPLT, ok: bool) {
	if c.header.type != .sPLT {
		return {}, false;
	}
	ok = true;

	fields := bytes.split_n(s=c.data, sep=[]u8{0}, n=2, allocator=context.temp_allocator);
	if len(fields) != 2 {
		return {}, false;
	}
	// FIX: a chunk ending immediately after the name's NUL separator left
	// fields[1] empty, and indexing fields[1][0] below would panic.
	if len(fields[1]) < 1 {
		return {}, false;
	}

	res.depth = fields[1][0];
	if res.depth != 8 && res.depth != 16 {
		return {}, false;
	}

	data := fields[1][1:];
	count: int;

	// NOTE(review): slice_data_cast below reinterprets the full entry stride
	// (including the 2-byte frequency field) as [4]uN samples — confirm the
	// sPLT entry layout this is meant to expose.
	if res.depth == 8 {
		if len(data) % 6 != 0 {
			return {}, false;
		}
		count = len(data) / 6;
		if count > 256 {
			return {}, false;
		}

		res.entries = mem.slice_data_cast([][4]u8, data);
	} else { // res.depth == 16
		if len(data) % 10 != 0 {
			return {}, false;
		}
		count = len(data) / 10;
		if count > 256 {
			return {}, false;
		}

		res.entries = mem.slice_data_cast([][4]u16, data);
	}

	res.name = strings.clone(string(fields[0]));
	res.used = u16(count);

	return;
}
|
||||
|
||||
// Free the palette name cloned by `splt`.
// The entry slices alias the chunk data and are not freed here.
splt_destroy :: proc(s: sPLT) {
	delete(s.name);
}
|
||||
|
||||
/*
	Decode an sBIT chunk.
	Returns [4]u8 with the significant bits in each channel; channels not
	applicable to the PNG color type remain zero. The payload must hold
	between 1 and 4 bytes.
*/
sbit :: proc(c: Chunk) -> (res: [4]u8, ok: bool) {
	count := len(c.data);
	if count < 1 || count > 4 {
		return {}, false;
	}

	for v, i in c.data {
		res[i] = v;
	}
	return res, true;
}
|
||||
|
||||
/*
	Decode a hIST chunk.
	Entries are stored as u16be; the length must therefore be even, and at
	most 256 entries (512 bytes) may be present. Values are unpacked into
	machine byte order.
*/
hist :: proc(c: Chunk) -> (res: hIST, ok: bool) {
	if c.header.type != .hIST {
		return {}, false;
	}
	if c.header.length & 1 == 1 || c.header.length > 512 {
		return {}, false;
	}

	data := mem.slice_data_cast([]u16be, c.data);
	for v, i in data {
		res.entries[i] = u16(v);
	}
	res.used = u16(len(data));
	return res, true;
}
|
||||
|
||||
/*
	Decode a cHRM chunk: white point and RGB primary chromaticities.
	Each coordinate is stored scaled by 100,000 and converted to f32 here.
*/
chrm :: proc(c: Chunk) -> (res: cHRM, ok: bool) {
	if c.header.length != size_of(cHRM_Raw) {
		return {}, false;
	}
	chrm_raw := (^cHRM_Raw)(raw_data(c.data))^;

	SCALE :: 100_000.0;
	res.w.x = f32(chrm_raw.w.x) / SCALE;
	res.w.y = f32(chrm_raw.w.y) / SCALE;
	res.r.x = f32(chrm_raw.r.x) / SCALE;
	res.r.y = f32(chrm_raw.r.y) / SCALE;
	res.g.x = f32(chrm_raw.g.x) / SCALE;
	res.g.y = f32(chrm_raw.g.y) / SCALE;
	res.b.x = f32(chrm_raw.b.x) / SCALE;
	res.b.y = f32(chrm_raw.b.y) / SCALE;
	ok = true;
	return;
}
|
||||
|
||||
/*
	Validate an eXIf chunk's TIFF header and return the raw payload.
	Only the byte-order marker ("MM"/"II") and magic value 42 are checked;
	interpreting the Exif data itself is left to an external parser.
*/
exif :: proc(c: Chunk) -> (res: Exif, ok: bool) {
	ok = true;

	if len(c.data) < 4 {
		ok = false; return;
	}

	switch {
	case c.data[0] == 'M' && c.data[1] == 'M':
		// Big-endian TIFF header: 'M','M', 0, 42.
		res.byte_order = .big_endian;
		if c.data[2] != 0 || c.data[3] != 42 {
			ok = false; return;
		}
	case c.data[0] == 'I' && c.data[1] == 'I':
		// Little-endian TIFF header: 'I','I', 42, 0.
		res.byte_order = .little_endian;
		if c.data[2] != 42 || c.data[3] != 0 {
			ok = false; return;
		}
	case:
		ok = false; return;
	}

	res.data = c.data;
	return;
}
|
||||
|
||||
/*
	General helper functions
*/

// Re-export core:image's buffer-size helper for convenience.
compute_buffer_size :: image.compute_buffer_size;
|
||||
|
||||
/*
	PNG save helpers
*/

/*
	Disabled prototype of a PNG writer (make_chunk / write_chunk /
	write_image_as_png). Compiled out via `when false`.
	NOTE(review): this code references names (`hash`, `os`, `profiler`,
	`zlib.ZLIB_Context`, `write_zlib_stream_from_memory`) that the enabled
	part of this file does not use — revisit imports before enabling.
*/
when false {

	// Build a Chunk from a []u8 payload (or any value's bytes) and a type,
	// computing the CRC32 over type + data as the PNG spec requires.
	make_chunk :: proc(c: any, t: Chunk_Type) -> (res: Chunk) {

		data: []u8;
		if v, ok := c.([]u8); ok {
			data = v;
		} else {
			data = mem.any_to_bytes(c);
		}

		res.header.length = u32be(len(data));
		res.header.type = t;
		res.data = data;

		// CRC the type
		crc := hash.crc32(mem.any_to_bytes(res.header.type));
		// Extend the CRC with the data
		res.crc = u32be(hash.crc32(data, crc));
		return;
	}

	// Serialize a chunk: 4-byte length + 4-byte type, payload, 4-byte CRC.
	write_chunk :: proc(fd: os.Handle, chunk: Chunk) {
		c := chunk;
		// Write length + type
		os.write_ptr(fd, &c.header, 8);
		// Write data
		os.write_ptr(fd, mem.raw_data(c.data), int(c.header.length));
		// Write CRC32
		os.write_ptr(fd, &c.crc, 4);
	}

	// Write `image` to `filename` as an unfiltered, non-interlaced PNG.
	write_image_as_png :: proc(filename: string, image: Image) -> (err: Error) {
		profiler.timed_proc();
		using image;
		using os;
		flags: int = O_WRONLY|O_CREATE|O_TRUNC;

		if len(image.pixels) == 0 || len(image.pixels) < image.width * image.height * int(image.channels) {
			return E_PNG.Invalid_Image_Dimensions;
		}

		mode: int = 0;
		when ODIN_OS == "linux" || ODIN_OS == "darwin" {
			// NOTE(justasd): 644 (owner read, write; group read; others read)
			mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;
		}

		fd, fderr := open(filename, flags, mode);
		if fderr != 0 {
			return E_General.Cannot_Open_File;
		}
		defer close(fd);

		magic := Signature;

		write_ptr(fd, &magic, 8);

		ihdr := IHDR{
			width              = u32be(width),
			height             = u32be(height),
			bit_depth          = depth,
			compression_method = 0,
			filter_method      = 0,
			interlace_method   = .None,
		};

		// Map channel count to the PNG color type flags.
		if channels == 1 {
			ihdr.color_type = Color_Type{};
		} else if channels == 2 {
			ihdr.color_type = Color_Type{.Alpha};
		} else if channels == 3 {
			ihdr.color_type = Color_Type{.Color};
		} else if channels == 4 {
			ihdr.color_type = Color_Type{.Color, .Alpha};
		} else {
			// Unhandled
			return E_PNG.Unknown_Color_Type;
		}

		h := make_chunk(ihdr, .IHDR);
		write_chunk(fd, h);

		bytes_needed := width * height * int(channels) + height;
		filter_bytes := mem.make_dynamic_array_len_cap([dynamic]u8, bytes_needed, bytes_needed, context.allocator);
		defer delete(filter_bytes);

		i := 0; j := 0;
		// Add a filter byte 0 per pixel row
		for y := 0; y < height; y += 1 {
			filter_bytes[j] = 0; j += 1;
			for x := 0; x < width; x += 1 {
				for z := 0; z < channels; z += 1 {
					filter_bytes[j+z] = image.pixels[i+z];
				}
				i += channels; j += channels;
			}
		}
		assert(j == bytes_needed);

		a: []u8 = filter_bytes[:];

		out_buf: ^[dynamic]u8;
		defer free(out_buf);

		// Compress the filtered scanlines into a single IDAT payload.
		ctx := zlib.ZLIB_Context{
			in_buf  = &a,
			out_buf = out_buf,
		};
		err = zlib.write_zlib_stream_from_memory(&ctx);

		b: []u8;
		if is_kind(err, E_General, E_General.OK) {
			b = ctx.out_buf[:];
		} else {
			return err;
		}

		idat := make_chunk(b, .IDAT);

		write_chunk(fd, idat);

		iend := make_chunk([]u8{}, .IEND);
		write_chunk(fd, iend);

		return E_General.OK;
	}
}
|
||||
1590
core/image/png/png.odin
Normal file
1590
core/image/png/png.odin
Normal file
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user