Minor stylistic code changes to compress and image packages

This commit is contained in:
gingerBill
2021-04-30 10:58:29 +01:00
parent 7ef30355cb
commit 5f617c56e1
8 changed files with 584 additions and 589 deletions
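
The recurring change in this commit is parameter passing: ^[]u8 and ^io.Stream parameters become plain []u8 and io.Stream, and several if/else ladders become switch statements. Below is a minimal sketch of the slice half of that change, using a hypothetical sum procedure that is not part of this commit; an Odin slice is already a pointer-and-length pair, so passing it by value is cheap and the call site drops the &.

package example

import "core:fmt"

// Old style: pointer to a slice, dereferenced inside the procedure.
sum_old :: proc(values: ^[]int) -> (total: int) {
	for v in values^ {
		total += v;
	}
	return;
}

// New style: the slice is passed by value; the call site no longer needs &.
sum_new :: proc(values: []int) -> (total: int) {
	for v in values {
		total += v;
	}
	return;
}

main :: proc() {
	data := []int{1, 2, 3};
	fmt.println(sum_old(&data), sum_new(data)); // prints: 6 6
}

io.Stream receives the same treatment throughout: it is a small value type, so the stream parameters in the hunks below lose their pointer indirection in the same way.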

View File

@@ -200,4 +200,4 @@ read_bits_no_refill_lsb :: #force_inline proc(z: ^Context, width: u8) -> u32 {
discard_to_next_byte_lsb :: proc(z: ^Context) {
discard := u8(z.num_bits & 7);
consume_bits_lsb(z, discard);
}
}

View File

@@ -34,7 +34,7 @@ main :: proc() {
if len(args) < 2 {
stderr("No input file specified.\n");
err := gzip.load(&TEST, &buf);
err := gzip.load(TEST, &buf);
if gzip.is_kind(err, gzip.E_General.OK) {
stdout("Displaying test vector: ");
stdout(bytes.buffer_to_string(&buf));
@@ -50,7 +50,7 @@ main :: proc() {
if file == "-" {
// Read from stdin
s := os.stream_from_handle(os.stdin);
err = gzip.load(&s, &buf);
err = gzip.load(s, &buf);
} else {
err = gzip.load(file, &buf);
}
@@ -67,4 +67,4 @@ main :: proc() {
stdout(bytes.buffer_to_string(&buf));
}
os.exit(0);
}
}
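
A hedged sketch of a caller after this hunk; the core:compress/gzip import path is an assumption, while the by-value stream argument and the helper names come from the diff.

package example

import "core:bytes"
import "core:compress/gzip"
import "core:fmt"
import "core:os"

main :: proc() {
	buf: bytes.Buffer;
	defer bytes.buffer_destroy(&buf);

	// The stream is now passed by value rather than as ^io.Stream.
	s := os.stream_from_handle(os.stdin);
	err := gzip.load(s, &buf);

	if gzip.is_kind(err, gzip.E_General.OK) {
		fmt.println(bytes.buffer_to_string(&buf));
	}
}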

View File

@@ -45,39 +45,39 @@ Header_Flag :: enum u8 {
Header_Flags :: distinct bit_set[Header_Flag; u8];
OS :: enum u8 {
FAT = 0,
Amiga = 1,
VMS = 2,
Unix = 3,
VM_CMS = 4,
Atari_TOS = 5,
HPFS = 6,
Macintosh = 7,
Z_System = 8,
CP_M = 9,
TOPS_20 = 10,
NTFS = 11,
QDOS = 12,
FAT = 0,
Amiga = 1,
VMS = 2,
Unix = 3,
VM_CMS = 4,
Atari_TOS = 5,
HPFS = 6,
Macintosh = 7,
Z_System = 8,
CP_M = 9,
TOPS_20 = 10,
NTFS = 11,
QDOS = 12,
Acorn_RISCOS = 13,
_Unknown = 14,
Unknown = 255,
_Unknown = 14,
Unknown = 255,
}
OS_Name :: #partial [OS]string{
.FAT = "FAT",
.Amiga = "Amiga",
.VMS = "VMS/OpenVMS",
.Unix = "Unix",
.VM_CMS = "VM/CMS",
.Atari_TOS = "Atari TOS",
.HPFS = "HPFS",
.Macintosh = "Macintosh",
.Z_System = "Z-System",
.CP_M = "CP/M",
.TOPS_20 = "TOPS-20",
.NTFS = "NTFS",
.QDOS = "QDOS",
.FAT = "FAT",
.Amiga = "Amiga",
.VMS = "VMS/OpenVMS",
.Unix = "Unix",
.VM_CMS = "VM/CMS",
.Atari_TOS = "Atari TOS",
.HPFS = "HPFS",
.Macintosh = "Macintosh",
.Z_System = "Z-System",
.CP_M = "CP/M",
.TOPS_20 = "TOPS-20",
.NTFS = "NTFS",
.QDOS = "QDOS",
.Acorn_RISCOS = "Acorn RISCOS",
.Unknown = "Unknown",
.Unknown = "Unknown",
};
Compression :: enum u8 {
@@ -96,13 +96,13 @@ E_ZLIB :: compress.ZLIB_Error;
E_Deflate :: compress.Deflate_Error;
is_kind :: compress.is_kind;
load_from_slice :: proc(slice: ^[]u8, buf: ^bytes.Buffer, allocator := context.allocator) -> (err: Error) {
load_from_slice :: proc(slice: []u8, buf: ^bytes.Buffer, allocator := context.allocator) -> (err: Error) {
r := bytes.Reader{};
bytes.reader_init(&r, slice^);
bytes.reader_init(&r, slice);
stream := bytes.reader_to_stream(&r);
err = load_from_stream(&stream, buf, allocator);
err = load_from_stream(stream, buf, allocator);
return err;
}
@@ -111,18 +111,16 @@ load_from_file :: proc(filename: string, buf: ^bytes.Buffer, allocator := contex
data, ok := os.read_entire_file(filename, allocator);
defer delete(data);
err = E_General.File_Not_Found;
if ok {
err = load_from_slice(&data, buf, allocator);
return;
} else {
return E_General.File_Not_Found;
err = load_from_slice(data, buf, allocator);
}
return;
}
load_from_stream :: proc(stream: ^io.Stream, buf: ^bytes.Buffer, allocator := context.allocator) -> (err: Error) {
load_from_stream :: proc(stream: io.Stream, buf: ^bytes.Buffer, allocator := context.allocator) -> (err: Error) {
ctx := compress.Context{
input = stream^,
input = stream,
};
buf := buf;
ws := bytes.buffer_to_stream(buf);
@@ -313,4 +311,4 @@ load_from_stream :: proc(stream: ^io.Stream, buf: ^bytes.Buffer, allocator := co
return E_General.OK;
}
load :: proc{load_from_file, load_from_slice, load_from_stream};
load :: proc{load_from_file, load_from_slice, load_from_stream};
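
Because this view drops the +/- markers, the load_from_file hunk above is easier to read as the procedure it resolves to once the commit is applied. Reconstructed from the lines shown (the truncated default is context.allocator; nothing else is added):

load_from_file :: proc(filename: string, buf: ^bytes.Buffer, allocator := context.allocator) -> (err: Error) {
	data, ok := os.read_entire_file(filename, allocator);
	defer delete(data);

	err = E_General.File_Not_Found;
	if ok {
		err = load_from_slice(data, buf, allocator);
	}
	return;
}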

View File

@@ -7,30 +7,30 @@ import "core:fmt"
main :: proc() {
ODIN_DEMO: []u8 = {
ODIN_DEMO := []u8{
120, 156, 101, 144, 77, 110, 131, 48, 16, 133, 215, 204, 41, 158, 44,
69, 73, 32, 148, 182, 75, 35, 14, 208, 125, 47, 96, 185, 195, 143,
69, 73, 32, 148, 182, 75, 35, 14, 208, 125, 47, 96, 185, 195, 143,
130, 13, 50, 38, 81, 84, 101, 213, 75, 116, 215, 43, 246, 8, 53,
82, 126, 8, 181, 188, 152, 153, 111, 222, 147, 159, 123, 165, 247, 170,
98, 24, 213, 88, 162, 198, 244, 157, 243, 16, 186, 115, 44, 75, 227,
5, 77, 115, 72, 137, 222, 117, 122, 179, 197, 39, 69, 161, 170, 156,
50, 144, 5, 68, 130, 4, 49, 126, 127, 190, 191, 144, 34, 19, 57,
69, 74, 235, 209, 140, 173, 242, 157, 155, 54, 158, 115, 162, 168, 12,
82, 126, 8, 181, 188, 152, 153, 111, 222, 147, 159, 123, 165, 247, 170,
98, 24, 213, 88, 162, 198, 244, 157, 243, 16, 186, 115, 44, 75, 227,
5, 77, 115, 72, 137, 222, 117, 122, 179, 197, 39, 69, 161, 170, 156,
50, 144, 5, 68, 130, 4, 49, 126, 127, 190, 191, 144, 34, 19, 57,
69, 74, 235, 209, 140, 173, 242, 157, 155, 54, 158, 115, 162, 168, 12,
181, 239, 246, 108, 17, 188, 174, 242, 224, 20, 13, 199, 198, 235, 250,
194, 166, 129, 86, 3, 99, 157, 172, 37, 230, 62, 73, 129, 151, 252,
70, 211, 5, 77, 31, 104, 188, 160, 113, 129, 215, 59, 205, 22, 52,
123, 160, 83, 142, 255, 242, 89, 123, 93, 149, 200, 50, 188, 85, 54,
252, 18, 248, 192, 238, 228, 235, 198, 86, 224, 118, 224, 176, 113, 166,
112, 67, 106, 227, 159, 122, 215, 88, 95, 110, 196, 123, 205, 183, 224,
98, 53, 8, 104, 213, 234, 201, 147, 7, 248, 192, 14, 170, 29, 25,
98, 53, 8, 104, 213, 234, 201, 147, 7, 248, 192, 14, 170, 29, 25,
171, 15, 18, 59, 138, 112, 63, 23, 205, 110, 254, 136, 109, 78, 231,
63, 234, 138, 133, 204,
63, 234, 138, 133, 204,
};
buf: bytes.Buffer;
// We can pass ", true" to inflate a raw DEFLATE stream instead of a ZLIB wrapped one.
err := zlib.inflate(&ODIN_DEMO, &buf);
err := zlib.inflate(ODIN_DEMO, &buf);
defer bytes.buffer_destroy(&buf);
if !zlib.is_kind(err, zlib.E_General.OK) {
@@ -39,4 +39,4 @@ main :: proc() {
s := bytes.buffer_to_string(&buf);
fmt.printf("Input: %v bytes, output (%v bytes):\n%v\n", len(ODIN_DEMO), len(s), s);
assert(len(s) == 438);
}
}
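
The test-vector declaration changes spelling but not meaning; a small illustrative sketch of the two equivalent forms (the values here are placeholders):

package example

import "core:fmt"

main :: proc() {
	data_a: []u8 = {1, 2, 3}; // typed declaration, untyped compound literal (old form)
	data_b := []u8{1, 2, 3};  // type inferred from the literal (form used above)
	fmt.println(data_a, data_b);
}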

View File

@@ -21,8 +21,8 @@ Compression_Method :: enum u8 {
Compression_Level :: enum u8 {
Fastest = 0,
Fast = 1,
Default = 2,
Maximum = 3,
Default = 2,
Maximum = 3,
}
Options :: struct {
@@ -68,19 +68,19 @@ Z_LENGTH_DEZIGZAG := []u8{
};
Z_FIXED_LENGTH := [288]u8{
8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8,
};
Z_FIXED_DIST := [32]u8{
5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
};
/*
@@ -94,12 +94,12 @@ ZFAST_MASK :: ((1 << ZFAST_BITS) - 1);
JPEG packs from left, ZLIB from right. We can't share code.
*/
Huffman_Table :: struct {
fast: [1 << ZFAST_BITS]u16,
firstcode: [16]u16,
maxcode: [17]int,
firstsymbol: [16]u16,
size: [288]u8,
value: [288]u16,
fast: [1 << ZFAST_BITS]u16,
firstcode: [16]u16,
maxcode: [17]int,
firstsymbol: [16]u16,
size: [288]u8,
value: [288]u16,
};
// Implementation starts here
@@ -218,19 +218,19 @@ decode_huffman_slowpath :: proc(z: ^Context, t: ^Huffman_Table) -> (r: u16, err:
if (s >= 16) {
return 0, E_Deflate.Bad_Huffman_Code;
}
// code size is s, so:
b := (k >> (16-s)) - int(t.firstcode[s]) + int(t.firstsymbol[s]);
if b >= size_of(t.size) {
return 0, E_Deflate.Bad_Huffman_Code;
}
if t.size[b] != s {
return 0, E_Deflate.Bad_Huffman_Code;
}
// code size is s, so:
b := (k >> (16-s)) - int(t.firstcode[s]) + int(t.firstsymbol[s]);
if b >= size_of(t.size) {
return 0, E_Deflate.Bad_Huffman_Code;
}
if t.size[b] != s {
return 0, E_Deflate.Bad_Huffman_Code;
}
compress.consume_bits_lsb(z, s);
compress.consume_bits_lsb(z, s);
r = t.value[b];
return r, E_General.OK;
r = t.value[b];
return r, E_General.OK;
}
decode_huffman :: proc(z: ^Context, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
@@ -254,7 +254,6 @@ decode_huffman :: proc(z: ^Context, t: ^Huffman_Table) -> (r: u16, err: Error) #
}
parse_huffman_block :: proc(z: ^Context, z_repeat, z_offset: ^Huffman_Table) -> (err: Error) #no_bounds_check {
#no_bounds_check for {
value, e := decode_huffman(z, z_repeat);
if !is_kind(e, E_General.OK) {
@@ -267,8 +266,8 @@ parse_huffman_block :: proc(z: ^Context, z_repeat, z_offset: ^Huffman_Table) ->
}
} else {
if value == 256 {
// End of block
return E_General.OK;
// End of block
return E_General.OK;
}
value -= 257;
@@ -370,7 +369,7 @@ inflate_from_stream :: proc(using ctx: ^Context, raw := false, allocator := cont
}
fdict := (flg >> 5) & 1;
/*
/*
We don't handle built-in dictionaries for now.
They're application specific and PNG doesn't use them.
*/
@@ -449,7 +448,8 @@ inflate_from_stream_raw :: proc(z: ^Context, allocator := context.allocator) ->
// log.debugf("Final: %v | Type: %v\n", final, type);
if type == 0 {
switch type {
case 0:
// Uncompressed block
// Discard bits until next byte boundary
@@ -471,9 +471,9 @@ inflate_from_stream_raw :: proc(z: ^Context, allocator := context.allocator) ->
write_byte(z, u8(lit));
uncompressed_len -= 1;
}
} else if type == 3 {
case 3:
return E_Deflate.BType_3;
} else {
case:
// log.debugf("Err: %v | Final: %v | Type: %v\n", err, final, type);
if type == 1 {
// Use fixed code lengths.
@@ -487,12 +487,12 @@ inflate_from_stream_raw :: proc(z: ^Context, allocator := context.allocator) ->
}
} else {
lencodes: [286+32+137]u8;
codelength_sizes: [19]u8;
codelength_sizes: [19]u8;
//i: u32;
n: u32;
//i: u32;
n: u32;
compress.refill_lsb(z, 14);
compress.refill_lsb(z, 14);
hlit := compress.read_bits_no_refill_lsb(z, 5) + 257;
hdist := compress.read_bits_no_refill_lsb(z, 5) + 1;
hclen := compress.read_bits_no_refill_lsb(z, 4) + 4;
@@ -525,34 +525,35 @@ inflate_from_stream_raw :: proc(z: ^Context, allocator := context.allocator) ->
} else {
fill := u8(0);
compress.refill_lsb(z, 7);
if c == 16 {
switch c {
case 16:
c = u16(compress.read_bits_no_refill_lsb(z, 2) + 3);
if n == 0 {
return E_Deflate.Huffman_Bad_Code_Lengths;
}
fill = lencodes[n - 1];
} else if c == 17 {
c = u16(compress.read_bits_no_refill_lsb(z, 3) + 3);
} else if c == 18 {
c = u16(compress.read_bits_no_refill_lsb(z, 7) + 11);
} else {
return E_Deflate.Huffman_Bad_Code_Lengths;
}
if n == 0 {
return E_Deflate.Huffman_Bad_Code_Lengths;
}
fill = lencodes[n - 1];
case 17:
c = u16(compress.read_bits_no_refill_lsb(z, 3) + 3);
case 18:
c = u16(compress.read_bits_no_refill_lsb(z, 7) + 11);
case:
return E_Deflate.Huffman_Bad_Code_Lengths;
}
if ntot - n < u32(c) {
return E_Deflate.Huffman_Bad_Code_Lengths;
}
return E_Deflate.Huffman_Bad_Code_Lengths;
}
nc := n + u32(c);
#no_bounds_check for ; n < nc; n += 1 {
lencodes[n] = fill;
}
nc := n + u32(c);
#no_bounds_check for ; n < nc; n += 1 {
lencodes[n] = fill;
}
}
}
if n != ntot {
return E_Deflate.Huffman_Bad_Code_Lengths;
}
return E_Deflate.Huffman_Bad_Code_Lengths;
}
err = build_huffman(z_repeat, lencodes[:hlit]);
if !is_kind(err, E_General.OK) {
@@ -577,11 +578,11 @@ inflate_from_stream_raw :: proc(z: ^Context, allocator := context.allocator) ->
return E_General.OK;
}
inflate_from_byte_array :: proc(input: ^[]u8, buf: ^bytes.Buffer, raw := false) -> (err: Error) {
inflate_from_byte_array :: proc(input: []u8, buf: ^bytes.Buffer, raw := false) -> (err: Error) {
ctx := Context{};
r := bytes.Reader{};
bytes.reader_init(&r, input^);
bytes.reader_init(&r, input);
rs := bytes.reader_to_stream(&r);
ctx.input = rs;
@@ -594,9 +595,9 @@ inflate_from_byte_array :: proc(input: ^[]u8, buf: ^bytes.Buffer, raw := false)
return err;
}
inflate_from_byte_array_raw :: proc(input: ^[]u8, buf: ^bytes.Buffer, raw := false) -> (err: Error) {
inflate_from_byte_array_raw :: proc(input: []u8, buf: ^bytes.Buffer, raw := false) -> (err: Error) {
return inflate_from_byte_array(input, buf, true);
}
inflate :: proc{inflate_from_stream, inflate_from_byte_array};
inflate_raw :: proc{inflate_from_stream_raw, inflate_from_byte_array_raw};
inflate_raw :: proc{inflate_from_stream_raw, inflate_from_byte_array_raw};
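
The block-type dispatch in inflate_from_stream_raw and the code-length cases 16/17/18 both move from if/else chains to switch statements, with case: as the default arm. A self-contained sketch of that pattern, using the DEFLATE block-type meanings visible in the hunks above (the description strings are illustrative):

package example

import "core:fmt"

describe_block :: proc(type: u32) -> (desc: string) {
	switch type {
	case 0: desc = "stored (uncompressed) block";
	case 3: desc = "reserved block type, always an error";
	case:   desc = "compressed block (1 = fixed, 2 = dynamic Huffman)"; // case: is the default
	}
	return;
}

main :: proc() {
	types := []u32{0, 1, 2, 3};
	for t in types {
		fmt.println(t, describe_block(t));
	}
}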

View File

@@ -194,7 +194,7 @@ write_image_as_ppm :: proc(filename: string, image: ^image.Image) -> (success: b
}
defer close(fd);
write_string(fd,
write_string(fd,
fmt.tprintf("P6\n%v %v\n%v\n", width, height, (1 << depth -1)),
);
@@ -324,4 +324,4 @@ write_image_as_ppm :: proc(filename: string, image: ^image.Image) -> (success: b
}
}
return true;
}
}

View File

@@ -76,102 +76,102 @@ core_time :: proc(c: Chunk) -> (t: coretime.Time, ok: bool) {
using png_time;
return coretime.datetime_to_time(
int(year), int(month), int(day),
int(hour), int(minute), int(second));
int(hour), int(minute), int(second),
);
}
text :: proc(c: Chunk) -> (res: Text, ok: bool) {
#partial switch c.header.type {
case .tEXt:
ok = true;
case .tEXt:
ok = true;
fields := bytes.split(s=c.data, sep=[]u8{0}, allocator=context.temp_allocator);
if len(fields) == 2 {
res.keyword = strings.clone(string(fields[0]));
res.text = strings.clone(string(fields[1]));
} else {
ok = false;
}
return;
case .zTXt:
ok = true;
fields := bytes.split(s=c.data, sep=[]u8{0}, allocator=context.temp_allocator);
if len(fields) == 2 {
res.keyword = strings.clone(string(fields[0]));
res.text = strings.clone(string(fields[1]));
} else {
ok = false;
}
return;
case .zTXt:
ok = true;
fields := bytes.split_n(s=c.data, sep=[]u8{0}, n=3, allocator=context.temp_allocator);
if len(fields) != 3 || len(fields[1]) != 0 {
// Compression method must be 0=Deflate, which thanks to the split above turns
// into an empty slice
ok = false; return;
}
fields := bytes.split_n(s=c.data, sep=[]u8{0}, n=3, allocator=context.temp_allocator);
if len(fields) != 3 || len(fields[1]) != 0 {
// Compression method must be 0=Deflate, which thanks to the split above turns
// into an empty slice
ok = false; return;
}
// Set up ZLIB context and decompress text payload.
buf: bytes.Buffer;
zlib_error := zlib.inflate_from_byte_array(fields[2], &buf);
defer bytes.buffer_destroy(&buf);
if !is_kind(zlib_error, E_General.OK) {
ok = false; return;
}
res.keyword = strings.clone(string(fields[0]));
res.text = strings.clone(bytes.buffer_to_string(&buf));
return;
case .iTXt:
ok = true;
s := string(c.data);
null := strings.index_byte(s, 0);
if null == -1 {
ok = false; return;
}
if len(c.data) < null + 4 {
// At a minimum, including the \0 following the keyword, we require 5 more bytes.
ok = false; return;
}
res.keyword = strings.clone(string(c.data[:null]));
rest := c.data[null+1:];
compression_flag := rest[:1][0];
if compression_flag > 1 {
ok = false; return;
}
compression_method := rest[1:2][0];
if compression_flag == 1 && compression_method > 0 {
// Only Deflate is supported
ok = false; return;
}
rest = rest[2:];
// We now expect an optional language keyword and translated keyword, both followed by a \0
null = strings.index_byte(string(rest), 0);
if null == -1 {
ok = false; return;
}
res.language = strings.clone(string(rest[:null]));
rest = rest[null+1:];
null = strings.index_byte(string(rest), 0);
if null == -1 {
ok = false; return;
}
res.keyword_localized = strings.clone(string(rest[:null]));
rest = rest[null+1:];
if compression_flag == 0 {
res.text = strings.clone(string(rest));
} else {
// Set up ZLIB context and decompress text payload.
buf: bytes.Buffer;
zlib_error := zlib.inflate_from_byte_array(&fields[2], &buf);
zlib_error := zlib.inflate_from_byte_array(rest, &buf);
defer bytes.buffer_destroy(&buf);
if !is_kind(zlib_error, E_General.OK) {
ok = false; return;
}
res.keyword = strings.clone(string(fields[0]));
res.text = strings.clone(bytes.buffer_to_string(&buf));
return;
case .iTXt:
ok = true;
s := string(c.data);
null := strings.index_byte(s, 0);
if null == -1 {
ok = false; return;
}
if len(c.data) < null + 4 {
// At a minimum, including the \0 following the keyword, we require 5 more bytes.
ok = false; return;
}
res.keyword = strings.clone(string(c.data[:null]));
rest := c.data[null+1:];
compression_flag := rest[:1][0];
if compression_flag > 1 {
ok = false; return;
}
compression_method := rest[1:2][0];
if compression_flag == 1 && compression_method > 0 {
// Only Deflate is supported
ok = false; return;
}
rest = rest[2:];
// We now expect an optional language keyword and translated keyword, both followed by a \0
null = strings.index_byte(string(rest), 0);
if null == -1 {
ok = false; return;
}
res.language = strings.clone(string(rest[:null]));
rest = rest[null+1:];
null = strings.index_byte(string(rest), 0);
if null == -1 {
ok = false; return;
}
res.keyword_localized = strings.clone(string(rest[:null]));
rest = rest[null+1:];
if compression_flag == 0 {
res.text = strings.clone(string(rest));
} else {
// Set up ZLIB context and decompress text payload.
buf: bytes.Buffer;
zlib_error := zlib.inflate_from_byte_array(&rest, &buf);
defer bytes.buffer_destroy(&buf);
if !is_kind(zlib_error, E_General.OK) {
ok = false; return;
}
res.text = strings.clone(bytes.buffer_to_string(&buf));
}
return;
case:
// PNG text helper called with an unrecognized chunk type.
ok = false; return;
}
return;
case:
// PNG text helper called with an unrecognized chunk type.
ok = false; return;
}
}
@@ -199,7 +199,7 @@ iccp :: proc(c: Chunk) -> (res: iCCP, ok: bool) {
// Set up ZLIB context and decompress iCCP payload
buf: bytes.Buffer;
zlib_error := zlib.inflate_from_byte_array(&fields[2], &buf);
zlib_error := zlib.inflate_from_byte_array(fields[2], &buf);
if !is_kind(zlib_error, E_General.OK) {
bytes.buffer_destroy(&buf);
ok = false; return;
@@ -458,19 +458,14 @@ when false {
interlace_method = .None,
};
if channels == 1 {
ihdr.color_type = Color_Type{};
} else if channels == 2 {
ihdr.color_type = Color_Type{.Alpha};
} else if channels == 3 {
ihdr.color_type = Color_Type{.Color};
} else if channels == 4 {
ihdr.color_type = Color_Type{.Color, .Alpha};
} else {
// Unhandled
switch channels {
case 1: ihdr.color_type = Color_Type{};
case 2: ihdr.color_type = Color_Type{.Alpha};
case 3: ihdr.color_type = Color_Type{.Color};
case 4: ihdr.color_type = Color_Type{.Color, .Alpha};
case:// Unhandled
return E_PNG.Unknown_Color_Type;
}
h := make_chunk(ihdr, .IHDR);
write_chunk(fd, h);
@@ -518,4 +513,4 @@ when false {
return E_General.OK;
}
}
}
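
The channel-count mapping in the (currently when false) write path becomes a switch as well. A self-contained sketch of that shape, with hypothetical stand-ins for the package's Color_Type so it compiles on its own; only the 1-to-4 channel mapping is taken from the hunk above:

package example

import "core:fmt"

// Hypothetical stand-ins; the real package defines its own Color_Type bit_set.
Color_Flag :: enum u8 { Paletted, Color, Alpha }
Color_Type :: distinct bit_set[Color_Flag; u8]

color_type_for_channels :: proc(channels: int) -> (ct: Color_Type, ok: bool) {
	switch channels {
	case 1: ct = Color_Type{};
	case 2: ct = Color_Type{.Alpha};
	case 3: ct = Color_Type{.Color};
	case 4: ct = Color_Type{.Color, .Alpha};
	case:   return Color_Type{}, false; // unhandled channel count
	}
	return ct, true;
}

main :: proc() {
	ct, ok := color_type_for_channels(4);
	fmt.println(ct, ok);
}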

View File

@@ -350,9 +350,9 @@ chunk_type_to_name :: proc(type: ^Chunk_Type) -> string {
return strings.string_from_ptr(t, 4);
}
load_from_slice :: proc(slice: ^[]u8, options: Options = {}, allocator := context.allocator) -> (img: ^Image, err: Error) {
load_from_slice :: proc(slice: []u8, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
r := bytes.Reader{};
bytes.reader_init(&r, slice^);
bytes.reader_init(&r, slice);
stream := bytes.reader_to_stream(&r);
/*
@@ -360,17 +360,17 @@ load_from_slice :: proc(slice: ^[]u8, options: Options = {}, allocator := contex
This way the stream reader could avoid the copy into the temp memory returned by it,
and instead return a slice into the original memory that's already owned by the caller.
*/
img, err = load_from_stream(&stream, options, allocator);
img, err = load_from_stream(stream, options, allocator);
return img, err;
}
load_from_file :: proc(filename: string, options: Options = {}, allocator := context.allocator) -> (img: ^Image, err: Error) {
load_from_file :: proc(filename: string, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
data, ok := os.read_entire_file(filename, allocator);
defer delete(data);
if ok {
img, err = load_from_slice(&data, options, allocator);
img, err = load_from_slice(data, options, allocator);
return;
} else {
img = new(Image);
@@ -378,7 +378,7 @@ load_from_file :: proc(filename: string, options: Options = {}, allocator := con
}
}
load_from_stream :: proc(stream: ^io.Stream, options: Options = {}, allocator := context.allocator) -> (img: ^Image, err: Error) {
load_from_stream :: proc(stream: io.Stream, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
options := options;
if .info in options {
options |= {.return_metadata, .do_not_decompress_image};
@@ -396,7 +396,7 @@ load_from_stream :: proc(stream: ^io.Stream, options: Options = {}, allocator :=
img.sidecar = nil;
ctx := compress.Context{
input = stream^,
input = stream,
};
signature, io_error := compress.read_data(&ctx, Signature);
@@ -669,7 +669,7 @@ load_from_stream :: proc(stream: ^io.Stream, options: Options = {}, allocator :=
}
buf: bytes.Buffer;
zlib_error := zlib.inflate(&idat, &buf);
zlib_error := zlib.inflate(idat, &buf);
defer bytes.buffer_destroy(&buf);
if !is_kind(zlib_error, E_General.OK) {
@@ -817,8 +817,7 @@ load_from_stream :: proc(stream: ^io.Stream, options: Options = {}, allocator :=
}
}
} else {
// This should be impossible.
assert(false);
unreachable();
}
img.pixels = t;
@@ -845,145 +844,145 @@ load_from_stream :: proc(stream: ^io.Stream, options: Options = {}, allocator :=
o16 := mem.slice_data_cast([]u16, t.buf[:]);
switch (raw_image_channels) {
case 1:
// Gray without Alpha. Might have tRNS alpha.
key := u16(0);
case 1:
// Gray without Alpha. Might have tRNS alpha.
key := u16(0);
if seen_trns {
key = mem.slice_data_cast([]u16, trns.data)[0];
}
for len(p16) > 0 {
r := p16[0];
alpha := u16(1); // Default to full opaque
if seen_trns {
key = mem.slice_data_cast([]u16, trns.data)[0];
}
for len(p16) > 0 {
r := p16[0];
alpha := u16(1); // Default to full opaque
if seen_trns {
if r == key {
if seen_bkgd {
c := img.background.([3]u16);
r = c[0];
} else {
alpha = 0; // Keyed transparency
}
if r == key {
if seen_bkgd {
c := img.background.([3]u16);
r = c[0];
} else {
alpha = 0; // Keyed transparency
}
}
if premultiply {
o16[0] = r * alpha;
o16[1] = r * alpha;
o16[2] = r * alpha;
} else {
o16[0] = r;
o16[1] = r;
o16[2] = r;
}
if out_image_channels == 4 {
o16[3] = alpha * 65535;
}
p16 = p16[1:];
o16 = o16[out_image_channels:];
}
case 2:
// Gray with alpha, we shouldn't have a tRNS chunk.
for len(p16) > 0 {
r := p16[0];
if premultiply {
alpha := p16[1];
c := u16(f32(r) * f32(alpha) / f32(65535));
o16[0] = c;
o16[1] = c;
o16[2] = c;
} else {
o16[0] = r;
o16[1] = r;
o16[2] = r;
}
if .alpha_drop_if_present not_in options {
o16[3] = p16[1];
}
p16 = p16[2:];
o16 = o16[out_image_channels:];
if premultiply {
o16[0] = r * alpha;
o16[1] = r * alpha;
o16[2] = r * alpha;
} else {
o16[0] = r;
o16[1] = r;
o16[2] = r;
}
case 3:
/*
Color without Alpha.
We may still have a tRNS chunk or `.alpha_add_if_missing`.
*/
key: []u16;
if out_image_channels == 4 {
o16[3] = alpha * 65535;
}
p16 = p16[1:];
o16 = o16[out_image_channels:];
}
case 2:
// Gray with alpha, we shouldn't have a tRNS chunk.
for len(p16) > 0 {
r := p16[0];
if premultiply {
alpha := p16[1];
c := u16(f32(r) * f32(alpha) / f32(65535));
o16[0] = c;
o16[1] = c;
o16[2] = c;
} else {
o16[0] = r;
o16[1] = r;
o16[2] = r;
}
if .alpha_drop_if_present not_in options {
o16[3] = p16[1];
}
p16 = p16[2:];
o16 = o16[out_image_channels:];
}
case 3:
/*
Color without Alpha.
We may still have a tRNS chunk or `.alpha_add_if_missing`.
*/
key: []u16;
if seen_trns {
key = mem.slice_data_cast([]u16, trns.data);
}
for len(p16) > 0 {
r := p16[0];
g := p16[1];
b := p16[2];
alpha := u16(1); // Default to full opaque
if seen_trns {
key = mem.slice_data_cast([]u16, trns.data);
}
for len(p16) > 0 {
r := p16[0];
g := p16[1];
b := p16[2];
alpha := u16(1); // Default to full opaque
if seen_trns {
if r == key[0] && g == key[1] && b == key[2] {
if seen_bkgd {
c := img.background.([3]u16);
r = c[0];
g = c[1];
b = c[2];
} else {
alpha = 0; // Keyed transparency
}
if r == key[0] && g == key[1] && b == key[2] {
if seen_bkgd {
c := img.background.([3]u16);
r = c[0];
g = c[1];
b = c[2];
} else {
alpha = 0; // Keyed transparency
}
}
if premultiply {
o16[0] = r * alpha;
o16[1] = g * alpha;
o16[2] = b * alpha;
} else {
o16[0] = r;
o16[1] = g;
o16[2] = b;
}
if out_image_channels == 4 {
o16[3] = alpha * 65535;
}
p16 = p16[3:];
o16 = o16[out_image_channels:];
}
case 4:
// Color with Alpha, can't have tRNS.
for len(p16) > 0 {
r := p16[0];
g := p16[1];
b := p16[2];
a := p16[3];
if premultiply {
alpha := f32(a) / 65535.0;
o16[0] = u16(f32(r) * alpha);
o16[1] = u16(f32(g) * alpha);
o16[2] = u16(f32(b) * alpha);
} else {
o16[0] = r;
o16[1] = g;
o16[2] = b;
}
if .alpha_drop_if_present not_in options {
o16[3] = a;
}
p16 = p16[4:];
o16 = o16[out_image_channels:];
if premultiply {
o16[0] = r * alpha;
o16[1] = g * alpha;
o16[2] = b * alpha;
} else {
o16[0] = r;
o16[1] = g;
o16[2] = b;
}
case:
unreachable("We should never seen # channels other than 1-4 inclusive.");
if out_image_channels == 4 {
o16[3] = alpha * 65535;
}
p16 = p16[3:];
o16 = o16[out_image_channels:];
}
case 4:
// Color with Alpha, can't have tRNS.
for len(p16) > 0 {
r := p16[0];
g := p16[1];
b := p16[2];
a := p16[3];
if premultiply {
alpha := f32(a) / 65535.0;
o16[0] = u16(f32(r) * alpha);
o16[1] = u16(f32(g) * alpha);
o16[2] = u16(f32(b) * alpha);
} else {
o16[0] = r;
o16[1] = g;
o16[2] = b;
}
if .alpha_drop_if_present not_in options {
o16[3] = a;
}
p16 = p16[4:];
o16 = o16[out_image_channels:];
}
case:
unreachable("We should never seen # channels other than 1-4 inclusive.");
}
img.pixels = t;
@@ -1011,143 +1010,143 @@ load_from_stream :: proc(stream: ^io.Stream, options: Options = {}, allocator :=
o := mem.slice_data_cast([]u8, t.buf[:]);
switch (raw_image_channels) {
case 1:
// Gray without Alpha. Might have tRNS alpha.
key := u8(0);
case 1:
// Gray without Alpha. Might have tRNS alpha.
key := u8(0);
if seen_trns {
key = u8(mem.slice_data_cast([]u16be, trns.data)[0]);
}
for len(p) > 0 {
r := p[0];
alpha := u8(1);
if seen_trns {
key = u8(mem.slice_data_cast([]u16be, trns.data)[0]);
}
for len(p) > 0 {
r := p[0];
alpha := u8(1);
if seen_trns {
if r == key {
if seen_bkgd {
c := img.background.([3]u16);
r = u8(c[0]);
} else {
alpha = 0; // Keyed transparency
}
if r == key {
if seen_bkgd {
c := img.background.([3]u16);
r = u8(c[0]);
} else {
alpha = 0; // Keyed transparency
}
if premultiply {
o[0] = r * alpha;
o[1] = r * alpha;
o[2] = r * alpha;
}
} else {
o[0] = r;
o[1] = r;
o[2] = r;
}
if out_image_channels == 4 {
o[3] = alpha * 255;
if premultiply {
o[0] = r * alpha;
o[1] = r * alpha;
o[2] = r * alpha;
}
p = p[1:];
o = o[out_image_channels:];
} else {
o[0] = r;
o[1] = r;
o[2] = r;
}
case 2:
// Gray with alpha, we shouldn't have a tRNS chunk.
for len(p) > 0 {
r := p[0];
if .alpha_premultiply in options {
alpha := p[1];
c := u8(f32(r) * f32(alpha) / f32(255));
o[0] = c;
o[1] = c;
o[2] = c;
} else {
o[0] = r;
o[1] = r;
o[2] = r;
}
if .alpha_drop_if_present not_in options {
o[3] = p[1];
}
p = p[2:];
o = o[out_image_channels:];
if out_image_channels == 4 {
o[3] = alpha * 255;
}
case 3:
// Color without Alpha. We may still have a tRNS chunk
key: []u8;
p = p[1:];
o = o[out_image_channels:];
}
case 2:
// Gray with alpha, we shouldn't have a tRNS chunk.
for len(p) > 0 {
r := p[0];
if .alpha_premultiply in options {
alpha := p[1];
c := u8(f32(r) * f32(alpha) / f32(255));
o[0] = c;
o[1] = c;
o[2] = c;
} else {
o[0] = r;
o[1] = r;
o[2] = r;
}
if .alpha_drop_if_present not_in options {
o[3] = p[1];
}
p = p[2:];
o = o[out_image_channels:];
}
case 3:
// Color without Alpha. We may still have a tRNS chunk
key: []u8;
if seen_trns {
/*
For 8-bit images, the tRNS chunk still contains a triple in u16be.
We use only the low byte in this case.
*/
key = []u8{trns.data[1], trns.data[3], trns.data[5]};
}
for len(p) > 0 {
r := p[0];
g := p[1];
b := p[2];
alpha := u8(1); // Default to full opaque
// TODO: Combine the seen_trns cases.
if seen_trns {
/*
For 8-bit images, the tRNS chunk still contains a triple in u16be.
We use only the low byte in this case.
*/
key = []u8{trns.data[1], trns.data[3], trns.data[5]};
}
for len(p) > 0 {
r := p[0];
g := p[1];
b := p[2];
alpha := u8(1); // Default to full opaque
// TODO: Combine the seen_trns cases.
if seen_trns {
if r == key[0] && g == key[1] && b == key[2] {
if seen_bkgd {
c := img.background.([3]u16);
r = u8(c[0]);
g = u8(c[1]);
b = u8(c[2]);
} else {
alpha = 0; // Keyed transparency
}
if r == key[0] && g == key[1] && b == key[2] {
if seen_bkgd {
c := img.background.([3]u16);
r = u8(c[0]);
g = u8(c[1]);
b = u8(c[2]);
} else {
alpha = 0; // Keyed transparency
}
if .alpha_premultiply in options || .blend_background in options {
o[0] = r * alpha;
o[1] = g * alpha;
o[2] = b * alpha;
}
} else {
o[0] = r;
o[1] = g;
o[2] = b;
}
if out_image_channels == 4 {
o[3] = alpha * 255;
if .alpha_premultiply in options || .blend_background in options {
o[0] = r * alpha;
o[1] = g * alpha;
o[2] = b * alpha;
}
p = p[3:];
o = o[out_image_channels:];
} else {
o[0] = r;
o[1] = g;
o[2] = b;
}
case 4:
// Color with Alpha, can't have tRNS.
for len(p) > 0 {
r := p[0];
g := p[1];
b := p[2];
a := p[3];
if .alpha_premultiply in options {
alpha := f32(a) / 255.0;
o[0] = u8(f32(r) * alpha);
o[1] = u8(f32(g) * alpha);
o[2] = u8(f32(b) * alpha);
} else {
o[0] = r;
o[1] = g;
o[2] = b;
}
if .alpha_drop_if_present not_in options {
o[3] = a;
}
p = p[4:];
o = o[out_image_channels:];
if out_image_channels == 4 {
o[3] = alpha * 255;
}
case:
unreachable("We should never seen # channels other than 1-4 inclusive.");
p = p[3:];
o = o[out_image_channels:];
}
case 4:
// Color with Alpha, can't have tRNS.
for len(p) > 0 {
r := p[0];
g := p[1];
b := p[2];
a := p[3];
if .alpha_premultiply in options {
alpha := f32(a) / 255.0;
o[0] = u8(f32(r) * alpha);
o[1] = u8(f32(g) * alpha);
o[2] = u8(f32(b) * alpha);
} else {
o[0] = r;
o[1] = g;
o[2] = b;
}
if .alpha_drop_if_present not_in options {
o[3] = a;
}
p = p[4:];
o = o[out_image_channels:];
}
case:
unreachable("We should never seen # channels other than 1-4 inclusive.");
}
img.pixels = t;
@@ -1181,13 +1180,13 @@ filter_paeth :: #force_inline proc(left, up, up_left: u8) -> u8 {
}
Filter_Params :: struct #packed {
src : []u8,
dest : []u8,
width : int,
height : int,
depth : int,
src: []u8,
dest: []u8,
width: int,
height: int,
depth: int,
channels: int,
rescale : bool,
rescale: bool,
}
depth_scale_table :: []u8{0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01};
@@ -1210,39 +1209,39 @@ defilter_8 :: proc(params: ^Filter_Params) -> (ok: bool) {
filter := Row_Filter(src[0]); src = src[1:];
// fmt.printf("Row: %v | Filter: %v\n", y, filter);
switch(filter) {
case .None:
copy(dest, src[:row_stride]);
case .Sub:
for i := 0; i < channels; i += 1 {
dest[i] = src[i];
}
for k := 0; k < nk; k += 1 {
dest[channels+k] = (src[channels+k] + dest[k]) & 255;
}
case .Up:
for k := 0; k < row_stride; k += 1 {
dest[k] = (src[k] + up[k]) & 255;
}
case .Average:
for i := 0; i < channels; i += 1 {
avg := up[i] >> 1;
dest[i] = (src[i] + avg) & 255;
}
for k := 0; k < nk; k += 1 {
avg := u8((u16(up[channels+k]) + u16(dest[k])) >> 1);
dest[channels+k] = (src[channels+k] + avg) & 255;
}
case .Paeth:
for i := 0; i < channels; i += 1 {
paeth := filter_paeth(0, up[i], 0);
dest[i] = (src[i] + paeth) & 255;
}
for k := 0; k < nk; k += 1 {
paeth := filter_paeth(dest[k], up[channels+k], up[k]);
dest[channels+k] = (src[channels+k] + paeth) & 255;
}
case:
return false;
case .None:
copy(dest, src[:row_stride]);
case .Sub:
for i := 0; i < channels; i += 1 {
dest[i] = src[i];
}
for k := 0; k < nk; k += 1 {
dest[channels+k] = (src[channels+k] + dest[k]) & 255;
}
case .Up:
for k := 0; k < row_stride; k += 1 {
dest[k] = (src[k] + up[k]) & 255;
}
case .Average:
for i := 0; i < channels; i += 1 {
avg := up[i] >> 1;
dest[i] = (src[i] + avg) & 255;
}
for k := 0; k < nk; k += 1 {
avg := u8((u16(up[channels+k]) + u16(dest[k])) >> 1);
dest[channels+k] = (src[channels+k] + avg) & 255;
}
case .Paeth:
for i := 0; i < channels; i += 1 {
paeth := filter_paeth(0, up[i], 0);
dest[i] = (src[i] + paeth) & 255;
}
for k := 0; k < nk; k += 1 {
paeth := filter_paeth(dest[k], up[channels+k], up[k]);
dest[channels+k] = (src[channels+k] + paeth) & 255;
}
case:
return false;
}
src = src[row_stride:];
@@ -1277,45 +1276,45 @@ defilter_less_than_8 :: proc(params: ^Filter_Params) -> (ok: bool) #no_bounds_ch
dest = dest[row_offset:];
filter := Row_Filter(src[0]); src = src[1:];
switch(filter) {
case .None:
copy(dest, src[:row_stride_in]);
case .Sub:
for i in 0..channels {
dest[i] = src[i];
}
for k in 0..nk {
dest[channels+k] = (src[channels+k] + dest[k]) & 255;
}
case .Up:
for k in 0..row_stride_in {
dest[k] = (src[k] + up[k]) & 255;
}
case .Average:
for i in 0..channels {
avg := up[i] >> 1;
dest[i] = (src[i] + avg) & 255;
}
for k in 0..nk {
avg := u8((u16(up[channels+k]) + u16(dest[k])) >> 1);
dest[channels+k] = (src[channels+k] + avg) & 255;
}
case .Paeth:
for i in 0..channels {
paeth := filter_paeth(0, up[i], 0);
dest[i] = (src[i] + paeth) & 255;
}
for k in 0..nk {
paeth := filter_paeth(dest[k], up[channels], up[k]);
dest[channels+k] = (src[channels+k] + paeth) & 255;
}
case:
return false;
switch filter {
case .None:
copy(dest, src[:row_stride_in]);
case .Sub:
for i in 0..channels {
dest[i] = src[i];
}
for k in 0..nk {
dest[channels+k] = (src[channels+k] + dest[k]) & 255;
}
case .Up:
for k in 0..row_stride_in {
dest[k] = (src[k] + up[k]) & 255;
}
case .Average:
for i in 0..channels {
avg := up[i] >> 1;
dest[i] = (src[i] + avg) & 255;
}
for k in 0..nk {
avg := u8((u16(up[channels+k]) + u16(dest[k])) >> 1);
dest[channels+k] = (src[channels+k] + avg) & 255;
}
case .Paeth:
for i in 0..channels {
paeth := filter_paeth(0, up[i], 0);
dest[i] = (src[i] + paeth) & 255;
}
for k in 0..nk {
paeth := filter_paeth(dest[k], up[channels], up[k]);
dest[channels+k] = (src[channels+k] + paeth) & 255;
}
case:
return false;
}
src = src [row_stride_in:];
up = dest;
dest = dest[row_stride_in:];
src = src [row_stride_in:];
up = dest;
dest = dest[row_stride_in:];
}
// Let's expand the bits
@@ -1334,7 +1333,8 @@ defilter_less_than_8 :: proc(params: ^Filter_Params) -> (ok: bool) #no_bounds_ch
for j := 0; j < height; j += 1 {
src = dest[row_offset:];
if depth == 4 {
switch depth {
case 4:
k := row_stride_out;
for ; k >= 2; k -= 2 {
c := src[0];
@@ -1347,7 +1347,7 @@ defilter_less_than_8 :: proc(params: ^Filter_Params) -> (ok: bool) #no_bounds_ch
dest[0] = scale * (c >> 4);
dest = dest[1:];
}
} else if depth == 2 {
case 2:
k := row_stride_out;
for ; k >= 4; k -= 4 {
c := src[0];
@@ -1368,7 +1368,7 @@ defilter_less_than_8 :: proc(params: ^Filter_Params) -> (ok: bool) #no_bounds_ch
}
dest = dest[k:];
}
} else if depth == 1 {
case 1:
k := row_stride_out;
for ; k >= 8; k -= 8 {
c := src[0];
@@ -1406,6 +1406,7 @@ defilter_less_than_8 :: proc(params: ^Filter_Params) -> (ok: bool) #no_bounds_ch
dest = dest[k:];
}
}
}
@@ -1429,40 +1430,40 @@ defilter_16 :: proc(params: ^Filter_Params) -> (ok: bool) {
nk := row_stride - stride;
filter := Row_Filter(src[0]); src = src[1:];
switch(filter) {
case .None:
copy(dest, src[:row_stride]);
case .Sub:
for i := 0; i < stride; i += 1 {
dest[i] = src[i];
}
for k := 0; k < nk; k += 1 {
dest[stride+k] = (src[stride+k] + dest[k]) & 255;
}
case .Up:
for k := 0; k < row_stride; k += 1 {
dest[k] = (src[k] + up[k]) & 255;
}
case .Average:
for i := 0; i < stride; i += 1 {
avg := up[i] >> 1;
dest[i] = (src[i] + avg) & 255;
}
for k := 0; k < nk; k += 1 {
avg := u8((u16(up[stride+k]) + u16(dest[k])) >> 1);
dest[stride+k] = (src[stride+k] + avg) & 255;
}
case .Paeth:
for i := 0; i < stride; i += 1 {
paeth := filter_paeth(0, up[i], 0);
dest[i] = (src[i] + paeth) & 255;
}
for k := 0; k < nk; k += 1 {
paeth := filter_paeth(dest[k], up[stride+k], up[k]);
dest[stride+k] = (src[stride+k] + paeth) & 255;
}
case:
return false;
switch filter {
case .None:
copy(dest, src[:row_stride]);
case .Sub:
for i := 0; i < stride; i += 1 {
dest[i] = src[i];
}
for k := 0; k < nk; k += 1 {
dest[stride+k] = (src[stride+k] + dest[k]) & 255;
}
case .Up:
for k := 0; k < row_stride; k += 1 {
dest[k] = (src[k] + up[k]) & 255;
}
case .Average:
for i := 0; i < stride; i += 1 {
avg := up[i] >> 1;
dest[i] = (src[i] + avg) & 255;
}
for k := 0; k < nk; k += 1 {
avg := u8((u16(up[stride+k]) + u16(dest[k])) >> 1);
dest[stride+k] = (src[stride+k] + avg) & 255;
}
case .Paeth:
for i := 0; i < stride; i += 1 {
paeth := filter_paeth(0, up[i], 0);
dest[i] = (src[i] + paeth) & 255;
}
for k := 0; k < nk; k += 1 {
paeth := filter_paeth(dest[k], up[stride+k], up[k]);
dest[stride+k] = (src[stride+k] + paeth) & 255;
}
case:
return false;
}
src = src[row_stride:];
@@ -1510,7 +1511,7 @@ defilter :: proc(img: ^Image, filter_bytes: ^bytes.Buffer, header: ^IHDR, option
if !filter_ok {
// Caller will destroy buffer for us.
return E_PNG.Unknown_Filter_Method;
}
}
} else {
/*
For deinterlacing we need to make a temporary buffer, defilter part of the image,
@@ -1582,7 +1583,7 @@ defilter :: proc(img: ^Image, filter_bytes: ^bytes.Buffer, header: ^IHDR, option
}
}
return E_General.OK;
return E_General.OK;
}
load :: proc{load_from_file, load_from_slice, load_from_stream};
load :: proc{load_from_file, load_from_slice, load_from_stream};
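
The remaining signature change is the default-options spelling, options: Options = {} becoming options := Options{}; call sites behave the same either way. A hedged usage sketch follows — the core:image/png import path and the file name are assumptions, while the .info option and the load proc group come from the diff.

package example

import "core:fmt"
import "core:image/png"

main :: proc() {
	// Default options.
	img, err := png.load("test.png");
	fmt.println(err);
	_ = img;

	// An explicit option set, e.g. header/metadata only.
	info, info_err := png.load("test.png", png.Options{.info});
	fmt.println(info_err);
	_ = info;
}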