Minor cleanups to the core library

This commit is contained in:
gingerBill
2023-09-30 20:26:04 +01:00
parent 3e0fd63682
commit 5023313c03
7 changed files with 43 additions and 86 deletions

View File

@@ -118,9 +118,7 @@ reader_peek :: proc(b: ^Reader, n: int) -> (data: []byte, err: io.Error) {
b.last_rune_size = -1
for b.w-b.r < n && b.w-b.r < len(b.buf) && b.err == nil {
if fill_err := _reader_read_new_chunk(b); fill_err != nil {
return nil, fill_err
}
_reader_read_new_chunk(b) or_return
}
if n > len(b.buf) {
@@ -156,9 +154,7 @@ reader_discard :: proc(b: ^Reader, n: int) -> (discarded: int, err: io.Error) {
for {
skip := reader_buffered(b)
if skip == 0 {
if fill_err := _reader_read_new_chunk(b); fill_err != nil {
return 0, fill_err
}
_reader_read_new_chunk(b) or_return
skip = reader_buffered(b)
}
skip = min(skip, remaining)
@@ -223,20 +219,18 @@ reader_read :: proc(b: ^Reader, p: []byte) -> (n: int, err: io.Error) {
// reader_read_byte reads and returns a single byte
// If no byte is available, it returns an error
reader_read_byte :: proc(b: ^Reader) -> (byte, io.Error) {
reader_read_byte :: proc(b: ^Reader) -> (c: byte, err: io.Error) {
b.last_rune_size = -1
for b.r == b.w {
if b.err != nil {
return 0, _reader_consume_err(b)
}
if err := _reader_read_new_chunk(b); err != nil {
return 0, err
}
_reader_read_new_chunk(b) or_return
}
c := b.buf[b.r]
c = b.buf[b.r]
b.r += 1
b.last_byte = int(c)
return c, nil
return
}
// reader_unread_byte unreads the last byte. Only the most recently read byte can be unread
@@ -264,15 +258,12 @@ reader_read_rune :: proc(b: ^Reader) -> (r: rune, size: int, err: io.Error) {
!utf8.full_rune(b.buf[b.r:b.w]) &&
b.err == nil &&
b.w-b.w < len(b.buf) {
if err = _reader_read_new_chunk(b); err != nil {
return
}
_reader_read_new_chunk(b) or_return
}
b.last_rune_size = -1
if b.r == b.w {
err = _reader_consume_err(b)
return
return 0, 0, _reader_consume_err(b)
}
r, size = rune(b.buf[b.r]), 1
if r >= utf8.RUNE_SELF {
@@ -305,27 +296,20 @@ reader_write_to :: proc(b: ^Reader, w: io.Writer) -> (n: i64, err: io.Error) {
return i64(n), err
}
n, err = write_buf(b, w)
if err != nil {
return
}
n = write_buf(b, w) or_return
m: i64
if b.w-b.r < len(b.buf) {
if err = _reader_read_new_chunk(b); err != nil {
return
}
_reader_read_new_chunk(b) or_return
}
for b.r < b.w {
m, err = write_buf(b, w)
n += m
n += m // this needs to be done before returning
if err != nil {
return
}
if err = _reader_read_new_chunk(b); err != nil {
return
}
_reader_read_new_chunk(b) or_return
}
if b.err == .EOF {
@@ -403,9 +387,7 @@ reader_read_slice :: proc(b: ^Reader, delim: byte) -> (line: []byte, err: io.Err
s = b.w - b.r
if err = _reader_read_new_chunk(b); err != nil {
break
}
_reader_read_new_chunk(b) or_break
}
if i := len(line)-1; i >= 0 {

View File

@@ -216,24 +216,16 @@ read_slice_from_stream :: #force_inline proc(z: ^Context_Stream_Input, size: int
// TODO: REMOVE ALL USE OF context.temp_allocator here
// there is literally no need for it
b := make([]u8, size, context.temp_allocator)
_, e := io.read(z.input, b[:])
if e == .None {
return b, .None
}
return []u8{}, e
_ = io.read(z.input, b[:]) or_return
return b, nil
}
read_slice :: proc{read_slice_from_memory, read_slice_from_stream}
@(optimization_mode="speed")
read_data :: #force_inline proc(z: ^$C, $T: typeid) -> (res: T, err: io.Error) {
b, e := read_slice(z, size_of(T))
if e == .None {
return (^T)(&b[0])^, .None
}
return T{}, e
b := read_slice(z, size_of(T)) or_return
return (^T)(&b[0])^, nil
}
@(optimization_mode="speed")
@@ -250,12 +242,8 @@ read_u8_from_memory :: #force_inline proc(z: ^Context_Memory_Input) -> (res: u8,
@(optimization_mode="speed")
read_u8_from_stream :: #force_inline proc(z: ^Context_Stream_Input) -> (res: u8, err: io.Error) {
b, e := read_slice_from_stream(z, 1)
if e == .None {
return b[0], .None
}
return 0, e
b := read_slice_from_stream(z, 1) or_return
return b[0], nil
}
read_u8 :: proc{read_u8_from_memory, read_u8_from_stream}
@@ -320,12 +308,9 @@ peek_data_from_stream :: #force_inline proc(z: ^Context_Stream_Input, $T: typeid
size :: size_of(T)
// Get current position to read from.
curr, e1 := z.input->impl_seek(0, .Current)
if e1 != .None {
return T{}, e1
}
r, e2 := io.to_reader_at(z.input)
if !e2 {
curr := z.input->impl_seek(0, .Current) or_return
r, e1 := io.to_reader_at(z.input)
if !e1 {
return T{}, .Empty
}
when size <= 128 {
@@ -333,8 +318,8 @@ peek_data_from_stream :: #force_inline proc(z: ^Context_Stream_Input, $T: typeid
} else {
b := make([]u8, size, context.temp_allocator)
}
_, e3 := io.read_at(r, b[:], curr)
if e3 != .None {
_, e2 := io.read_at(r, b[:], curr)
if e2 != .None {
return T{}, .Empty
}
@@ -347,16 +332,9 @@ peek_data_at_offset_from_stream :: #force_inline proc(z: ^Context_Stream_Input,
size :: size_of(T)
// Get current position to return to.
cur_pos, e1 := z.input->impl_seek(0, .Current)
if e1 != .None {
return T{}, e1
}
cur_pos := z.input->impl_seek(0, .Current) or_return
// Seek to offset.
pos, e2 := z.input->impl_seek(offset, .Start)
if e2 != .None {
return T{}, e2
}
pos := z.input->impl_seek(offset, .Start) or_return
r, e3 := io.to_reader_at(z.input)
if !e3 {
@@ -465,7 +443,7 @@ peek_bits_lsb_from_memory :: #force_inline proc(z: ^Context_Memory_Input, width:
if z.num_bits < u64(width) {
refill_lsb(z)
}
return u32(z.code_buffer & ~(~u64(0) << width))
return u32(z.code_buffer &~ (~u64(0) << width))
}
@(optimization_mode="speed")
@@ -473,7 +451,7 @@ peek_bits_lsb_from_stream :: #force_inline proc(z: ^Context_Stream_Input, width:
if z.num_bits < u64(width) {
refill_lsb(z)
}
return u32(z.code_buffer & ~(~u64(0) << width))
return u32(z.code_buffer &~ (~u64(0) << width))
}
peek_bits_lsb :: proc{peek_bits_lsb_from_memory, peek_bits_lsb_from_stream}
@@ -481,13 +459,13 @@ peek_bits_lsb :: proc{peek_bits_lsb_from_memory, peek_bits_lsb_from_stream}
@(optimization_mode="speed")
peek_bits_no_refill_lsb_from_memory :: #force_inline proc(z: ^Context_Memory_Input, width: u8) -> u32 {
assert(z.num_bits >= u64(width))
return u32(z.code_buffer & ~(~u64(0) << width))
return u32(z.code_buffer &~ (~u64(0) << width))
}
@(optimization_mode="speed")
peek_bits_no_refill_lsb_from_stream :: #force_inline proc(z: ^Context_Stream_Input, width: u8) -> u32 {
assert(z.num_bits >= u64(width))
return u32(z.code_buffer & ~(~u64(0) << width))
return u32(z.code_buffer &~ (~u64(0) << width))
}
peek_bits_no_refill_lsb :: proc{peek_bits_no_refill_lsb_from_memory, peek_bits_no_refill_lsb_from_stream}

View File

@@ -335,10 +335,8 @@ load_from_context :: proc(z: ^$C, buf: ^bytes.Buffer, known_gzip_size := -1, exp
// fmt.printf("GZIP: Expected Payload Size: %v\n", expected_output_size);
zlib_error := zlib.inflate_raw(z, expected_output_size=expected_output_size)
if zlib_error != nil {
return zlib_error
}
zlib.inflate_raw(z, expected_output_size=expected_output_size) or_return
/*
Read CRC32 using the ctx bit reader because zlib may leave bytes in there.
*/

View File

@@ -248,7 +248,7 @@ _read_record :: proc(r: ^Reader, dst: ^[dynamic]string, allocator := context.all
field_length += 1
case '\n', '\r':
if !is_quoted { break read_loop }
is_quoted or_break read_loop
case r.comma:
field_length = 0

View File

@@ -1163,7 +1163,7 @@ internal_int_prime_next_prime :: proc(a: ^Int, trials: int, bbs_style: bool, all
/*
If we didn't pass the sieve and step == MP_MAX then skip test */
if (y && (step >= ((1 << _DIGIT_BITS) - kstep))) { continue }
if y && (step >= ((1 << _DIGIT_BITS) - kstep)) { continue }
if internal_int_is_prime(a, trials) or_return { break }
}

View File

@@ -1089,7 +1089,7 @@ _private_int_div_school :: proc(quotient, remainder, numerator, denominator: ^In
Step 3. for i from n down to (t + 1).
*/
#no_bounds_check for i := n; i >= (t + 1); i -= 1 {
if (i > x.used) { continue }
if i > x.used { continue }
/*
step 3.1 if xi == yt then set q{i-t-1} to b-1, otherwise set q{i-t-1} to (xi*b + x{i-1})/yt

View File

@@ -30,9 +30,12 @@ TS_XML_Options := xml.Options{
parse_qt_linguist_from_bytes :: proc(data: []byte, options := DEFAULT_PARSE_OPTIONS, pluralizer: proc(int) -> int = nil, allocator := context.allocator) -> (translation: ^Translation, err: Error) {
context.allocator = allocator
get_str :: proc(val: xml.Value) -> (str: string, err: Error) {
get_str :: proc(val: xml.Value, intern: ^strings.Intern = nil) -> (str: string, err: Error) {
v, ok := val.(string)
if ok {
if intern != nil {
v, _ = strings.intern_get(intern, v)
}
return v, .None
}
return "", .Bad_Str
@@ -79,8 +82,7 @@ parse_qt_linguist_from_bytes :: proc(data: []byte, options := DEFAULT_PARSE_OPTI
section_name, _ := strings.intern_get(&translation.intern, "")
if !options.merge_sections {
value_text := get_str(ts.elements[section_name_id].value[0]) or_return
section_name, _ = strings.intern_get(&translation.intern, value_text)
section_name = get_str(ts.elements[section_name_id].value[0], &translation.intern) or_return
}
if section_name not_in translation.k_v {
@@ -108,13 +110,11 @@ parse_qt_linguist_from_bytes :: proc(data: []byte, options := DEFAULT_PARSE_OPTI
return translation, .TS_File_Expected_Translation
}
source := get_str(ts.elements[source_id].value[0]) or_return
source, _ = strings.intern_get(&translation.intern, source)
source := get_str(ts.elements[source_id].value[0], &translation.intern) or_return
xlat := ""
if !has_plurals {
xlat = get_str(ts.elements[translation_id].value[0]) or_return
xlat, _ = strings.intern_get(&translation.intern, xlat)
xlat = get_str(ts.elements[translation_id].value[0], &translation.intern) or_return
}
if source in section {
@@ -140,8 +140,7 @@ parse_qt_linguist_from_bytes :: proc(data: []byte, options := DEFAULT_PARSE_OPTI
num_plurals = 0
for {
numerus_id := xml.find_child_by_ident(ts, translation_id, "numerusform", num_plurals) or_break
numerus := get_str(ts.elements[numerus_id].value[0]) or_return
numerus, _ = strings.intern_get(&translation.intern, numerus)
numerus := get_str(ts.elements[numerus_id].value[0], &translation.intern) or_return
section[source][num_plurals] = numerus
num_plurals += 1