Make encoding/json use []byte rather than string

gingerBill
2019-01-07 23:08:38 +00:00
parent cd2c4c02e1
commit 5af20aa467
5 changed files with 68 additions and 19 deletions
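After this change the public API traffics in []byte on both sides: marshal returns the builder's buffer directly and parse consumes a byte slice. A minimal usage sketch against the procedures in this diff (the transmute is an assumption that relies on string and []byte sharing the same data/length layout):

	import "core:fmt"
	import "core:encoding/json"

	example :: proc() {
		out, merr := json.marshal(123); // out is []byte now, not string
		fmt.println(string(out), merr);

		// Existing string data can be reinterpreted without copying:
		s := `{"key": true}`;
		value, perr := json.parse(transmute([]byte)s); // parse takes []byte after this commit
		fmt.println(value, perr);
	}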

View File

@@ -1,6 +1,7 @@
 package json
 
 import "core:mem"
+import "core:bits"
 import "core:runtime"
 import "core:strconv"
 import "core:strings"
@@ -20,7 +21,10 @@ marshal :: proc(v: any, allocator := context.allocator) -> ([]byte, Marshal_Error) {
 		strings.destroy_builder(&b);
 		return nil, err;
 	}
 
+	if len(b.buf) == 0 {
+		strings.destroy_builder(&b);
+		return nil, err;
+	}
 	return b.buf[:], err;
 }
@@ -242,7 +246,7 @@ marshal_arg :: proc(b: ^strings.Builder, v: any) -> Marshal_Error {
 		}
 	case Type_Info_Enum:
-		return Marshal_Error.Unsupported_Type;
+		return marshal_arg(b, any{v.data, info.base.id});
 	case Type_Info_Bit_Field:
 		data: u64 = 0;
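A side fix in the hunk above: enum values were previously rejected outright, and now recurse into marshal_arg as their base type. A hypothetical sketch of the assumed effect:

	import "core:fmt"
	import "core:encoding/json"

	Color :: enum int {Red = 1, Green = 2};

	enum_example :: proc() {
		out, err := json.marshal(Color.Green);
		fmt.println(string(out), err); // expected "2" (assumed output)
	}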
@@ -273,6 +277,50 @@ marshal_arg :: proc(b: ^strings.Builder, v: any) -> Marshal_Error {
 		write_byte(b, '}');
 
 	case Type_Info_Bit_Set:
+		is_bit_set_different_endian_to_platform :: proc(ti: ^runtime.Type_Info) -> bool {
+			if ti == nil {
+				return false;
+			}
+			ti = runtime.type_info_base(ti);
+			switch info in ti.variant {
+			case runtime.Type_Info_Integer:
+				using runtime.Type_Info_Endianness;
+				switch info.endianness {
+				case Platform: return false;
+				case Little:   return ODIN_ENDIAN != "little";
+				case Big:      return ODIN_ENDIAN != "big";
+				}
+			}
+			return false;
+		}
+
+		bit_data: u64;
+		bit_size := u64(8*ti.size);
+
+		do_byte_swap := is_bit_set_different_endian_to_platform(info.underlying);
+
+		switch bit_size {
+		case  0: bit_data = 0;
+		case  8:
+			x := (^u8)(v.data)^;
+			bit_data = u64(x);
+		case 16:
+			x := (^u16)(v.data)^;
+			if do_byte_swap do x = bits.byte_swap(x);
+			bit_data = u64(x);
+		case 32:
+			x := (^u32)(v.data)^;
+			if do_byte_swap do x = bits.byte_swap(x);
+			bit_data = u64(x);
+		case 64:
+			x := (^u64)(v.data)^;
+			if do_byte_swap do x = bits.byte_swap(x);
+			bit_data = u64(x);
+		case: panic("unknown bit_size size");
+		}
+
+		write_u64(b, bit_data);
-		return Marshal_Error.Unsupported_Type;
 
 	case Type_Info_Opaque:
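The new Type_Info_Bit_Set branch serialises the set's backing integer, byte-swapping first when the declared endianness differs from the platform's. A hypothetical sketch of what now marshals instead of returning Unsupported_Type:

	import "core:fmt"
	import "core:encoding/json"

	Flag :: enum {A, B, C};

	bit_set_example :: proc() {
		flags: bit_set[Flag] = {Flag.A, Flag.C}; // bits 0 and 2 set
		out, err := json.marshal(flags);
		fmt.println(string(out), err); // expected "5", i.e. 0b101 (assumed output)
	}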

View File

@@ -11,7 +11,7 @@ Parser :: struct {
 	allocator: mem.Allocator,
 }
 
-make_parser :: proc(data: string, spec := Specification.JSON, allocator := context.allocator) -> Parser {
+make_parser :: proc(data: []byte, spec := Specification.JSON, allocator := context.allocator) -> Parser {
 	p: Parser;
 	p.tok = make_tokenizer(data, spec);
 	p.spec = spec;
@@ -21,7 +21,7 @@ make_parser :: proc(data: string, spec := Specification.JSON, allocator := context.allocator) -> Parser {
 	return p;
 }
 
-parse :: proc(data: string, spec := Specification.JSON, allocator := context.allocator) -> (Value, Error) {
+parse :: proc(data: []byte, spec := Specification.JSON, allocator := context.allocator) -> (Value, Error) {
 	context.allocator = allocator;
 	p := make_parser(data, spec, allocator);

View File

@@ -35,17 +35,17 @@ Kind :: enum {
 }
 
 Tokenizer :: struct {
-	using pos: Pos,
-	data: string,
-	r: rune, // current rune
-	w: int,  // current rune width in bytes
+	using pos:        Pos,
+	data:             []byte,
+	r:                rune, // current rune
+	w:                int,  // current rune width in bytes
 	curr_line_offset: int,
-	spec: Specification,
+	spec:             Specification,
 }
 
-make_tokenizer :: proc(data: string, spec := Specification.JSON) -> Tokenizer {
+make_tokenizer :: proc(data: []byte, spec := Specification.JSON) -> Tokenizer {
 	t := Tokenizer{pos = {line=1}, data = data, spec = spec};
 	next_rune(&t);
 	if t.r == utf8.RUNE_BOM {
@@ -59,7 +59,7 @@ next_rune :: proc(t: ^Tokenizer) -> rune #no_bounds_check {
 		return utf8.RUNE_EOF;
 	}
 	t.offset += t.w;
-	t.r, t.w = utf8.decode_rune_in_string(t.data[t.offset:]);
+	t.r, t.w = utf8.decode_rune(t.data[t.offset:]);
 	t.pos.column = t.offset - t.curr_line_offset;
 	return t.r;
 }
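decode_rune is assumed here to be the []byte counterpart of decode_rune_in_string in the utf8 package (import path assumed to be core:unicode/utf8), letting the tokenizer decode straight out of its buffer. A small sketch of that assumed behaviour:

	import "core:unicode/utf8"

	decode_example :: proc() {
		buf := []byte{0xE2, 0x82, 0xAC}; // UTF-8 encoding of U+20AC '€'
		r, w := utf8.decode_rune(buf);
		assert(r == '€' && w == 3);
	}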
@@ -174,7 +174,7 @@ get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) {
 		skip_alphanum(t);
-		switch str := t.data[token.offset:t.offset]; str {
+		switch str := string(t.data[token.offset:t.offset]); str {
 		case "null":  token.kind = Kind.Null;
 		case "false": token.kind = Kind.False;
 		case "true":  token.kind = Kind.True;
@@ -204,7 +204,7 @@ get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) {
 		if t.r == 'I' || t.r == 'N' {
 			skip_alphanum(t);
 		}
-		switch t.data[token.offset:t.offset] {
+		switch string(t.data[token.offset:t.offset]) {
 		case "-Infinity": token.kind = Kind.Infinity;
 		case "-NaN":      token.kind = Kind.NaN;
 		}
@@ -224,7 +224,7 @@ get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) {
 			}
 			skip_digits(t);
 		}
-		str := t.data[token.offset:t.offset];
+		str := string(t.data[token.offset:t.offset]);
 		if !is_valid_number(str, t.spec) {
 			err = Error.Invalid_Number;
 		}
@@ -254,7 +254,7 @@ get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) {
 			skip_digits(t);
 		}
-		str := t.data[token.offset:t.offset];
+		str := string(t.data[token.offset:t.offset]);
 		if !is_valid_number(str, t.spec) {
 			err = Error.Invalid_Number;
 		}
@@ -284,7 +284,8 @@ get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) {
 			}
 		}
-		if !is_valid_string_literal(t.data[token.offset : t.offset], t.spec) {
+		str := string(t.data[token.offset : t.offset]);
+		if !is_valid_string_literal(str, t.spec) {
 			err = Error.Invalid_String;
 		}
@@ -323,7 +324,7 @@ get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) {
 	case: err = Error.Illegal_Character;
 	}
-	token.text = t.data[token.offset : t.offset];
+	token.text = string(t.data[token.offset : t.offset]);
 	return;
 }
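Each string(...) wrapper introduced above is a conversion, not a copy: in Odin a []byte may be reinterpreted as a string view over the same bytes. A minimal sketch of those semantics:

	cast_example :: proc() {
		bytes := []byte{'t', 'r', 'u', 'e'};
		s := string(bytes); // same backing memory, no allocation
		assert(s == "true");
	}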

View File

@@ -3,7 +3,7 @@ package json
 import "core:mem"
 
 // NOTE(bill): is_valid will not check for duplicate keys
-is_valid :: proc(data: string, spec := Specification.JSON) -> bool {
+is_valid :: proc(data: []byte, spec := Specification.JSON) -> bool {
 	p := make_parser(data, spec, mem.nil_allocator());
 	if p.spec == Specification.JSON5 {
 		return validate_value(&p);
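With is_valid taking []byte, the raw result of a file read can be validated with no intermediate string (and, given the nil allocator above, no parse-tree allocation). A sketch assuming core:os's read_entire_file:

	import "core:os"
	import "core:encoding/json"

	validate_file :: proc(path: string) -> bool {
		data, ok := os.read_entire_file(path); // assumed to return ([]byte, bool)
		if !ok {
			return false;
		}
		defer delete(data);
		return json.is_valid(data);
	}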

View File

@@ -739,7 +739,7 @@ enum_value_to_u64 :: proc(ev: runtime.Type_Info_Enum_Value) -> u64 {
 }
 
 fmt_bit_set :: proc(fi: ^Info, v: any, name: string = "") {
-	is_bit_set_different_endian_to_platform :: proc(ti: ^runtime.Type_Info, ) -> bool {
+	is_bit_set_different_endian_to_platform :: proc(ti: ^runtime.Type_Info) -> bool {
 		if ti == nil {
 			return false;
 		}