diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
index 67d0396c1..0bfaa11e9 100644
--- a/.github/workflows/nightly.yml
+++ b/.github/workflows/nightly.yml
@@ -7,6 +7,7 @@ on:
jobs:
build_windows:
+ if: github.repository == 'odin-lang/Odin'
runs-on: windows-2022
steps:
- uses: actions/checkout@v1
@@ -37,6 +38,7 @@ jobs:
name: windows_artifacts
path: dist
build_ubuntu:
+ if: github.repository == 'odin-lang/Odin'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
@@ -61,6 +63,7 @@ jobs:
name: ubuntu_artifacts
path: dist
build_macos:
+ if: github.repository == 'odin-lang/Odin'
runs-on: macOS-latest
steps:
- uses: actions/checkout@v1
diff --git a/.gitignore b/.gitignore
index 824e0c203..3528af624 100644
--- a/.gitignore
+++ b/.gitignore
@@ -270,6 +270,7 @@ bin/
# - Linux/MacOS
odin
+!odin/
odin.dSYM
*.bin
demo.bin
@@ -286,4 +287,4 @@ shared/
*.sublime-workspace
examples/bug/
build.sh
-!core/debug/
\ No newline at end of file
+!core/debug/
diff --git a/build.bat b/build.bat
index 99c3ad2ee..b7537fba6 100644
--- a/build.bat
+++ b/build.bat
@@ -3,18 +3,20 @@
setlocal EnableDelayedExpansion
where /Q cl.exe || (
- set __VSCMD_ARG_NO_LOGO=1
- for /f "tokens=*" %%i in ('"C:\Program Files (x86)\Microsoft Visual Studio\Installer\vswhere.exe" -latest -requires Microsoft.VisualStudio.Workload.NativeDesktop -property installationPath') do set VS=%%i
- if "!VS!" equ "" (
- echo ERROR: Visual Studio installation not found
- exit /b 1
- )
- call "!VS!\VC\Auxiliary\Build\vcvarsall.bat" amd64 || exit /b 1
+ set __VSCMD_ARG_NO_LOGO=1
+ for /f "tokens=*" %%i in ('"C:\Program Files (x86)\Microsoft Visual Studio\Installer\vswhere.exe" -latest -requires Microsoft.VisualStudio.Workload.NativeDesktop -property installationPath') do set VS=%%i
+ if "!VS!" equ "" (
+ echo ERROR: Visual Studio installation not found
+ exit /b 1
+ )
+ call "!VS!\VC\Auxiliary\Build\vcvarsall.bat" amd64 || exit /b 1
)
if "%VSCMD_ARG_TGT_ARCH%" neq "x64" (
- echo ERROR: please run this from MSVC x64 native tools command prompt, 32-bit target is not supported!
- exit /b 1
+ if "%ODIN_IGNORE_MSVC_CHECK%" == "" (
+ echo ERROR: please run this from MSVC x64 native tools command prompt, 32-bit target is not supported!
+ exit /b 1
+ )
)
for /f "usebackq tokens=1,2 delims=,=- " %%i in (`wmic os get LocalDateTime /value`) do @if %%i==LocalDateTime (
diff --git a/core/bufio/read_writer.odin b/core/bufio/read_writer.odin
index f9ae1ed45..3e6bd3aa0 100644
--- a/core/bufio/read_writer.odin
+++ b/core/bufio/read_writer.odin
@@ -14,51 +14,29 @@ read_writer_init :: proc(rw: ^Read_Writer, r: ^Reader, w: ^Writer) {
}
read_writer_to_stream :: proc(rw: ^Read_Writer) -> (s: io.Stream) {
- s.stream_data = rw
- s.stream_vtable = &_read_writer_vtable
+ s.procedure = _read_writer_procedure
+ s.data = rw
return
}
@(private)
-_read_writer_vtable := io.Stream_VTable{
- impl_read = proc(s: io.Stream, p: []byte) -> (n: int, err: io.Error) {
- b := (^Read_Writer)(s.stream_data).r
- return reader_read(b, p)
- },
- impl_unread_byte = proc(s: io.Stream) -> io.Error {
- b := (^Read_Writer)(s.stream_data).r
- return reader_unread_byte(b)
- },
- impl_read_rune = proc(s: io.Stream) -> (r: rune, size: int, err: io.Error) {
- b := (^Read_Writer)(s.stream_data).r
- return reader_read_rune(b)
- },
- impl_unread_rune = proc(s: io.Stream) -> io.Error {
- b := (^Read_Writer)(s.stream_data).r
- return reader_unread_rune(b)
- },
- impl_write_to = proc(s: io.Stream, w: io.Writer) -> (n: i64, err: io.Error) {
- b := (^Read_Writer)(s.stream_data).r
- return reader_write_to(b, w)
- },
- impl_flush = proc(s: io.Stream) -> io.Error {
- b := (^Read_Writer)(s.stream_data).w
- return writer_flush(b)
- },
- impl_write = proc(s: io.Stream, p: []byte) -> (n: int, err: io.Error) {
- b := (^Read_Writer)(s.stream_data).w
- return writer_write(b, p)
- },
- impl_write_byte = proc(s: io.Stream, c: byte) -> io.Error {
- b := (^Read_Writer)(s.stream_data).w
- return writer_write_byte(b, c)
- },
- impl_write_rune = proc(s: io.Stream, r: rune) -> (int, io.Error) {
- b := (^Read_Writer)(s.stream_data).w
- return writer_write_rune(b, r)
- },
- impl_read_from = proc(s: io.Stream, r: io.Reader) -> (n: i64, err: io.Error) {
- b := (^Read_Writer)(s.stream_data).w
- return writer_read_from(b, r)
- },
-}
+_read_writer_procedure := proc(stream_data: rawptr, mode: io.Stream_Mode, p: []byte, offset: i64, whence: io.Seek_From) -> (n: i64, err: io.Error) {
+ rw := (^Read_Writer)(stream_data)
+ n_int: int
+ #partial switch mode {
+ case .Flush:
+ err = writer_flush(rw.w)
+ return
+ case .Read:
+ n_int, err = reader_read(rw.r, p)
+ n = i64(n_int)
+ return
+ case .Write:
+ n_int, err = writer_write(rw.w, p)
+ n = i64(n_int)
+ return
+ case .Query:
+ return io.query_utility({.Flush, .Read, .Write, .Query})
+ }
+ return 0, .Empty
+}
\ No newline at end of file
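// --- Illustrative sketch (not part of this patch): the single-procedure
// stream interface that replaces io.Stream_VTable. A stream now carries one
// Stream_Proc plus a data pointer and reports its capabilities via .Query.
// `Null_Stream` and `null_stream_to_stream` are hypothetical names.
package example

import "core:io"

Null_Stream :: struct {}

_null_stream_proc :: proc(stream_data: rawptr, mode: io.Stream_Mode, p: []byte, offset: i64, whence: io.Seek_From) -> (n: i64, err: io.Error) {
	#partial switch mode {
	case .Read:
		// Always at end of stream: reads zero bytes.
		return 0, .EOF
	case .Query:
		return io.query_utility({.Read, .Query})
	}
	return 0, .Empty
}

null_stream_to_stream :: proc(ns: ^Null_Stream) -> (s: io.Stream) {
	s.procedure = _null_stream_proc
	s.data = ns
	return
}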
diff --git a/core/bufio/reader.odin b/core/bufio/reader.odin
index 6bfc4cd9d..dc4e02c02 100644
--- a/core/bufio/reader.odin
+++ b/core/bufio/reader.odin
@@ -311,18 +311,6 @@ reader_write_to :: proc(b: ^Reader, w: io.Writer) -> (n: i64, err: io.Error) {
}
m: i64
- if nr, ok := io.to_writer_to(b.rd); ok {
- m, err = io.write_to(nr, w)
- n += m
- return n, err
- }
-
- if nw, ok := io.to_reader_from(w); ok {
- m, err = io.read_from(nw, b.rd)
- n += m
- return n, err
- }
-
if b.w-b.r < len(b.buf) {
if err = _reader_read_new_chunk(b); err != nil {
return
@@ -352,48 +340,28 @@ reader_write_to :: proc(b: ^Reader, w: io.Writer) -> (n: i64, err: io.Error) {
// reader_to_stream converts a Reader into an io.Stream
reader_to_stream :: proc(b: ^Reader) -> (s: io.Stream) {
- s.stream_data = b
- s.stream_vtable = &_reader_vtable
+ s.data = b
+ s.procedure = _reader_proc
return
}
@(private)
-_reader_vtable := io.Stream_VTable{
- impl_destroy = proc(s: io.Stream) -> io.Error {
- b := (^Reader)(s.stream_data)
+_reader_proc :: proc(stream_data: rawptr, mode: io.Stream_Mode, p: []byte, offset: i64, whence: io.Seek_From) -> (n: i64, err: io.Error) {
+ b := (^Reader)(stream_data)
+ #partial switch mode {
+ case .Read:
+ return io._i64_err(reader_read(b, p))
+ case .Destroy:
reader_destroy(b)
- return nil
- },
- impl_read = proc(s: io.Stream, p: []byte) -> (n: int, err: io.Error) {
- b := (^Reader)(s.stream_data)
- return reader_read(b, p)
- },
- impl_read_byte = proc(s: io.Stream) -> (c: byte, err: io.Error) {
- b := (^Reader)(s.stream_data)
- return reader_read_byte(b)
- },
- impl_unread_byte = proc(s: io.Stream) -> io.Error {
- b := (^Reader)(s.stream_data)
- return reader_unread_byte(b)
- },
- impl_read_rune = proc(s: io.Stream) -> (r: rune, size: int, err: io.Error) {
- b := (^Reader)(s.stream_data)
- return reader_read_rune(b)
- },
- impl_unread_rune = proc(s: io.Stream) -> io.Error {
- b := (^Reader)(s.stream_data)
- return reader_unread_rune(b)
- },
- impl_write_to = proc(s: io.Stream, w: io.Writer) -> (n: i64, err: io.Error) {
- b := (^Reader)(s.stream_data)
- return reader_write_to(b, w)
- },
+ return
+ case .Query:
+ return io.query_utility({.Read, .Destroy, .Query})
+ }
+ return 0, .Empty
}
-
-
//
// Utility procedures
//
diff --git a/core/bufio/writer.odin b/core/bufio/writer.odin
index ed0d557c5..bfa8b804f 100644
--- a/core/bufio/writer.odin
+++ b/core/bufio/writer.odin
@@ -173,14 +173,6 @@ writer_read_from :: proc(b: ^Writer, r: io.Reader) -> (n: i64, err: io.Error) {
if b.err != nil {
return 0, b.err
}
- if writer_buffered(b) == 0 {
- if w, ok := io.to_reader_from(b.wr); !ok {
- n, err = io.read_from(w, r)
- b.err = err
- return
- }
- }
-
for {
if writer_available(b) == 0 {
writer_flush(b) or_return
@@ -222,46 +214,35 @@ writer_read_from :: proc(b: ^Writer, r: io.Reader) -> (n: i64, err: io.Error) {
// writer_to_stream converts a Writer into an io.Stream
writer_to_stream :: proc(b: ^Writer) -> (s: io.Stream) {
- s.stream_data = b
- s.stream_vtable = &_writer_vtable
+ s.data = b
+ s.procedure = _writer_proc
return
}
// writer_to_writer converts a Writer into an io.Writer
writer_to_writer :: proc(b: ^Writer) -> (s: io.Writer) {
- s.stream_data = b
- s.stream_vtable = &_writer_vtable
- return
+ return writer_to_stream(b)
}
-
@(private)
-_writer_vtable := io.Stream_VTable{
- impl_destroy = proc(s: io.Stream) -> io.Error {
- b := (^Writer)(s.stream_data)
+_writer_proc :: proc(stream_data: rawptr, mode: io.Stream_Mode, p: []byte, offset: i64, whence: io.Seek_From) -> (n: i64, err: io.Error) {
+ b := (^Writer)(stream_data)
+ #partial switch mode {
+ case .Flush:
+ err = writer_flush(b)
+ return
+ case .Write:
+ n_int: int
+ n_int, err = writer_write(b, p)
+ n = i64(n_int)
+ return
+ case .Destroy:
writer_destroy(b)
- return nil
- },
- impl_flush = proc(s: io.Stream) -> io.Error {
- b := (^Writer)(s.stream_data)
- return writer_flush(b)
- },
- impl_write = proc(s: io.Stream, p: []byte) -> (n: int, err: io.Error) {
- b := (^Writer)(s.stream_data)
- return writer_write(b, p)
- },
- impl_write_byte = proc(s: io.Stream, c: byte) -> io.Error {
- b := (^Writer)(s.stream_data)
- return writer_write_byte(b, c)
- },
- impl_write_rune = proc(s: io.Stream, r: rune) -> (int, io.Error) {
- b := (^Writer)(s.stream_data)
- return writer_write_rune(b, r)
- },
- impl_read_from = proc(s: io.Stream, r: io.Reader) -> (n: i64, err: io.Error) {
- b := (^Writer)(s.stream_data)
- return writer_read_from(b, r)
- },
+ return
+ case .Query:
+ return io.query_utility({.Flush, .Write, .Destroy, .Query})
+ }
+ return 0, .Empty
}
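// --- Illustrative usage sketch (not part of this patch): once a bufio.Writer
// is wrapped by writer_to_stream/writer_to_writer, the generic io procedures
// dispatch through Stream.procedure, so call sites look unchanged.
package example

import "core:bufio"
import "core:io"
import "core:os"

write_greeting :: proc() {
	b: bufio.Writer
	bufio.writer_init(&b, os.stream_from_handle(os.stdout))
	defer bufio.writer_destroy(&b)

	w := bufio.writer_to_writer(&b)
	io.write_string(w, "hello\n") // routed to _writer_proc with mode == .Write
	io.flush(w)                   // routed with mode == .Flush
}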
diff --git a/core/bytes/buffer.odin b/core/bytes/buffer.odin
index b60a8e877..4375d8195 100644
--- a/core/bytes/buffer.odin
+++ b/core/bytes/buffer.odin
@@ -375,69 +375,31 @@ buffer_read_from :: proc(b: ^Buffer, r: io.Reader) -> (n: i64, err: io.Error) #n
buffer_to_stream :: proc(b: ^Buffer) -> (s: io.Stream) {
- s.stream_data = b
- s.stream_vtable = &_buffer_vtable
+ s.data = b
+ s.procedure = _buffer_proc
return
}
@(private)
-_buffer_vtable := io.Stream_VTable{
- impl_size = proc(s: io.Stream) -> i64 {
- b := (^Buffer)(s.stream_data)
- return i64(buffer_capacity(b))
- },
- impl_read = proc(s: io.Stream, p: []byte) -> (n: int, err: io.Error) {
- b := (^Buffer)(s.stream_data)
- return buffer_read(b, p)
- },
- impl_read_at = proc(s: io.Stream, p: []byte, offset: i64) -> (n: int, err: io.Error) {
- b := (^Buffer)(s.stream_data)
- return buffer_read_at(b, p, int(offset))
- },
- impl_read_byte = proc(s: io.Stream) -> (byte, io.Error) {
- b := (^Buffer)(s.stream_data)
- return buffer_read_byte(b)
- },
- impl_read_rune = proc(s: io.Stream) -> (r: rune, size: int, err: io.Error) {
- b := (^Buffer)(s.stream_data)
- return buffer_read_rune(b)
- },
- impl_write = proc(s: io.Stream, p: []byte) -> (n: int, err: io.Error) {
- b := (^Buffer)(s.stream_data)
- return buffer_write(b, p)
- },
- impl_write_at = proc(s: io.Stream, p: []byte, offset: i64) -> (n: int, err: io.Error) {
- b := (^Buffer)(s.stream_data)
- return buffer_write_at(b, p, int(offset))
- },
- impl_write_byte = proc(s: io.Stream, c: byte) -> io.Error {
- b := (^Buffer)(s.stream_data)
- return buffer_write_byte(b, c)
- },
- impl_write_rune = proc(s: io.Stream, r: rune) -> (int, io.Error) {
- b := (^Buffer)(s.stream_data)
- return buffer_write_rune(b, r)
- },
- impl_unread_byte = proc(s: io.Stream) -> io.Error {
- b := (^Buffer)(s.stream_data)
- return buffer_unread_byte(b)
- },
- impl_unread_rune = proc(s: io.Stream) -> io.Error {
- b := (^Buffer)(s.stream_data)
- return buffer_unread_rune(b)
- },
- impl_destroy = proc(s: io.Stream) -> io.Error {
- b := (^Buffer)(s.stream_data)
+_buffer_proc :: proc(stream_data: rawptr, mode: io.Stream_Mode, p: []byte, offset: i64, whence: io.Seek_From) -> (n: i64, err: io.Error) {
+ b := (^Buffer)(stream_data)
+ #partial switch mode {
+ case .Read:
+ return io._i64_err(buffer_read(b, p))
+ case .Read_At:
+ return io._i64_err(buffer_read_at(b, p, int(offset)))
+ case .Write:
+ return io._i64_err(buffer_write(b, p))
+ case .Write_At:
+ return io._i64_err(buffer_write_at(b, p, int(offset)))
+ case .Size:
+ n = i64(buffer_capacity(b))
+ return
+ case .Destroy:
buffer_destroy(b)
- return nil
- },
- impl_write_to = proc(s: io.Stream, w: io.Writer) -> (n: i64, err: io.Error) {
- b := (^Buffer)(s.stream_data)
- return buffer_write_to(b, w)
- },
- impl_read_from = proc(s: io.Stream, r: io.Reader) -> (n: i64, err: io.Error) {
- b := (^Buffer)(s.stream_data)
- return buffer_read_from(b, r)
- },
+ return
+ case .Query:
+ return io.query_utility({.Read, .Read_At, .Write, .Write_At, .Size, .Destroy, .Query})
+ }
+ return 0, .Empty
}
-
diff --git a/core/bytes/reader.odin b/core/bytes/reader.odin
index 7c37f3061..4b18345ba 100644
--- a/core/bytes/reader.odin
+++ b/core/bytes/reader.odin
@@ -16,8 +16,8 @@ reader_init :: proc(r: ^Reader, s: []byte) {
}
reader_to_stream :: proc(r: ^Reader) -> (s: io.Stream) {
- s.stream_data = r
- s.stream_vtable = &_reader_vtable
+ s.data = r
+ s.procedure = _reader_proc
return
}
@@ -137,41 +137,22 @@ reader_write_to :: proc(r: ^Reader, w: io.Writer) -> (n: i64, err: io.Error) {
@(private)
-_reader_vtable := io.Stream_VTable{
- impl_size = proc(s: io.Stream) -> i64 {
- r := (^Reader)(s.stream_data)
- return reader_size(r)
- },
- impl_read = proc(s: io.Stream, p: []byte) -> (n: int, err: io.Error) {
- r := (^Reader)(s.stream_data)
- return reader_read(r, p)
- },
- impl_read_at = proc(s: io.Stream, p: []byte, off: i64) -> (n: int, err: io.Error) {
- r := (^Reader)(s.stream_data)
- return reader_read_at(r, p, off)
- },
- impl_read_byte = proc(s: io.Stream) -> (byte, io.Error) {
- r := (^Reader)(s.stream_data)
- return reader_read_byte(r)
- },
- impl_unread_byte = proc(s: io.Stream) -> io.Error {
- r := (^Reader)(s.stream_data)
- return reader_unread_byte(r)
- },
- impl_read_rune = proc(s: io.Stream) -> (ch: rune, size: int, err: io.Error) {
- r := (^Reader)(s.stream_data)
- return reader_read_rune(r)
- },
- impl_unread_rune = proc(s: io.Stream) -> io.Error {
- r := (^Reader)(s.stream_data)
- return reader_unread_rune(r)
- },
- impl_seek = proc(s: io.Stream, offset: i64, whence: io.Seek_From) -> (i64, io.Error) {
- r := (^Reader)(s.stream_data)
- return reader_seek(r, offset, whence)
- },
- impl_write_to = proc(s: io.Stream, w: io.Writer) -> (n: i64, err: io.Error) {
- r := (^Reader)(s.stream_data)
- return reader_write_to(r, w)
- },
+_reader_proc :: proc(stream_data: rawptr, mode: io.Stream_Mode, p: []byte, offset: i64, whence: io.Seek_From) -> (n: i64, err: io.Error) {
+ r := (^Reader)(stream_data)
+ #partial switch mode {
+ case .Read:
+ return io._i64_err(reader_read(r, p))
+ case .Read_At:
+ return io._i64_err(reader_read_at(r, p, offset))
+ case .Seek:
+ n, err = reader_seek(r, offset, whence)
+ return
+ case .Size:
+ n = reader_size(r)
+ return
+ case .Query:
+ return io.query_utility({.Read, .Read_At, .Seek, .Size, .Query})
+ }
+ return 0, .Empty
}
+
diff --git a/core/c/frontend/preprocessor/preprocess.odin b/core/c/frontend/preprocessor/preprocess.odin
index 4cfad2c1c..b5eab0bb3 100644
--- a/core/c/frontend/preprocessor/preprocess.odin
+++ b/core/c/frontend/preprocessor/preprocess.odin
@@ -1118,7 +1118,7 @@ expand_macro :: proc(cpp: ^Preprocessor, rest: ^^Token, tok: ^Token) -> bool {
search_include_next :: proc(cpp: ^Preprocessor, filename: string) -> (path: string, ok: bool) {
for ; cpp.include_next_index < len(cpp.include_paths); cpp.include_next_index += 1 {
- tpath := filepath.join(elems={cpp.include_paths[cpp.include_next_index], filename}, allocator=context.temp_allocator)
+ tpath := filepath.join({cpp.include_paths[cpp.include_next_index], filename}, allocator=context.temp_allocator)
if os.exists(tpath) {
return strings.clone(tpath), true
}
@@ -1136,7 +1136,7 @@ search_include_paths :: proc(cpp: ^Preprocessor, filename: string) -> (path: str
}
for include_path in cpp.include_paths {
- tpath := filepath.join(elems={include_path, filename}, allocator=context.temp_allocator)
+ tpath := filepath.join({include_path, filename}, allocator=context.temp_allocator)
if os.exists(tpath) {
path, ok = strings.clone(tpath), true
cpp.filepath_cache[filename] = path
diff --git a/core/compress/common.odin b/core/compress/common.odin
index 6bcc46bf2..bc56229c2 100644
--- a/core/compress/common.odin
+++ b/core/compress/common.odin
@@ -188,7 +188,8 @@ input_size_from_memory :: proc(z: ^Context_Memory_Input) -> (res: i64, err: Erro
}
input_size_from_stream :: proc(z: ^Context_Stream_Input) -> (res: i64, err: Error) {
- return io.size(z.input), nil
+ res, _ = io.size(z.input)
+ return
}
input_size :: proc{input_size_from_memory, input_size_from_stream}
@@ -215,7 +216,7 @@ read_slice_from_stream :: #force_inline proc(z: ^Context_Stream_Input, size: int
// TODO: REMOVE ALL USE OF context.temp_allocator here
// there is literally no need for it
b := make([]u8, size, context.temp_allocator)
- _, e := z.input->impl_read(b[:])
+ _, e := io.read(z.input, b[:])
if e == .None {
return b, .None
}
diff --git a/core/compress/gzip/gzip.odin b/core/compress/gzip/gzip.odin
index 4de4d1b63..b0ca4491b 100644
--- a/core/compress/gzip/gzip.odin
+++ b/core/compress/gzip/gzip.odin
@@ -335,7 +335,7 @@ load_from_context :: proc(z: ^$C, buf: ^bytes.Buffer, known_gzip_size := -1, exp
// fmt.printf("GZIP: Expected Payload Size: %v\n", expected_output_size);
- zlib_error := zlib.inflate_raw(z=z, expected_output_size=expected_output_size)
+ zlib_error := zlib.inflate_raw(z, expected_output_size=expected_output_size)
if zlib_error != nil {
return zlib_error
}
diff --git a/core/compress/zlib/zlib.odin b/core/compress/zlib/zlib.odin
index 855eef7a8..21172e8e8 100644
--- a/core/compress/zlib/zlib.odin
+++ b/core/compress/zlib/zlib.odin
@@ -471,7 +471,7 @@ inflate_from_context :: proc(using ctx: ^compress.Context_Memory_Input, raw := f
}
// Parse ZLIB stream without header.
- inflate_raw(z=ctx, expected_output_size=expected_output_size) or_return
+ inflate_raw(ctx, expected_output_size=expected_output_size) or_return
if !raw {
compress.discard_to_next_byte_lsb(ctx)
@@ -665,7 +665,7 @@ inflate_from_byte_array :: proc(input: []u8, buf: ^bytes.Buffer, raw := false, e
ctx.input_data = input
ctx.output = buf
- return inflate_from_context(ctx=&ctx, raw=raw, expected_output_size=expected_output_size)
+ return inflate_from_context(&ctx, raw=raw, expected_output_size=expected_output_size)
}
inflate_from_byte_array_raw :: proc(input: []u8, buf: ^bytes.Buffer, raw := false, expected_output_size := -1) -> (err: Error) {
@@ -674,7 +674,7 @@ inflate_from_byte_array_raw :: proc(input: []u8, buf: ^bytes.Buffer, raw := fals
ctx.input_data = input
ctx.output = buf
- return inflate_raw(z=&ctx, expected_output_size=expected_output_size)
+ return inflate_raw(&ctx, expected_output_size=expected_output_size)
}
inflate :: proc{inflate_from_context, inflate_from_byte_array}
diff --git a/core/container/topological_sort/topological_sort.odin b/core/container/topological_sort/topological_sort.odin
index 4b69930d5..314e3e070 100644
--- a/core/container/topological_sort/topological_sort.odin
+++ b/core/container/topological_sort/topological_sort.odin
@@ -32,7 +32,7 @@ init :: proc(sorter: ^$S/Sorter($K)) {
}
destroy :: proc(sorter: ^$S/Sorter($K)) {
- for _, v in &sorter.relations {
+ for _, v in sorter.relations {
delete(v.dependents)
}
delete(sorter.relations)
@@ -80,7 +80,7 @@ sort :: proc(sorter: ^$S/Sorter($K)) -> (sorted, cycled: [dynamic]K) {
}
}
- for root in &sorted do for k, _ in relations[root].dependents {
+ for root in sorted do for k, _ in relations[root].dependents {
relation := &relations[k]
relation.dependencies -= 1
if relation.dependencies == 0 {
diff --git a/core/crypto/blake/blake.odin b/core/crypto/blake/blake.odin
index 5fc0a02b9..3685109e4 100644
--- a/core/crypto/blake/blake.odin
+++ b/core/crypto/blake/blake.odin
@@ -70,7 +70,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
@@ -149,7 +149,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
@@ -228,7 +228,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
@@ -307,7 +307,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
diff --git a/core/crypto/blake2b/blake2b.odin b/core/crypto/blake2b/blake2b.odin
index e75d74197..8f0770f82 100644
--- a/core/crypto/blake2b/blake2b.odin
+++ b/core/crypto/blake2b/blake2b.odin
@@ -77,7 +77,7 @@ hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
_blake2.update(&ctx, buf[:read])
}
diff --git a/core/crypto/blake2s/blake2s.odin b/core/crypto/blake2s/blake2s.odin
index 831335081..6a2d4ab9b 100644
--- a/core/crypto/blake2s/blake2s.odin
+++ b/core/crypto/blake2s/blake2s.odin
@@ -77,7 +77,7 @@ hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
_blake2.update(&ctx, buf[:read])
}
diff --git a/core/crypto/gost/gost.odin b/core/crypto/gost/gost.odin
index 1d0274fae..5aca8ce95 100644
--- a/core/crypto/gost/gost.odin
+++ b/core/crypto/gost/gost.odin
@@ -65,7 +65,7 @@ hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
diff --git a/core/crypto/groestl/groestl.odin b/core/crypto/groestl/groestl.odin
index 8e5a2440d..61460808f 100644
--- a/core/crypto/groestl/groestl.odin
+++ b/core/crypto/groestl/groestl.odin
@@ -70,7 +70,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
@@ -149,7 +149,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
@@ -228,7 +228,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
@@ -307,7 +307,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
diff --git a/core/crypto/haval/haval.odin b/core/crypto/haval/haval.odin
index 811ecf95d..b98facb33 100644
--- a/core/crypto/haval/haval.odin
+++ b/core/crypto/haval/haval.odin
@@ -79,7 +79,7 @@ hash_stream_128_3 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
ctx.str_len = u32(len(buf[:read]))
if read > 0 {
update(&ctx, buf[:read])
@@ -164,7 +164,7 @@ hash_stream_128_4 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
ctx.str_len = u32(len(buf[:read]))
if read > 0 {
update(&ctx, buf[:read])
@@ -249,7 +249,7 @@ hash_stream_128_5 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
ctx.str_len = u32(len(buf[:read]))
if read > 0 {
update(&ctx, buf[:read])
@@ -334,7 +334,7 @@ hash_stream_160_3 :: proc(s: io.Stream) -> ([DIGEST_SIZE_160]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
ctx.str_len = u32(len(buf[:read]))
if read > 0 {
update(&ctx, buf[:read])
@@ -419,7 +419,7 @@ hash_stream_160_4 :: proc(s: io.Stream) -> ([DIGEST_SIZE_160]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
ctx.str_len = u32(len(buf[:read]))
if read > 0 {
update(&ctx, buf[:read])
@@ -504,7 +504,7 @@ hash_stream_160_5 :: proc(s: io.Stream) -> ([DIGEST_SIZE_160]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
ctx.str_len = u32(len(buf[:read]))
if read > 0 {
update(&ctx, buf[:read])
@@ -589,7 +589,7 @@ hash_stream_192_3 :: proc(s: io.Stream) -> ([DIGEST_SIZE_192]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
ctx.str_len = u32(len(buf[:read]))
if read > 0 {
update(&ctx, buf[:read])
@@ -674,7 +674,7 @@ hash_stream_192_4 :: proc(s: io.Stream) -> ([DIGEST_SIZE_192]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
ctx.str_len = u32(len(buf[:read]))
if read > 0 {
update(&ctx, buf[:read])
@@ -759,7 +759,7 @@ hash_stream_192_5 :: proc(s: io.Stream) -> ([DIGEST_SIZE_192]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
ctx.str_len = u32(len(buf[:read]))
if read > 0 {
update(&ctx, buf[:read])
@@ -844,7 +844,7 @@ hash_stream_224_3 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
ctx.str_len = u32(len(buf[:read]))
if read > 0 {
update(&ctx, buf[:read])
@@ -929,7 +929,7 @@ hash_stream_224_4 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
ctx.str_len = u32(len(buf[:read]))
if read > 0 {
update(&ctx, buf[:read])
@@ -1014,7 +1014,7 @@ hash_stream_224_5 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
ctx.str_len = u32(len(buf[:read]))
if read > 0 {
update(&ctx, buf[:read])
@@ -1099,7 +1099,7 @@ hash_stream_256_3 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
ctx.str_len = u32(len(buf[:read]))
if read > 0 {
update(&ctx, buf[:read])
@@ -1184,7 +1184,7 @@ hash_stream_256_4 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
ctx.str_len = u32(len(buf[:read]))
if read > 0 {
update(&ctx, buf[:read])
@@ -1270,7 +1270,7 @@ hash_stream_256_5 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
ctx.str_len = u32(len(buf[:read]))
if read > 0 {
update(&ctx, buf[:read])
diff --git a/core/crypto/jh/jh.odin b/core/crypto/jh/jh.odin
index 42c2d1d34..5dc6c4e6b 100644
--- a/core/crypto/jh/jh.odin
+++ b/core/crypto/jh/jh.odin
@@ -70,7 +70,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
@@ -149,7 +149,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
@@ -228,7 +228,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
@@ -307,7 +307,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
diff --git a/core/crypto/keccak/keccak.odin b/core/crypto/keccak/keccak.odin
index aeb5aac52..4c74858d2 100644
--- a/core/crypto/keccak/keccak.odin
+++ b/core/crypto/keccak/keccak.odin
@@ -77,7 +77,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
_sha3.update(&ctx, buf[:read])
}
@@ -159,7 +159,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
_sha3.update(&ctx, buf[:read])
}
@@ -241,7 +241,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
_sha3.update(&ctx, buf[:read])
}
@@ -323,7 +323,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
_sha3.update(&ctx, buf[:read])
}
diff --git a/core/crypto/md2/md2.odin b/core/crypto/md2/md2.odin
index 711e6e9f6..4942183e1 100644
--- a/core/crypto/md2/md2.odin
+++ b/core/crypto/md2/md2.odin
@@ -64,7 +64,7 @@ hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
diff --git a/core/crypto/md4/md4.odin b/core/crypto/md4/md4.odin
index b2651225b..8efdbb5a5 100644
--- a/core/crypto/md4/md4.odin
+++ b/core/crypto/md4/md4.odin
@@ -68,7 +68,7 @@ hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
diff --git a/core/crypto/md5/md5.odin b/core/crypto/md5/md5.odin
index 30a556102..858480b04 100644
--- a/core/crypto/md5/md5.odin
+++ b/core/crypto/md5/md5.odin
@@ -67,7 +67,7 @@ hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
diff --git a/core/crypto/ripemd/ripemd.odin b/core/crypto/ripemd/ripemd.odin
index 702d29037..f9edb121b 100644
--- a/core/crypto/ripemd/ripemd.odin
+++ b/core/crypto/ripemd/ripemd.odin
@@ -69,7 +69,7 @@ hash_stream_128 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
@@ -145,7 +145,7 @@ hash_stream_160 :: proc(s: io.Stream) -> ([DIGEST_SIZE_160]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
@@ -221,7 +221,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
@@ -297,7 +297,7 @@ hash_stream_320 :: proc(s: io.Stream) -> ([DIGEST_SIZE_320]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
diff --git a/core/crypto/sha1/sha1.odin b/core/crypto/sha1/sha1.odin
index b0dbd7dc8..599d1791e 100644
--- a/core/crypto/sha1/sha1.odin
+++ b/core/crypto/sha1/sha1.odin
@@ -67,7 +67,7 @@ hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
diff --git a/core/crypto/sha2/sha2.odin b/core/crypto/sha2/sha2.odin
index 9792a4cb8..0f55c4be1 100644
--- a/core/crypto/sha2/sha2.odin
+++ b/core/crypto/sha2/sha2.odin
@@ -74,7 +74,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
@@ -153,7 +153,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
@@ -232,7 +232,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
@@ -311,7 +311,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
diff --git a/core/crypto/sha3/sha3.odin b/core/crypto/sha3/sha3.odin
index 1202f8b23..5d8ad2106 100644
--- a/core/crypto/sha3/sha3.odin
+++ b/core/crypto/sha3/sha3.odin
@@ -73,7 +73,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
_sha3.update(&ctx, buf[:read])
}
@@ -152,7 +152,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
_sha3.update(&ctx, buf[:read])
}
@@ -231,7 +231,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
_sha3.update(&ctx, buf[:read])
}
@@ -310,7 +310,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
_sha3.update(&ctx, buf[:read])
}
diff --git a/core/crypto/shake/shake.odin b/core/crypto/shake/shake.odin
index 525dcfbd3..020ba68f3 100644
--- a/core/crypto/shake/shake.odin
+++ b/core/crypto/shake/shake.odin
@@ -73,7 +73,7 @@ hash_stream_128 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
_sha3.update(&ctx, buf[:read])
}
@@ -155,7 +155,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
_sha3.update(&ctx, buf[:read])
}
diff --git a/core/crypto/sm3/sm3.odin b/core/crypto/sm3/sm3.odin
index 74c9f22e2..9e684ff08 100644
--- a/core/crypto/sm3/sm3.odin
+++ b/core/crypto/sm3/sm3.odin
@@ -66,7 +66,7 @@ hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
diff --git a/core/crypto/streebog/streebog.odin b/core/crypto/streebog/streebog.odin
index f85977cba..42da1e695 100644
--- a/core/crypto/streebog/streebog.odin
+++ b/core/crypto/streebog/streebog.odin
@@ -70,7 +70,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
@@ -146,7 +146,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
diff --git a/core/crypto/tiger/tiger.odin b/core/crypto/tiger/tiger.odin
index cf6159fad..614926129 100644
--- a/core/crypto/tiger/tiger.odin
+++ b/core/crypto/tiger/tiger.odin
@@ -71,7 +71,7 @@ hash_stream_128 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
_tiger.update(&ctx, buf[:read])
}
@@ -150,7 +150,7 @@ hash_stream_160 :: proc(s: io.Stream) -> ([DIGEST_SIZE_160]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
_tiger.update(&ctx, buf[:read])
}
@@ -229,7 +229,7 @@ hash_stream_192 :: proc(s: io.Stream) -> ([DIGEST_SIZE_192]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
_tiger.update(&ctx, buf[:read])
}
diff --git a/core/crypto/tiger2/tiger2.odin b/core/crypto/tiger2/tiger2.odin
index e8f2c4edb..ead874d56 100644
--- a/core/crypto/tiger2/tiger2.odin
+++ b/core/crypto/tiger2/tiger2.odin
@@ -71,7 +71,7 @@ hash_stream_128 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
_tiger.update(&ctx, buf[:read])
}
@@ -150,7 +150,7 @@ hash_stream_160 :: proc(s: io.Stream) -> ([DIGEST_SIZE_160]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
_tiger.update(&ctx, buf[:read])
}
@@ -229,7 +229,7 @@ hash_stream_192 :: proc(s: io.Stream) -> ([DIGEST_SIZE_192]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
_tiger.update(&ctx, buf[:read])
}
diff --git a/core/crypto/whirlpool/whirlpool.odin b/core/crypto/whirlpool/whirlpool.odin
index 0cfef7c6b..cf0bf6490 100644
--- a/core/crypto/whirlpool/whirlpool.odin
+++ b/core/crypto/whirlpool/whirlpool.odin
@@ -66,7 +66,7 @@ hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
defer delete(buf)
read := 1
for read > 0 {
- read, _ = s->impl_read(buf)
+ read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
diff --git a/core/debug/pe/section.odin b/core/debug/pe/section.odin
index 809da8bb4..926306dbb 100644
--- a/core/debug/pe/section.odin
+++ b/core/debug/pe/section.odin
@@ -1,8 +1,5 @@
package debug_pe
-import "core:runtime"
-import "core:io"
-
Section_Header32 :: struct {
name: [8]u8,
virtual_size: u32le,
diff --git a/core/dynlib/lib_js.odin b/core/dynlib/lib_js.odin
index 37dab8758..ace1b0939 100644
--- a/core/dynlib/lib_js.odin
+++ b/core/dynlib/lib_js.odin
@@ -3,13 +3,13 @@
package dynlib
_load_library :: proc(path: string, global_symbols := false) -> (Library, bool) {
- return
+ return nil, false
}
_unload_library :: proc(library: Library) -> bool {
- return
+ return false
}
_symbol_address :: proc(library: Library, symbol: string) -> (ptr: rawptr, found: bool) {
- return
+ return nil, false
}
diff --git a/core/encoding/hxa/read.odin b/core/encoding/hxa/read.odin
index abe295530..8a8636f19 100644
--- a/core/encoding/hxa/read.odin
+++ b/core/encoding/hxa/read.odin
@@ -83,7 +83,7 @@ read :: proc(data: []byte, filename := "", print_error := false, allocato
meta_data = make([]Meta, int(capacity))
count := 0
defer meta_data = meta_data[:count]
- for m in &meta_data {
+ for &m in meta_data {
m.name = read_name(r) or_return
type := read_value(r, Meta_Value_Type) or_return
@@ -116,7 +116,7 @@ read :: proc(data: []byte, filename := "", print_error := false, allocato
layer_count := 0
layers = make(Layer_Stack, stack_count)
defer layers = layers[:layer_count]
- for layer in &layers {
+ for &layer in layers {
layer.name = read_name(r) or_return
layer.components = read_value(r, u8) or_return
type := read_value(r, Layer_Data_Type) or_return
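// --- Side note, a sketch (not from this patch) of the by-reference range
// syntax the loops above migrate to: `for &x in s` replaces `for x in &s`
// and makes the loop variable addressable.
package example

import "core:fmt"

main :: proc() {
	values := [3]int{1, 2, 3}
	for &v in values {
		v *= 10 // modifies the array element itself
	}
	fmt.println(values) // [10, 20, 30]
}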
diff --git a/core/encoding/json/unmarshal.odin b/core/encoding/json/unmarshal.odin
index e6c61d8fa..678f2dcfa 100644
--- a/core/encoding/json/unmarshal.odin
+++ b/core/encoding/json/unmarshal.odin
@@ -72,7 +72,7 @@ unmarshal_string :: proc(data: string, ptr: ^$T, spec := DEFAULT_SPECIFICATION,
@(private)
assign_bool :: proc(val: any, b: bool) -> bool {
v := reflect.any_core(val)
- switch dst in &v {
+ switch &dst in v {
case bool: dst = bool(b)
case b8: dst = b8 (b)
case b16: dst = b16 (b)
@@ -85,7 +85,7 @@ assign_bool :: proc(val: any, b: bool) -> bool {
@(private)
assign_int :: proc(val: any, i: $T) -> bool {
v := reflect.any_core(val)
- switch dst in &v {
+ switch &dst in v {
case i8: dst = i8 (i)
case i16: dst = i16 (i)
case i16le: dst = i16le (i)
@@ -122,7 +122,7 @@ assign_int :: proc(val: any, i: $T) -> bool {
@(private)
assign_float :: proc(val: any, f: $T) -> bool {
v := reflect.any_core(val)
- switch dst in &v {
+ switch &dst in v {
case f16: dst = f16 (f)
case f16le: dst = f16le(f)
case f16be: dst = f16be(f)
@@ -150,7 +150,7 @@ assign_float :: proc(val: any, f: $T) -> bool {
@(private)
unmarshal_string_token :: proc(p: ^Parser, val: any, str: string, ti: ^reflect.Type_Info) -> bool {
val := val
- switch dst in &val {
+ switch &dst in val {
case string:
dst = str
return true
@@ -215,7 +215,7 @@ unmarshal_value :: proc(p: ^Parser, v: any) -> (err: Unmarshal_Error) {
}
}
- switch dst in &v {
+ switch &dst in v {
// Handle json.Value as an unknown type
case Value:
dst = parse_value(p) or_return
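// --- Sketch of the matching by-reference type switch: `switch &dst in v`
// replaces `switch dst in &v`, binding `dst` as an assignable reference.
// Shown here on a hypothetical union; the code above applies it to `any`.
package example

import "core:fmt"

Number :: union {
	int,
	f64,
}

main :: proc() {
	v: Number = 3
	switch &dst in v {
	case int: dst *= 2
	case f64: dst *= 2.0
	}
	fmt.println(v) // 6
}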
diff --git a/core/fmt/fmt.odin b/core/fmt/fmt.odin
index 61f7b5d99..f1f94b1b3 100644
--- a/core/fmt/fmt.odin
+++ b/core/fmt/fmt.odin
@@ -123,7 +123,7 @@ register_user_formatter :: proc(id: typeid, formatter: User_Formatter) -> Regist
aprint :: proc(args: ..any, sep := " ") -> string {
str: strings.Builder
strings.builder_init(&str)
- sbprint(buf=&str, args=args, sep=sep)
+ sbprint(&str, ..args, sep=sep)
return strings.to_string(str)
}
// Creates a formatted string with a newline character at the end
@@ -139,7 +139,7 @@ aprint :: proc(args: ..any, sep := " ") -> string {
aprintln :: proc(args: ..any, sep := " ") -> string {
str: strings.Builder
strings.builder_init(&str)
- sbprintln(buf=&str, args=args, sep=sep)
+ sbprintln(&str, ..args, sep=sep)
return strings.to_string(str)
}
// Creates a formatted string using a format string and arguments
@@ -171,7 +171,7 @@ aprintf :: proc(fmt: string, args: ..any) -> string {
tprint :: proc(args: ..any, sep := " ") -> string {
str: strings.Builder
strings.builder_init(&str, context.temp_allocator)
- sbprint(buf=&str, args=args, sep=sep)
+ sbprint(&str, ..args, sep=sep)
return strings.to_string(str)
}
// Creates a formatted string with a newline character at the end
@@ -187,7 +187,7 @@ tprint :: proc(args: ..any, sep := " ") -> string {
tprintln :: proc(args: ..any, sep := " ") -> string {
str: strings.Builder
strings.builder_init(&str, context.temp_allocator)
- sbprintln(buf=&str, args=args, sep=sep)
+ sbprintln(&str, ..args, sep=sep)
return strings.to_string(str)
}
// Creates a formatted string using a format string and arguments
@@ -217,7 +217,7 @@ tprintf :: proc(fmt: string, args: ..any) -> string {
//
bprint :: proc(buf: []byte, args: ..any, sep := " ") -> string {
sb := strings.builder_from_bytes(buf[0:len(buf)])
- return sbprint(buf=&sb, args=args, sep=sep)
+ return sbprint(&sb, ..args, sep=sep)
}
// Creates a formatted string using a supplied buffer as the backing array, appends newline. Writes into the buffer.
//
@@ -230,7 +230,7 @@ bprint :: proc(buf: []byte, args: ..any, sep := " ") -> string {
//
bprintln :: proc(buf: []byte, args: ..any, sep := " ") -> string {
sb := strings.builder_from_bytes(buf[0:len(buf)])
- return sbprintln(buf=&sb, args=args, sep=sep)
+ return sbprintln(&sb, ..args, sep=sep)
}
// Creates a formatted string using a supplied buffer as the backing array. Writes into the buffer.
//
@@ -327,7 +327,7 @@ ctprintf :: proc(format: string, args: ..any) -> cstring {
// Returns: A formatted string
//
sbprint :: proc(buf: ^strings.Builder, args: ..any, sep := " ") -> string {
- wprint(w=strings.to_writer(buf), args=args, sep=sep)
+ wprint(strings.to_writer(buf), ..args, sep=sep)
return strings.to_string(buf^)
}
// Formats and writes to a strings.Builder buffer using the default print settings
@@ -340,7 +340,7 @@ sbprint :: proc(buf: ^strings.Builder, args: ..any, sep := " ") -> string {
// Returns: The resulting formatted string
//
sbprintln :: proc(buf: ^strings.Builder, args: ..any, sep := " ") -> string {
- wprintln(w=strings.to_writer(buf), args=args, sep=sep)
+ wprintln(strings.to_writer(buf), ..args, sep=sep)
return strings.to_string(buf^)
}
// Formats and writes to a strings.Builder buffer according to the specified format string
@@ -353,7 +353,7 @@ sbprintln :: proc(buf: ^strings.Builder, args: ..any, sep := " ") -> string {
// Returns: The resulting formatted string
//
sbprintf :: proc(buf: ^strings.Builder, fmt: string, args: ..any) -> string {
- wprintf(w=strings.to_writer(buf), fmt=fmt, args=args)
+ wprintf(strings.to_writer(buf), fmt, ..args)
return strings.to_string(buf^)
}
// Formats and writes to an io.Writer using the default print settings
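// --- Sketch (not part of this patch) of the explicit variadic spread used in
// the calls above: `..args` re-expands a captured variadic slice, and named
// parameters such as `sep` may still follow it. `print_joined` is hypothetical.
package example

import "core:fmt"

print_joined :: proc(args: ..any) -> string {
	return fmt.tprint(..args, sep=" | ")
}

main :: proc() {
	fmt.println(print_joined("a", 1, true)) // a | 1 | true
}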
diff --git a/core/fmt/fmt_js.odin b/core/fmt/fmt_js.odin
index 5e06041f5..881cde867 100644
--- a/core/fmt/fmt_js.odin
+++ b/core/fmt/fmt_js.odin
@@ -11,27 +11,24 @@ foreign odin_env {
}
@(private="file")
-write_vtable := io.Stream_VTable{
- impl_write = proc(s: io.Stream, p: []byte) -> (n: int, err: io.Error) {
- fd := u32(uintptr(s.stream_data))
+write_stream_proc :: proc(stream_data: rawptr, mode: io.Stream_Mode, p: []byte, offset: i64, whence: io.Seek_From) -> (n: i64, err: io.Error) {
+ if mode == .Write {
+ fd := u32(uintptr(stream_data))
write(fd, p)
- return len(p), nil
- },
+ return i64(len(p)), nil
+ }
+ return 0, .Empty
}
@(private="file")
stdout := io.Writer{
- stream = {
- stream_vtable = &write_vtable,
- stream_data = rawptr(uintptr(1)),
- },
+ procedure = write_stream_proc,
+ data = rawptr(uintptr(1)),
}
@(private="file")
stderr := io.Writer{
- stream = {
- stream_vtable = &write_vtable,
- stream_data = rawptr(uintptr(2)),
- },
+ procedure = write_stream_proc,
+ data = rawptr(uintptr(2)),
}
// print formats using the default print settings and writes to stdout
diff --git a/core/fmt/fmt_os.odin b/core/fmt/fmt_os.odin
index 861b0c3b9..840fd1545 100644
--- a/core/fmt/fmt_os.odin
+++ b/core/fmt/fmt_os.odin
@@ -12,9 +12,9 @@ fprint :: proc(fd: os.Handle, args: ..any, sep := " ") -> int {
b: bufio.Writer
defer bufio.writer_flush(&b)
- bufio.writer_init_with_buf(&b, {os.stream_from_handle(fd)}, buf[:])
+ bufio.writer_init_with_buf(&b, os.stream_from_handle(fd), buf[:])
w := bufio.writer_to_writer(&b)
- return wprint(w=w, args=args, sep=sep)
+ return wprint(w, ..args, sep=sep)
}
// fprintln formats using the default print settings and writes to fd
@@ -23,10 +23,10 @@ fprintln :: proc(fd: os.Handle, args: ..any, sep := " ") -> int {
b: bufio.Writer
defer bufio.writer_flush(&b)
- bufio.writer_init_with_buf(&b, {os.stream_from_handle(fd)}, buf[:])
+ bufio.writer_init_with_buf(&b, os.stream_from_handle(fd), buf[:])
w := bufio.writer_to_writer(&b)
- return wprintln(w=w, args=args, sep=sep)
+ return wprintln(w, ..args, sep=sep)
}
// fprintf formats according to the specified format string and writes to fd
fprintf :: proc(fd: os.Handle, fmt: string, args: ..any) -> int {
@@ -34,7 +34,7 @@ fprintf :: proc(fd: os.Handle, fmt: string, args: ..any) -> int {
b: bufio.Writer
defer bufio.writer_flush(&b)
- bufio.writer_init_with_buf(&b, {os.stream_from_handle(fd)}, buf[:])
+ bufio.writer_init_with_buf(&b, os.stream_from_handle(fd), buf[:])
w := bufio.writer_to_writer(&b)
return wprintf(w, fmt, ..args)
@@ -44,7 +44,7 @@ fprint_type :: proc(fd: os.Handle, info: ^runtime.Type_Info) -> (n: int, err: io
b: bufio.Writer
defer bufio.writer_flush(&b)
- bufio.writer_init_with_buf(&b, {os.stream_from_handle(fd)}, buf[:])
+ bufio.writer_init_with_buf(&b, os.stream_from_handle(fd), buf[:])
w := bufio.writer_to_writer(&b)
return wprint_type(w, info)
@@ -54,22 +54,22 @@ fprint_typeid :: proc(fd: os.Handle, id: typeid) -> (n: int, err: io.Error) {
b: bufio.Writer
defer bufio.writer_flush(&b)
- bufio.writer_init_with_buf(&b, {os.stream_from_handle(fd)}, buf[:])
+ bufio.writer_init_with_buf(&b, os.stream_from_handle(fd), buf[:])
w := bufio.writer_to_writer(&b)
return wprint_typeid(w, id)
}
// print formats using the default print settings and writes to os.stdout
-print :: proc(args: ..any, sep := " ") -> int { return fprint(fd=os.stdout, args=args, sep=sep) }
+print :: proc(args: ..any, sep := " ") -> int { return fprint(os.stdout, ..args, sep=sep) }
// println formats using the default print settings and writes to os.stdout
-println :: proc(args: ..any, sep := " ") -> int { return fprintln(fd=os.stdout, args=args, sep=sep) }
+println :: proc(args: ..any, sep := " ") -> int { return fprintln(os.stdout, ..args, sep=sep) }
// printf formats according to the specified format string and writes to os.stdout
printf :: proc(fmt: string, args: ..any) -> int { return fprintf(os.stdout, fmt, ..args) }
// eprint formats using the default print settings and writes to os.stderr
-eprint :: proc(args: ..any, sep := " ") -> int { return fprint(fd=os.stderr, args=args, sep=sep) }
+eprint :: proc(args: ..any, sep := " ") -> int { return fprint(os.stderr, ..args, sep=sep) }
// eprintln formats using the default print settings and writes to os.stderr
-eprintln :: proc(args: ..any, sep := " ") -> int { return fprintln(fd=os.stderr, args=args, sep=sep) }
+eprintln :: proc(args: ..any, sep := " ") -> int { return fprintln(os.stderr, ..args, sep=sep) }
// eprintf formats according to the specified format string and writes to os.stderr
eprintf :: proc(fmt: string, args: ..any) -> int { return fprintf(os.stderr, fmt, ..args) }
diff --git a/core/image/netpbm/netpbm.odin b/core/image/netpbm/netpbm.odin
index cb07b1e3a..74e482cb4 100644
--- a/core/image/netpbm/netpbm.odin
+++ b/core/image/netpbm/netpbm.odin
@@ -161,18 +161,18 @@ save_to_buffer :: proc(img: ^Image, custom_info: Info = {}, allocator := context
// convert from native endianness
if img.depth == 16 {
pixels := mem.slice_data_cast([]u16be, data.buf[len(header_buf):])
- for p in &pixels {
+ for &p in pixels {
p = u16be(transmute(u16) p)
}
} else if header.format in PFM {
if header.little_endian {
pixels := mem.slice_data_cast([]f32le, data.buf[len(header_buf):])
- for p in &pixels {
+ for &p in pixels {
p = f32le(transmute(f32) p)
}
} else {
pixels := mem.slice_data_cast([]f32be, data.buf[len(header_buf):])
- for p in &pixels {
+ for &p in pixels {
p = f32be(transmute(f32) p)
}
}
@@ -578,18 +578,18 @@ decode_image :: proc(img: ^Image, header: Header, data: []byte, allocator := con
if header.format in PFM {
pixels := mem.slice_data_cast([]f32, img.pixels.buf[:])
if header.little_endian {
- for p in &pixels {
+ for &p in pixels {
p = f32(transmute(f32le) p)
}
} else {
- for p in &pixels {
+ for &p in pixels {
p = f32(transmute(f32be) p)
}
}
} else {
if img.depth == 16 {
pixels := mem.slice_data_cast([]u16, img.pixels.buf[:])
- for p in &pixels {
+ for &p in pixels {
p = u16(transmute(u16be) p)
}
}
diff --git a/core/image/png/helpers.odin b/core/image/png/helpers.odin
index bfe56d62e..889b3cb6b 100644
--- a/core/image/png/helpers.odin
+++ b/core/image/png/helpers.odin
@@ -36,7 +36,7 @@ destroy :: proc(img: ^Image) {
bytes.buffer_destroy(&img.pixels)
if v, ok := img.metadata.(^image.PNG_Info); ok {
- for chunk in &v.chunks {
+ for chunk in v.chunks {
delete(chunk.data)
}
delete(v.chunks)
@@ -99,7 +99,7 @@ text :: proc(c: image.PNG_Chunk) -> (res: Text, ok: bool) {
case .tEXt:
ok = true
- fields := bytes.split(s=c.data, sep=[]u8{0}, allocator=context.temp_allocator)
+ fields := bytes.split(c.data, sep=[]u8{0}, allocator=context.temp_allocator)
if len(fields) == 2 {
res.keyword = strings.clone(string(fields[0]))
res.text = strings.clone(string(fields[1]))
@@ -110,7 +110,7 @@ text :: proc(c: image.PNG_Chunk) -> (res: Text, ok: bool) {
case .zTXt:
ok = true
- fields := bytes.split_n(s=c.data, sep=[]u8{0}, n=3, allocator=context.temp_allocator)
+ fields := bytes.split_n(c.data, sep=[]u8{0}, n=3, allocator=context.temp_allocator)
if len(fields) != 3 || len(fields[1]) != 0 {
// Compression method must be 0=Deflate, which thanks to the split above turns
// into an empty slice
@@ -199,7 +199,7 @@ text_destroy :: proc(text: Text) {
iccp :: proc(c: image.PNG_Chunk) -> (res: iCCP, ok: bool) {
runtime.DEFAULT_TEMP_ALLOCATOR_TEMP_GUARD(ignore = context.temp_allocator == context.allocator)
- fields := bytes.split_n(s=c.data, sep=[]u8{0}, n=3, allocator=context.temp_allocator)
+ fields := bytes.split_n(c.data, sep=[]u8{0}, n=3, allocator=context.temp_allocator)
if len(fields[0]) < 1 || len(fields[0]) > 79 {
// Invalid profile name
@@ -263,7 +263,7 @@ splt :: proc(c: image.PNG_Chunk) -> (res: sPLT, ok: bool) {
}
runtime.DEFAULT_TEMP_ALLOCATOR_TEMP_GUARD(ignore = context.temp_allocator == context.allocator)
- fields := bytes.split_n(s=c.data, sep=[]u8{0}, n=2, allocator=context.temp_allocator)
+ fields := bytes.split_n(c.data, sep=[]u8{0}, n=2, allocator=context.temp_allocator)
if len(fields) != 2 {
return
}
diff --git a/core/io/conv.odin b/core/io/conv.odin
index 39a72d69d..e3286baca 100644
--- a/core/io/conv.odin
+++ b/core/io/conv.odin
@@ -1,124 +1,80 @@
package io
to_reader :: proc(s: Stream) -> (r: Reader, ok: bool = true) #optional_ok {
- r.stream = s
- if s.stream_vtable == nil || s.impl_read == nil {
- ok = false
- }
+ r = s
+ ok = .Read in query(s)
return
}
to_writer :: proc(s: Stream) -> (w: Writer, ok: bool = true) #optional_ok {
- w.stream = s
- if s.stream_vtable == nil || s.impl_write == nil {
- ok = false
- }
+ w = s
+ ok = .Write in query(s)
return
}
to_closer :: proc(s: Stream) -> (c: Closer, ok: bool = true) #optional_ok {
- c.stream = s
- if s.stream_vtable == nil || s.impl_close == nil {
- ok = false
- }
+ c = s
+ ok = .Close in query(s)
return
}
to_flusher :: proc(s: Stream) -> (f: Flusher, ok: bool = true) #optional_ok {
- f.stream = s
- if s.stream_vtable == nil || s.impl_flush == nil {
- ok = false
- }
+ f = s
+ ok = .Flush in query(s)
return
}
to_seeker :: proc(s: Stream) -> (seeker: Seeker, ok: bool = true) #optional_ok {
- seeker.stream = s
- if s.stream_vtable == nil || s.impl_seek == nil {
- ok = false
- }
+ seeker = s
+ ok = .Seek in query(s)
return
}
to_read_writer :: proc(s: Stream) -> (r: Read_Writer, ok: bool = true) #optional_ok {
- r.stream = s
- if s.stream_vtable == nil || s.impl_read == nil || s.impl_write == nil {
- ok = false
- }
+ r = s
+ ok = query(s) >= {.Read, .Write}
return
}
to_read_closer :: proc(s: Stream) -> (r: Read_Closer, ok: bool = true) #optional_ok {
- r.stream = s
- if s.stream_vtable == nil || s.impl_read == nil || s.impl_close == nil {
- ok = false
- }
+ r = s
+ ok = query(s) >= {.Read, .Close}
return
}
to_read_write_closer :: proc(s: Stream) -> (r: Read_Write_Closer, ok: bool = true) #optional_ok {
- r.stream = s
- if s.stream_vtable == nil || s.impl_read == nil || s.impl_write == nil || s.impl_close == nil {
- ok = false
- }
+ r = s
+ ok = query(s) >= {.Read, .Write, .Close}
return
}
to_read_write_seeker :: proc(s: Stream) -> (r: Read_Write_Seeker, ok: bool = true) #optional_ok {
- r.stream = s
- if s.stream_vtable == nil || s.impl_read == nil || s.impl_write == nil || s.impl_seek == nil {
- ok = false
- }
+ r = s
+ ok = query(s) >= {.Read, .Write, .Seek}
return
}
to_write_flusher :: proc(s: Stream) -> (w: Write_Flusher, ok: bool = true) #optional_ok {
- w.stream = s
- if s.stream_vtable == nil || s.impl_write == nil || s.impl_flush == nil {
- ok = false
- }
+ w = s
+ ok = query(s) >= {.Write, .Flush}
return
}
to_write_flush_closer :: proc(s: Stream) -> (w: Write_Flush_Closer, ok: bool = true) #optional_ok {
- w.stream = s
- if s.stream_vtable == nil || s.impl_write == nil || s.impl_flush == nil || s.impl_close == nil {
- ok = false
- }
+ w = s
+ ok = query(s) >= {.Write, .Flush, .Close}
return
}
to_reader_at :: proc(s: Stream) -> (r: Reader_At, ok: bool = true) #optional_ok {
- r.stream = s
- if s.stream_vtable == nil || s.impl_read_at == nil {
- ok = false
- }
+ r = s
+ ok = query(s) >= {.Read_At}
return
}
to_writer_at :: proc(s: Stream) -> (w: Writer_At, ok: bool = true) #optional_ok {
- w.stream = s
- if s.stream_vtable == nil || s.impl_write_at == nil {
- ok = false
- }
- return
-}
-to_reader_from :: proc(s: Stream) -> (r: Reader_From, ok: bool = true) #optional_ok {
- r.stream = s
- if s.stream_vtable == nil || s.impl_read_from == nil {
- ok = false
- }
- return
-}
-to_writer_to :: proc(s: Stream) -> (w: Writer_To, ok: bool = true) #optional_ok {
- w.stream = s
- if s.stream_vtable == nil || s.impl_write_to == nil {
- ok = false
- }
+ w = s
+ ok = query(s) >= {.Write_At}
return
}
to_write_closer :: proc(s: Stream) -> (w: Write_Closer, ok: bool = true) #optional_ok {
- w.stream = s
- if s.stream_vtable == nil || s.impl_write == nil || s.impl_close == nil {
- ok = false
- }
+ w = s
+ ok = query(s) >= {.Write, .Close}
return
}
to_write_seeker :: proc(s: Stream) -> (w: Write_Seeker, ok: bool = true) #optional_ok {
- w.stream = s
- if s.stream_vtable == nil || s.impl_write == nil || s.impl_seek == nil {
- ok = false
- }
+ w = s
+ ok = query(s) >= {.Write, .Seek}
return
}
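A minimal usage sketch of the query-based conversions above (not part of this patch; `read_if_possible` and the 64-byte buffer are illustrative, and `core:io` is assumed to be imported):

    read_if_possible :: proc(s: io.Stream) -> (n: int, err: io.Error) {
        // to_reader succeeds only when .Read is in query(s)
        if r, ok := io.to_reader(s); ok {
            buf: [64]byte
            return io.read(r, buf[:])
        }
        return 0, .Empty
    }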
diff --git a/core/io/io.odin b/core/io/io.odin
index 7faa500b1..566e13c54 100644
--- a/core/io/io.odin
+++ b/core/io/io.odin
@@ -53,137 +53,106 @@ Error :: enum i32 {
Empty = -1,
}
-Close_Proc :: proc(using s: Stream) -> Error
-Flush_Proc :: proc(using s: Stream) -> Error
-Seek_Proc :: proc(using s: Stream, offset: i64, whence: Seek_From) -> (n: i64, err: Error)
-Size_Proc :: proc(using s: Stream) -> i64
-Read_Proc :: proc(using s: Stream, p: []byte) -> (n: int, err: Error)
-Read_At_Proc :: proc(using s: Stream, p: []byte, off: i64) -> (n: int, err: Error)
-Read_From_Proc :: proc(using s: Stream, r: Reader) -> (n: i64, err: Error)
-Read_Byte_Proc :: proc(using s: Stream) -> (byte, Error)
-Read_Rune_Proc :: proc(using s: Stream) -> (ch: rune, size: int, err: Error)
-Unread_Byte_Proc :: proc(using s: Stream) -> Error
-Unread_Rune_Proc :: proc(using s: Stream) -> Error
-Write_Proc :: proc(using s: Stream, p: []byte) -> (n: int, err: Error)
-Write_At_Proc :: proc(using s: Stream, p: []byte, off: i64) -> (n: int, err: Error)
-Write_To_Proc :: proc(using s: Stream, w: Writer) -> (n: i64, err: Error)
-Write_Byte_Proc :: proc(using s: Stream, c: byte) -> Error
-Write_Rune_Proc :: proc(using s: Stream, r: rune) -> (size: int, err: Error)
-Destroy_Proc :: proc(using s: Stream) -> Error
+Stream_Mode :: enum {
+ Close,
+ Flush,
+ Read,
+ Read_At,
+ Write,
+ Write_At,
+ Seek,
+ Size,
+ Destroy,
+ Query, // query what modes are available
+}
+Stream_Mode_Set :: distinct bit_set[Stream_Mode; i64]
+
+Stream_Proc :: #type proc(stream_data: rawptr, mode: Stream_Mode, p: []byte, offset: i64, whence: Seek_From) -> (n: i64, err: Error)
Stream :: struct {
- using stream_vtable: ^Stream_VTable,
- stream_data: rawptr,
-}
-Stream_VTable :: struct {
- impl_close: Close_Proc,
- impl_flush: Flush_Proc,
-
- impl_seek: Seek_Proc,
- impl_size: Size_Proc,
-
- impl_read: Read_Proc,
- impl_read_at: Read_At_Proc,
- impl_read_byte: Read_Byte_Proc,
- impl_read_rune: Read_Rune_Proc,
- impl_write_to: Write_To_Proc,
-
- impl_write: Write_Proc,
- impl_write_at: Write_At_Proc,
- impl_write_byte: Write_Byte_Proc,
- impl_write_rune: Write_Rune_Proc,
- impl_read_from: Read_From_Proc,
-
- impl_unread_byte: Unread_Byte_Proc,
- impl_unread_rune: Unread_Rune_Proc,
-
- impl_destroy: Destroy_Proc,
+ procedure: Stream_Proc,
+ data: rawptr,
}
+Reader :: Stream
+Writer :: Stream
+Closer :: Stream
+Flusher :: Stream
+Seeker :: Stream
-Reader :: struct {using stream: Stream}
-Writer :: struct {using stream: Stream}
-Closer :: struct {using stream: Stream}
-Flusher :: struct {using stream: Stream}
-Seeker :: struct {using stream: Stream}
+Read_Writer :: Stream
+Read_Closer :: Stream
+Read_Write_Closer :: Stream
+Read_Write_Seeker :: Stream
-Read_Writer :: struct {using stream: Stream}
-Read_Closer :: struct {using stream: Stream}
-Read_Write_Closer :: struct {using stream: Stream}
-Read_Write_Seeker :: struct {using stream: Stream}
+Write_Closer :: Stream
+Write_Seeker :: Stream
+Write_Flusher :: Stream
+Write_Flush_Closer :: Stream
-Write_Closer :: struct {using stream: Stream}
-Write_Seeker :: struct {using stream: Stream}
-Write_Flusher :: struct {using stream: Stream}
-Write_Flush_Closer :: struct {using stream: Stream}
-
-Reader_At :: struct {using stream: Stream}
-Writer_At :: struct {using stream: Stream}
-Reader_From :: struct {using stream: Stream}
-Writer_To :: struct {using stream: Stream}
+Reader_At :: Stream
+Writer_At :: Stream
-destroy :: proc(s: Stream) -> Error {
- close_err := close({s})
- if s.stream_vtable != nil && s.impl_destroy != nil {
- return s->impl_destroy()
+destroy :: proc(s: Stream) -> (err: Error) {
+ _ = flush(s)
+ _ = close(s)
+ if s.procedure != nil {
+ _, err = s.procedure(s.data, .Destroy, nil, 0, nil)
+ } else {
+ err = .Empty
}
- if close_err != .None {
- return close_err
- }
- return .Empty
+ return
}
+query :: proc(s: Stream) -> (set: Stream_Mode_Set) {
+ if s.procedure != nil {
+ n, _ := s.procedure(s.data, .Query, nil, 0, nil)
+ set = transmute(Stream_Mode_Set)n
+ if set != nil {
+ set += {.Query}
+ }
+ }
+ return
+}
+
+query_utility :: #force_inline proc "contextless" (set: Stream_Mode_Set) -> (n: i64, err: Error) {
+ return transmute(i64)set, nil
+}
+
+_i64_err :: #force_inline proc "contextless" (n: int, err: Error) -> (i64, Error) {
+ return i64(n), err
+}
+
+
// read reads up to len(p) bytes into s. It returns the number of bytes read and any error if occurred.
//
// When read encounters an .EOF or error after successfully reading n > 0 bytes, it returns the number of
// bytes read along with the error.
read :: proc(s: Reader, p: []byte, n_read: ^int = nil) -> (n: int, err: Error) {
- if s.stream_vtable != nil {
- if s.impl_read != nil {
- n, err = s->impl_read(p)
- if n_read != nil {
- n_read^ += n
- }
- return
- } else if s.impl_read_byte != nil {
- bytes_read := 0
- defer if n_read != nil {
- n_read^ += bytes_read
- }
- for _, i in p {
- p[i] = s->impl_read_byte() or_return
- bytes_read += 1
- }
- return
- }
+ if s.procedure != nil {
+ n64: i64
+ n64, err = s.procedure(s.data, .Read, p, 0, nil)
+ n = int(n64)
+ if n_read != nil { n_read^ += n }
+ } else {
+ err = .Empty
}
- return 0, .Empty
+ return
}
// write writes up to len(p) bytes into s. It returns the number of bytes written and any error if occurred.
write :: proc(s: Writer, p: []byte, n_written: ^int = nil) -> (n: int, err: Error) {
- if s.stream_vtable != nil {
- if s.impl_write != nil {
- n, err = s->impl_write(p)
- if n_written != nil {
- n_written^ += n
- }
- return
- } else if s.impl_write_byte != nil {
- bytes_written := 0
- defer if n_written != nil {
- n_written^ += bytes_written
- }
- for c in p {
- s->impl_write_byte(c) or_return
- bytes_written += 1
- }
- return
- }
+ if s.procedure != nil {
+ n64: i64
+ n64, err = s.procedure(s.data, .Write, p, 0, nil)
+ n = int(n64)
+ if n_written != nil { n_written^ += n }
+ } else {
+ err = .Empty
}
- return 0, .Empty
+ return
}
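To make the new single-procedure interface concrete, here is a minimal sketch of a read-only in-memory stream written against the definitions above; everything in it (the package name, Memory_Reader, memory_reader_proc) is illustrative and not part of this patch:

    package memory_reader_example

    import "core:io"

    Memory_Reader :: struct {
        data: []byte,
        pos:  int,
    }

    memory_reader_proc :: proc(stream_data: rawptr, mode: io.Stream_Mode, p: []byte, offset: i64, whence: io.Seek_From) -> (n: i64, err: io.Error) {
        m := (^Memory_Reader)(stream_data)
        #partial switch mode {
        case .Read:
            if m.pos >= len(m.data) {
                return 0, .EOF
            }
            n = i64(copy(p, m.data[m.pos:]))
            m.pos += int(n)
            return
        case .Query:
            // advertise the supported modes so query(s) and the to_* conversions work
            return io.query_utility({.Read, .Query})
        }
        return 0, .Empty
    }

    memory_reader_to_stream :: proc(m: ^Memory_Reader) -> (s: io.Stream) {
        s.procedure = memory_reader_proc
        s.data = m
        return
    }

Compared with the old Stream_VTable, a single procedure plus a Stream_Mode discriminant is all an implementation has to provide; unsupported modes simply fall through to .Empty.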
// seek sets the offset of the next read or write to offset.
@@ -194,57 +163,45 @@ write :: proc(s: Writer, p: []byte, n_written: ^int = nil) -> (n: int, err: Erro
//
// seek returns the new offset to the start of the file/stream, and any error if occurred.
seek :: proc(s: Seeker, offset: i64, whence: Seek_From) -> (n: i64, err: Error) {
- if s.stream_vtable != nil && s.impl_seek != nil {
- return s->impl_seek(offset, whence)
+ if s.procedure != nil {
+ n, err = s.procedure(s.data, .Seek, nil, offset, whence)
+ } else {
+ err = .Empty
}
- return 0, .Empty
+ return
}
// The behaviour of close after the first call is stream implementation defined.
// Different streams may document their own behaviour.
-close :: proc(s: Closer) -> Error {
- if s.stream_vtable != nil && s.impl_close != nil {
- return s->impl_close()
+close :: proc(s: Closer) -> (err: Error) {
+ if s.procedure != nil {
+ _, err = s.procedure(s.data, .Close, nil, 0, nil)
}
- // Instead of .Empty, .None is fine in this case
- return .None
+ return
}
-flush :: proc(s: Flusher) -> Error {
- if s.stream_vtable != nil && s.impl_flush != nil {
- return s->impl_flush()
+flush :: proc(s: Flusher) -> (err: Error) {
+ if s.procedure != nil {
+ _, err = s.procedure(s.data, .Flush, nil, 0, nil)
}
- // Instead of .Empty, .None is fine in this case
- return .None
+ return
}
// size returns the size of the stream. If the stream does not support querying its size, 0 will be returned.
-size :: proc(s: Stream) -> i64 {
- if s.stream_vtable == nil {
- return 0
+size :: proc(s: Stream) -> (n: i64, err: Error) {
+ if s.procedure != nil {
+ n, err = s.procedure(s.data, .Size, nil, 0, nil)
+ if err == .Empty {
+ n = 0
+ curr := seek(s, 0, .Current) or_return
+ end := seek(s, 0, .End) or_return
+ seek(s, curr, .Start) or_return
+ n = end
+ }
+ } else {
+ err = .Empty
}
- if s.impl_size != nil {
- return s->impl_size()
- }
- if s.impl_seek == nil {
- return 0
- }
-
- curr, end: i64
- err: Error
- if curr, err = s->impl_seek(0, .Current); err != nil {
- return 0
- }
-
- if end, err = s->impl_seek(0, .End); err != nil {
- return 0
- }
-
- if _, err = s->impl_seek(curr, .Start); err != nil {
- return 0
- }
-
- return end
+ return
}
@@ -256,29 +213,24 @@ size :: proc(s: Stream) -> i64 {
//
// If n == len(p), err may be either nil or .EOF
read_at :: proc(r: Reader_At, p: []byte, offset: i64, n_read: ^int = nil) -> (n: int, err: Error) {
- defer if n_read != nil {
- n_read^ += n
- }
-
- if r.stream_vtable == nil {
- return 0, .Empty
- }
- if r.impl_read_at != nil {
- return r->impl_read_at(p, offset)
- }
- if r.impl_seek == nil || r.impl_read == nil {
- return 0, .Empty
- }
-
- curr_offset := r->impl_seek(offset, .Current) or_return
-
- n, err = r->impl_read(p)
- _, err1 := r->impl_seek(curr_offset, .Start)
- if err1 != nil && err == nil {
- err = err1
+ if r.procedure != nil {
+ n64: i64
+ n64, err = r.procedure(r.data, .Read_At, p, offset, nil)
+ if err != .Empty {
+ n = int(n64)
+ } else {
+ curr := seek(r, offset, .Current) or_return
+ n, err = read(r, p)
+ _, err1 := seek(r, curr, .Start)
+ if err1 != nil && err == nil {
+ err = err1
+ }
+ }
+ if n_read != nil { n_read^ += n }
+ } else {
+ err = .Empty
}
return
-
}
// write_at writes len(p) bytes into p starting with the provided offset in the underlying Writer_At stream w.
@@ -287,97 +239,39 @@ read_at :: proc(r: Reader_At, p: []byte, offset: i64, n_read: ^int = nil) -> (n:
// If write_at is writing to a Writer_At which has a seek offset, then write_at should not affect the underlying
// seek offset.
write_at :: proc(w: Writer_At, p: []byte, offset: i64, n_written: ^int = nil) -> (n: int, err: Error) {
- defer if n_written != nil {
- n_written^ += n
- }
-
- if w.stream_vtable == nil {
- return 0, .Empty
- }
- if w.impl_write_at != nil {
- return w->impl_write_at(p, offset)
- }
- if w.impl_seek == nil || w.impl_write == nil {
- return 0, .Empty
- }
-
- curr_offset: i64
- curr_offset, err = w->impl_seek(offset, .Current)
- if err != nil {
- return 0, err
- }
-
- n, err = w->impl_write(p)
- _, err1 := w->impl_seek(curr_offset, .Start)
- if err1 != nil && err == nil {
- err = err1
+ if w.procedure != nil {
+ n64: i64
+ n64, err = w.procedure(w.data, .Write_At, p, offset, nil)
+ if err != .Empty {
+ n = int(n64)
+ } else {
+ curr := seek(w, offset, .Current) or_return
+ n, err = write(w, p)
+ _, err1 := seek(w, curr, .Start)
+ if err1 != nil && err == nil {
+ err = err1
+ }
+ }
+ if n_written != nil { n_written^ += n }
+ } else {
+ err = .Empty
}
return
}
-write_to :: proc(r: Writer_To, w: Writer) -> (n: i64, err: Error) {
- if r.stream_vtable == nil || w.stream_vtable == nil {
- return 0, .Empty
- }
- if r.impl_write_to != nil {
- return r->impl_write_to(w)
- }
- return 0, .Empty
-}
-read_from :: proc(w: Reader_From, r: Reader) -> (n: i64, err: Error) {
- if r.stream_vtable == nil || w.stream_vtable == nil {
- return 0, .Empty
- }
- if r.impl_read_from != nil {
- return w->impl_read_from(r)
- }
- return 0, .Empty
-}
-
-
// read_byte reads and returns the next byte from r.
read_byte :: proc(r: Reader, n_read: ^int = nil) -> (b: byte, err: Error) {
- defer if err == nil && n_read != nil {
- n_read^ += 1
- }
-
- if r.stream_vtable == nil {
- return 0, .Empty
- }
- if r.impl_read_byte != nil {
- return r->impl_read_byte()
- }
- if r.impl_read == nil {
- return 0, .Empty
- }
-
buf: [1]byte
- _, err = r->impl_read(buf[:])
- return buf[0], err
+ _, err = read(r, buf[:], n_read)
+ b = buf[0]
+ return
}
write_byte :: proc(w: Writer, c: byte, n_written: ^int = nil) -> Error {
- return _write_byte(auto_cast w, c, n_written)
-}
-
-@(private)
-_write_byte :: proc(w: Writer, c: byte, n_written: ^int = nil) -> (err: Error) {
- defer if err == nil && n_written != nil {
- n_written^ += 1
- }
- if w.stream_vtable == nil {
- return .Empty
- }
- if w.impl_write_byte != nil {
- return w->impl_write_byte(c)
- }
- if w.impl_write == nil {
- return .Empty
- }
-
- b := [1]byte{c}
- _, err = w->impl_write(b[:])
- return err
+ buf: [1]byte
+ buf[0] = c
+ write(w, buf[:], n_written) or_return
+ return nil
}
// read_rune reads a single UTF-8 encoded Unicode codepoint and returns the rune and its size in bytes.
@@ -385,19 +279,9 @@ read_rune :: proc(br: Reader, n_read: ^int = nil) -> (ch: rune, size: int, err:
defer if err == nil && n_read != nil {
n_read^ += size
}
- if br.stream_vtable == nil {
- return 0, 0, .Empty
- }
- if br.impl_read_rune != nil {
- return br->impl_read_rune()
- }
- if br.impl_read == nil {
- return 0, 0, .Empty
- }
b: [utf8.UTF_MAX]byte
- _, err = br->impl_read(b[:1])
-
+ _, err = read(br, b[:1])
s0 := b[0]
ch = rune(s0)
@@ -415,7 +299,7 @@ read_rune :: proc(br: Reader, n_read: ^int = nil) -> (ch: rune, size: int, err:
return
}
sz := int(x&7)
- size, err = br->impl_read(b[1:sz])
+ size, err = read(br, b[1:sz])
if err != nil || size+1 < sz {
ch = utf8.RUNE_ERROR
return
@@ -425,28 +309,6 @@ read_rune :: proc(br: Reader, n_read: ^int = nil) -> (ch: rune, size: int, err:
return
}
-unread_byte :: proc(s: Stream) -> Error {
- if s.stream_vtable == nil {
- return .Empty
- }
- if s.impl_unread_byte != nil {
- return s->impl_unread_byte()
- }
- if s.impl_seek != nil {
- _, err := s->impl_seek(-1, .Current)
- return err
- }
-
- return .Empty
-}
-unread_rune :: proc(s: Writer) -> Error {
- if s.stream_vtable != nil && s.impl_unread_rune != nil {
- return s->impl_unread_rune()
- }
- return .Empty
-}
-
-
// write_string writes the contents of the string s to w.
write_string :: proc(s: Writer, str: string, n_written: ^int = nil) -> (n: int, err: Error) {
return write(s, transmute([]byte)str, n_written)
@@ -457,14 +319,6 @@ write_rune :: proc(s: Writer, r: rune, n_written: ^int = nil) -> (size: int, err
defer if err == nil && n_written != nil {
n_written^ += size
}
-
- if s.stream_vtable == nil {
- return 0, .Empty
- }
- if s.impl_write_rune != nil {
- return s->impl_write_rune(r)
- }
-
if r < utf8.RUNE_SELF {
err = write_byte(s, byte(r))
if err == nil {
@@ -542,21 +396,15 @@ copy_n :: proc(dst: Writer, src: Reader, n: i64) -> (written: i64, err: Error) {
@(private)
_copy_buffer :: proc(dst: Writer, src: Reader, buf: []byte) -> (written: i64, err: Error) {
- if dst.stream_vtable == nil || src.stream_vtable == nil {
+ if dst.procedure == nil || src.procedure == nil {
return 0, .Empty
}
- if src.impl_write_to != nil {
- return src->impl_write_to(dst)
- }
- if src.impl_read_from != nil {
- return dst->impl_read_from(src)
- }
buf := buf
if buf == nil {
DEFAULT_SIZE :: 4 * 1024
size := DEFAULT_SIZE
- if src.stream_vtable == _limited_reader_vtable {
- l := (^Limited_Reader)(src.stream_data)
+ if src.procedure == _limited_reader_proc {
+ l := (^Limited_Reader)(src.data)
if i64(size) > l.n {
if l.n < 1 {
size = 1
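The Limited_Reader special case above also shows the replacement for vtable-pointer comparisons: a stream implementation is now identified by its `procedure` pointer. A user-level check looks the same (hypothetical, reusing `memory_reader_proc` from the earlier sketch):

    is_memory_reader :: proc(s: io.Stream) -> bool {
        return s.procedure == memory_reader_proc
    }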
diff --git a/core/io/multi.odin b/core/io/multi.odin
index 64c533e37..e85114a7a 100644
--- a/core/io/multi.odin
+++ b/core/io/multi.odin
@@ -5,33 +5,37 @@ Multi_Reader :: struct {
}
@(private)
-_multi_reader_vtable := &Stream_VTable{
- impl_read = proc(s: Stream, p: []byte) -> (n: int, err: Error) {
- mr := (^Multi_Reader)(s.stream_data)
- for len(mr.readers) > 0 {
- r := mr.readers[0]
- n, err = read(r, p)
- if err == .EOF {
- ordered_remove(&mr.readers, 0)
- }
- if n > 0 || err != .EOF {
- if err == .EOF && len(mr.readers) > 0 {
- // Don't return EOF yet, more readers remain
- err = nil
- }
- return
- }
+_multi_reader_proc :: proc(stream_data: rawptr, mode: Stream_Mode, p: []byte, offset: i64, whence: Seek_From) -> (n: i64, err: Error) {
+ if mode == .Query {
+ return query_utility({.Read, .Query})
+ } else if mode != .Read {
+ return 0, .Empty
+ }
+ mr := (^Multi_Reader)(stream_data)
+ for len(mr.readers) > 0 {
+ r := mr.readers[0]
+ n, err = _i64_err(read(r, p))
+ if err == .EOF {
+ ordered_remove(&mr.readers, 0)
}
- return 0, .EOF
- },
+ if n > 0 || err != .EOF {
+ if err == .EOF && len(mr.readers) > 0 {
+ // Don't return EOF yet, more readers remain
+ err = nil
+ }
+ return
+ }
+ }
+ return 0, .EOF
}
+
multi_reader_init :: proc(mr: ^Multi_Reader, readers: ..Reader, allocator := context.allocator) -> (r: Reader) {
all_readers := make([dynamic]Reader, 0, len(readers), allocator)
for w in readers {
- if w.stream_vtable == _multi_reader_vtable {
- other := (^Multi_Reader)(w.stream_data)
+ if w.procedure == _multi_reader_proc {
+ other := (^Multi_Reader)(w.data)
append(&all_readers, ..other.readers[:])
} else {
append(&all_readers, w)
@@ -40,8 +44,8 @@ multi_reader_init :: proc(mr: ^Multi_Reader, readers: ..Reader, allocator := con
mr.readers = all_readers
- r.stream_vtable = _multi_reader_vtable
- r.stream_data = mr
+ r.procedure = _multi_reader_proc
+ r.data = mr
return
}
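A usage sketch for the rewritten Multi_Reader (illustrative names; `core:io` imported): readers are drained in order, and .EOF is only reported once the last one is exhausted.

    read_all_from_both :: proc(r1, r2: io.Reader) -> (data: [dynamic]byte) {
        mr: io.Multi_Reader
        r := io.multi_reader_init(&mr, r1, r2)
        buf: [128]byte
        for {
            n, err := io.read(r, buf[:])
            append(&data, ..buf[:n])
            if err != nil { // .EOF once every reader is drained
                return
            }
        }
    }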
@@ -55,38 +59,42 @@ Multi_Writer :: struct {
}
@(private)
-_multi_writer_vtable := &Stream_VTable{
- impl_write = proc(s: Stream, p: []byte) -> (n: int, err: Error) {
- mw := (^Multi_Writer)(s.stream_data)
- for w in mw.writers {
- n, err = write(w, p)
- if err != nil {
- return
- }
- if n != len(p) {
- err = .Short_Write
- return
- }
+_multi_writer_proc :: proc(stream_data: rawptr, mode: Stream_Mode, p: []byte, offset: i64, whence: Seek_From) -> (n: i64, err: Error) {
+ if mode == .Query {
+ return query_utility({.Write, .Query})
+ } else if mode != .Write {
+ return 0, .Empty
+ }
+ mw := (^Multi_Writer)(stream_data)
+ for w in mw.writers {
+ n, err = _i64_err(write(w, p))
+ if err != nil {
+ return
}
+ if n != i64(len(p)) {
+ err = .Short_Write
+ return
+ }
+ }
- return len(p), nil
- },
+ return i64(len(p)), nil
}
+
multi_writer_init :: proc(mw: ^Multi_Writer, writers: ..Writer, allocator := context.allocator) -> (out: Writer) {
mw.writers = make([dynamic]Writer, 0, len(writers), allocator)
for w in writers {
- if w.stream_vtable == _multi_writer_vtable {
- other := (^Multi_Writer)(w.stream_data)
+ if w.procedure == _multi_writer_proc {
+ other := (^Multi_Writer)(w.data)
append(&mw.writers, ..other.writers[:])
} else {
append(&mw.writers, w)
}
}
- out.stream_vtable = _multi_writer_vtable
- out.stream_data = mw
+ out.procedure = _multi_writer_proc
+ out.data = mw
return
}
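And the Multi_Writer counterpart, fanning one write out to every registered writer (sketch only; `w1` and `w2` are assumed to exist):

    write_to_both :: proc(w1, w2: io.Writer, msg: string) -> io.Error {
        mw: io.Multi_Writer
        w := io.multi_writer_init(&mw, w1, w2)
        _, err := io.write_string(w, msg) // stops at the first error or short write
        return err
    }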
diff --git a/core/io/util.odin b/core/io/util.odin
index cfd7d3608..c77d0be9d 100644
--- a/core/io/util.odin
+++ b/core/io/util.odin
@@ -292,17 +292,21 @@ Tee_Reader :: struct {
}
@(private)
-_tee_reader_vtable := &Stream_VTable{
- impl_read = proc(s: Stream, p: []byte) -> (n: int, err: Error) {
- t := (^Tee_Reader)(s.stream_data)
- n, err = read(t.r, p)
+_tee_reader_proc :: proc(stream_data: rawptr, mode: Stream_Mode, p: []byte, offset: i64, whence: Seek_From) -> (n: i64, err: Error) {
+ t := (^Tee_Reader)(stream_data)
+ #partial switch mode {
+ case .Read:
+ n, err = _i64_err(read(t.r, p))
if n > 0 {
if wn, werr := write(t.w, p[:n]); werr != nil {
- return wn, werr
+ return i64(wn), werr
}
}
return
- },
+ case .Query:
+ return query_utility({.Read, .Query})
+ }
+ return 0, .Empty
}
// tee_reader_init returns a Reader that writes to 'w' what it reads from 'r'
@@ -317,8 +321,8 @@ tee_reader_init :: proc(t: ^Tee_Reader, r: Reader, w: Writer, allocator := conte
}
tee_reader_to_reader :: proc(t: ^Tee_Reader) -> (r: Reader) {
- r.stream_data = t
- r.stream_vtable = _tee_reader_vtable
+ r.data = t
+ r.procedure = _tee_reader_proc
return
}
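Sketch of the Tee_Reader in use (hypothetical proc; `core:io` imported): every byte read from `r` is also written to `w`.

    capture_reads :: proc(r: io.Reader, w: io.Writer, p: []byte) -> (int, io.Error) {
        t: io.Tee_Reader
        io.tee_reader_init(&t, r, w)
        tr := io.tee_reader_to_reader(&t)
        return io.read(tr, p)
    }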
@@ -332,9 +336,10 @@ Limited_Reader :: struct {
}
@(private)
-_limited_reader_vtable := &Stream_VTable{
- impl_read = proc(s: Stream, p: []byte) -> (n: int, err: Error) {
- l := (^Limited_Reader)(s.stream_data)
+_limited_reader_proc :: proc(stream_data: rawptr, mode: Stream_Mode, p: []byte, offset: i64, whence: Seek_From) -> (n: i64, err: Error) {
+ l := (^Limited_Reader)(stream_data)
+ #partial switch mode {
+ case .Read:
if l.n <= 0 {
return 0, .EOF
}
@@ -342,10 +347,13 @@ _limited_reader_vtable := &Stream_VTable{
if i64(len(p)) > l.n {
p = p[0:l.n]
}
- n, err = read(l.r, p)
+ n, err = _i64_err(read(l.r, p))
l.n -= i64(n)
return
- },
+ case .Query:
+ return query_utility({.Read, .Query})
+ }
+ return 0, .Empty
}
limited_reader_init :: proc(l: ^Limited_Reader, r: Reader, n: i64) -> Reader {
@@ -355,8 +363,8 @@ limited_reader_init :: proc(l: ^Limited_Reader, r: Reader, n: i64) -> Reader {
}
limited_reader_to_reader :: proc(l: ^Limited_Reader) -> (r: Reader) {
- r.stream_vtable = _limited_reader_vtable
- r.stream_data = l
+ r.procedure = _limited_reader_proc
+ r.data = l
return
}
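Sketch of the Limited_Reader in use (hypothetical proc): reads report .EOF once `limit` bytes have been consumed.

    read_at_most :: proc(r: io.Reader, limit: i64, p: []byte) -> (int, io.Error) {
        l: io.Limited_Reader
        lr := io.limited_reader_init(&l, r, limit)
        return io.read(lr, p)
    }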
@@ -375,15 +383,16 @@ section_reader_init :: proc(s: ^Section_Reader, r: Reader_At, off: i64, n: i64)
return
}
section_reader_to_stream :: proc(s: ^Section_Reader) -> (out: Stream) {
- out.stream_data = s
- out.stream_vtable = _section_reader_vtable
+ out.data = s
+ out.procedure = _section_reader_proc
return
}
@(private)
-_section_reader_vtable := &Stream_VTable{
- impl_read = proc(stream: Stream, p: []byte) -> (n: int, err: Error) {
- s := (^Section_Reader)(stream.stream_data)
+_section_reader_proc :: proc(stream_data: rawptr, mode: Stream_Mode, p: []byte, offset: i64, whence: Seek_From) -> (n: i64, err: Error) {
+ s := (^Section_Reader)(stream_data)
+ #partial switch mode {
+ case .Read:
if s.off >= s.limit {
return 0, .EOF
}
@@ -391,13 +400,11 @@ _section_reader_vtable := &Stream_VTable{
if max := s.limit - s.off; i64(len(p)) > max {
p = p[0:max]
}
- n, err = read_at(s.r, p, s.off)
+ n, err = _i64_err(read_at(s.r, p, s.off))
s.off += i64(n)
return
- },
- impl_read_at = proc(stream: Stream, p: []byte, off: i64) -> (n: int, err: Error) {
- s := (^Section_Reader)(stream.stream_data)
- p, off := p, off
+ case .Read_At:
+ p, off := p, offset
if off < 0 || off >= s.limit - s.base {
return 0, .EOF
@@ -405,17 +412,15 @@ _section_reader_vtable := &Stream_VTable{
off += s.base
if max := s.limit - off; i64(len(p)) > max {
p = p[0:max]
- n, err = read_at(s.r, p, off)
+ n, err = _i64_err(read_at(s.r, p, off))
if err == nil {
err = .EOF
}
return
}
- return read_at(s.r, p, off)
- },
- impl_seek = proc(stream: Stream, offset: i64, whence: Seek_From) -> (n: i64, err: Error) {
- s := (^Section_Reader)(stream.stream_data)
+ return _i64_err(read_at(s.r, p, off))
+ case .Seek:
offset := offset
switch whence {
case:
@@ -433,10 +438,12 @@ _section_reader_vtable := &Stream_VTable{
s.off = offset
n = offset - s.base
return
- },
- impl_size = proc(stream: Stream) -> i64 {
- s := (^Section_Reader)(stream.stream_data)
- return s.limit - s.base
- },
-}
+ case .Size:
+ n = s.limit - s.base
+ return
+ case .Query:
+ return query_utility({.Read, .Read_At, .Seek, .Size, .Query})
+ }
+ return 0, nil
+}
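Sketch of the Section_Reader in use (hypothetical proc): it exposes the window [off, off+n) of a Reader_At through the ordinary stream procedures, with .Read_At, .Seek and .Size advertised by its .Query case above.

    read_section :: proc(ra: io.Reader_At, off, n: i64, p: []byte) -> (int, io.Error) {
        s: io.Section_Reader
        io.section_reader_init(&s, ra, off, n)
        stream := io.section_reader_to_stream(&s)
        return io.read(stream, p)
    }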
diff --git a/core/log/log.odin b/core/log/log.odin
index a699247b8..f3554791b 100644
--- a/core/log/log.odin
+++ b/core/log/log.odin
@@ -76,43 +76,43 @@ nil_logger :: proc() -> Logger {
}
debugf :: proc(fmt_str: string, args: ..any, location := #caller_location) {
- logf(level=.Debug, fmt_str=fmt_str, args=args, location=location)
+ logf(.Debug, fmt_str, ..args, location=location)
}
infof :: proc(fmt_str: string, args: ..any, location := #caller_location) {
- logf(level=.Info, fmt_str=fmt_str, args=args, location=location)
+ logf(.Info, fmt_str, ..args, location=location)
}
warnf :: proc(fmt_str: string, args: ..any, location := #caller_location) {
- logf(level=.Warning, fmt_str=fmt_str, args=args, location=location)
+ logf(.Warning, fmt_str, ..args, location=location)
}
errorf :: proc(fmt_str: string, args: ..any, location := #caller_location) {
- logf(level=.Error, fmt_str=fmt_str, args=args, location=location)
+ logf(.Error, fmt_str, ..args, location=location)
}
fatalf :: proc(fmt_str: string, args: ..any, location := #caller_location) {
- logf(level=.Fatal, fmt_str=fmt_str, args=args, location=location)
+ logf(.Fatal, fmt_str, ..args, location=location)
}
debug :: proc(args: ..any, sep := " ", location := #caller_location) {
- log(level=.Debug, args=args, sep=sep, location=location)
+ log(.Debug, ..args, sep=sep, location=location)
}
info :: proc(args: ..any, sep := " ", location := #caller_location) {
- log(level=.Info, args=args, sep=sep, location=location)
+ log(.Info, ..args, sep=sep, location=location)
}
warn :: proc(args: ..any, sep := " ", location := #caller_location) {
- log(level=.Warning, args=args, sep=sep, location=location)
+ log(.Warning, ..args, sep=sep, location=location)
}
error :: proc(args: ..any, sep := " ", location := #caller_location) {
- log(level=.Error, args=args, sep=sep, location=location)
+ log(.Error, ..args, sep=sep, location=location)
}
fatal :: proc(args: ..any, sep := " ", location := #caller_location) {
- log(level=.Fatal, args=args, sep=sep, location=location)
+ log(.Fatal, ..args, sep=sep, location=location)
}
panic :: proc(args: ..any, location := #caller_location) -> ! {
- log(level=.Fatal, args=args, location=location)
+ log(.Fatal, ..args, location=location)
runtime.panic("log.panic", location)
}
panicf :: proc(fmt_str: string, args: ..any, location := #caller_location) -> ! {
- logf(level=.Fatal, fmt_str=fmt_str, args=args, location=location)
+ logf(.Fatal, fmt_str, ..args, location=location)
runtime.panic("log.panicf", location)
}
@@ -127,7 +127,7 @@ log :: proc(level: Level, args: ..any, sep := " ", location := #caller_location)
if level < logger.lowest_level {
return
}
- str := fmt.tprint(args=args, sep=sep) //NOTE(Hoej): While tprint isn't thread-safe, no logging is.
+ str := fmt.tprint(..args, sep=sep) //NOTE(Hoej): While tprint isn't thread-safe, no logging is.
logger.procedure(logger.data, level, str, logger.options, location)
}
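For reference, the positional call form the wrappers now use looks like this at a call site (illustrative proc; `core:log` imported and a logger installed in context.logger):

    report :: proc(item_count: int) {
        log.infof("loaded %d items", item_count)
        log.error("load failed for", "settings.json") // default sep = " "
    }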
diff --git a/core/log/log_allocator.odin b/core/log/log_allocator.odin
index f4d1841db..934f0d643 100644
--- a/core/log/log_allocator.odin
+++ b/core/log/log_allocator.odin
@@ -38,60 +38,60 @@ log_allocator_proc :: proc(allocator_data: rawptr, mode: runtime.Allocator_Mode,
switch mode {
case .Alloc:
logf(
- level=la.level,
- fmt_str = "%s%s>>> ALLOCATOR(mode=.Alloc, size=%d, alignment=%d)",
- args = {la.prefix, padding, size, alignment},
+ la.level,
+ "%s%s>>> ALLOCATOR(mode=.Alloc, size=%d, alignment=%d)",
+ la.prefix, padding, size, alignment,
location = location,
)
case .Alloc_Non_Zeroed:
logf(
- level=la.level,
- fmt_str = "%s%s>>> ALLOCATOR(mode=.Alloc_Non_Zeroed, size=%d, alignment=%d)",
- args = {la.prefix, padding, size, alignment},
+ la.level,
+ "%s%s>>> ALLOCATOR(mode=.Alloc_Non_Zeroed, size=%d, alignment=%d)",
+ la.prefix, padding, size, alignment,
location = location,
)
case .Free:
if old_size != 0 {
logf(
- level=la.level,
- fmt_str = "%s%s<<< ALLOCATOR(mode=.Free, ptr=%p, size=%d)",
- args = {la.prefix, padding, old_memory, old_size},
+ la.level,
+ "%s%s<<< ALLOCATOR(mode=.Free, ptr=%p, size=%d)",
+ la.prefix, padding, old_memory, old_size,
location = location,
)
} else {
logf(
- level=la.level,
- fmt_str = "%s%s<<< ALLOCATOR(mode=.Free, ptr=%p)",
- args = {la.prefix, padding, old_memory},
+ la.level,
+ "%s%s<<< ALLOCATOR(mode=.Free, ptr=%p)",
+ la.prefix, padding, old_memory,
location = location,
)
}
case .Free_All:
logf(
- level=la.level,
- fmt_str = "%s%s<<< ALLOCATOR(mode=.Free_All)",
- args = {la.prefix, padding},
+ la.level,
+ "%s%s<<< ALLOCATOR(mode=.Free_All)",
+ la.prefix, padding,
location = location,
)
case .Resize:
logf(
- level=la.level,
- fmt_str = "%s%s>>> ALLOCATOR(mode=.Resize, ptr=%p, old_size=%d, size=%d, alignment=%d)",
- args = {la.prefix, padding, old_memory, old_size, size, alignment},
+ la.level,
+ "%s%s>>> ALLOCATOR(mode=.Resize, ptr=%p, old_size=%d, size=%d, alignment=%d)",
+ la.prefix, padding, old_memory, old_size, size, alignment,
location = location,
)
case .Query_Features:
logf(
- level=la.level,
- fmt_str = "%s%ALLOCATOR(mode=.Query_Features)",
- args = {la.prefix, padding},
+ la.level,
+ "%s%ALLOCATOR(mode=.Query_Features)",
+ la.prefix, padding,
location = location,
)
case .Query_Info:
logf(
- level=la.level,
- fmt_str = "%s%ALLOCATOR(mode=.Query_Info)",
- args = {la.prefix, padding},
+ la.level,
+ "%s%ALLOCATOR(mode=.Query_Info)",
+ la.prefix, padding,
location = location,
)
}
@@ -103,9 +103,9 @@ log_allocator_proc :: proc(allocator_data: rawptr, mode: runtime.Allocator_Mode,
defer la.locked = false
if err != nil {
logf(
- level=la.level,
- fmt_str = "%s%ALLOCATOR ERROR=%v",
- args = {la.prefix, padding, error},
+ la.level,
+ "%s%ALLOCATOR ERROR=%v",
+ la.prefix, padding, error,
location = location,
)
}
diff --git a/core/math/big/helpers.odin b/core/math/big/helpers.odin
index 6c4b5dd01..a4313a244 100644
--- a/core/math/big/helpers.odin
+++ b/core/math/big/helpers.odin
@@ -19,7 +19,7 @@ import rnd "core:math/rand"
int_destroy :: proc(integers: ..^Int) {
integers := integers
- for a in &integers {
+ for a in integers {
assert_if_nil(a)
}
#force_inline internal_int_destroy(..integers)
@@ -408,7 +408,7 @@ clear_if_uninitialized_multi :: proc(args: ..^Int, allocator := context.allocato
args := args
assert_if_nil(..args)
- for i in &args {
+ for i in args {
#force_inline internal_clear_if_uninitialized_single(i, allocator) or_return
}
return err
@@ -435,7 +435,7 @@ int_init_multi :: proc(integers: ..^Int, allocator := context.allocator) -> (err
assert_if_nil(..integers)
integers := integers
- for a in &integers {
+ for a in integers {
#force_inline internal_clear(a, true, allocator) or_return
}
return nil
diff --git a/core/math/big/internal.odin b/core/math/big/internal.odin
index 13aa96bef..968a26f8f 100644
--- a/core/math/big/internal.odin
+++ b/core/math/big/internal.odin
@@ -1857,7 +1857,7 @@ internal_root_n :: proc { internal_int_root_n, }
internal_int_destroy :: proc(integers: ..^Int) {
integers := integers
- for a in &integers {
+ for &a in integers {
if internal_int_allocated_cap(a) > 0 {
mem.zero_slice(a.digit[:])
free(&a.digit[0])
@@ -2909,7 +2909,7 @@ internal_int_init_multi :: proc(integers: ..^Int, allocator := context.allocator
context.allocator = allocator
integers := integers
- for a in &integers {
+ for a in integers {
internal_clear(a) or_return
}
return nil
diff --git a/core/math/big/radix.odin b/core/math/big/radix.odin
index 2b758dc35..d15ce0e98 100644
--- a/core/math/big/radix.odin
+++ b/core/math/big/radix.odin
@@ -429,7 +429,7 @@ internal_int_write_to_ascii_file :: proc(a: ^Int, filename: string, radix := i8(
len = l,
}
- ok := os.write_entire_file(name=filename, data=data, truncate=true)
+ ok := os.write_entire_file(filename, data, truncate=true)
return nil if ok else .Cannot_Write_File
}
diff --git a/core/math/big/rat.odin b/core/math/big/rat.odin
index c3efc30aa..35618affb 100644
--- a/core/math/big/rat.odin
+++ b/core/math/big/rat.odin
@@ -137,7 +137,7 @@ rat_copy :: proc(dst, src: ^Rat, minimize := false, allocator := context.allocat
internal_rat_destroy :: proc(rationals: ..^Rat) {
rationals := rationals
- for z in &rationals {
+ for &z in rationals {
internal_int_destroy(&z.a, &z.b)
}
}
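The `for a in &collection` form these files used is replaced by either plain value iteration or by-reference element iteration, `for &a in collection`, when the element must be addressable. A small sketch of the new form (illustrative):

    double_all :: proc(xs: []int) {
        for &x in xs {
            x *= 2 // x is addressable; the write lands in the slice element
        }
    }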
diff --git a/core/math/cmplx/cmplx.odin b/core/math/cmplx/cmplx.odin
new file mode 100644
index 000000000..c029be30c
--- /dev/null
+++ b/core/math/cmplx/cmplx.odin
@@ -0,0 +1,513 @@
+package math_cmplx
+
+import "core:builtin"
+import "core:math"
+
+// The original C code, the long comment, and the constants
+// below are from http://netlib.sandia.gov/cephes/c9x-complex/clog.c.
+// The go code is a simplified version of the original C.
+//
+// Cephes Math Library Release 2.8: June, 2000
+// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
+//
+// The readme file at http://netlib.sandia.gov/cephes/ says:
+// Some software in this archive may be from the book _Methods and
+// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
+// International, 1989) or from the Cephes Mathematical Library, a
+// commercial product. In either event, it is copyrighted by the author.
+// What you see here may be used freely but it comes with no support or
+// guarantee.
+//
+// The two known misprints in the book are repaired here in the
+// source listings for the gamma function and the incomplete beta
+// integral.
+//
+// Stephen L. Moshier
+// moshier@na-net.ornl.gov
+
+abs :: builtin.abs
+conj :: builtin.conj
+real :: builtin.real
+imag :: builtin.imag
+jmag :: builtin.jmag
+kmag :: builtin.kmag
+
+
+sin :: proc{
+ sin_complex128,
+}
+cos :: proc{
+ cos_complex128,
+}
+tan :: proc{
+ tan_complex128,
+}
+cot :: proc{
+ cot_complex128,
+}
+
+
+sinh :: proc{
+ sinh_complex128,
+}
+cosh :: proc{
+ cosh_complex128,
+}
+tanh :: proc{
+ tanh_complex128,
+}
+
+
+
+// sqrt returns the square root of x.
+// The result r is chosen so that real(r) ≄ 0 and imag(r) has the same sign as imag(x).
+sqrt :: proc{
+ sqrt_complex32,
+ sqrt_complex64,
+ sqrt_complex128,
+}
+ln :: proc{
+ ln_complex32,
+ ln_complex64,
+ ln_complex128,
+}
+log10 :: proc{
+ log10_complex32,
+ log10_complex64,
+ log10_complex128,
+}
+
+exp :: proc{
+ exp_complex32,
+ exp_complex64,
+ exp_complex128,
+}
+
+pow :: proc{
+ pow_complex32,
+ pow_complex64,
+ pow_complex128,
+}
+
+phase :: proc{
+ phase_complex32,
+ phase_complex64,
+ phase_complex128,
+}
+
+polar :: proc{
+ polar_complex32,
+ polar_complex64,
+ polar_complex128,
+}
+
+is_inf :: proc{
+ is_inf_complex32,
+ is_inf_complex64,
+ is_inf_complex128,
+}
+
+is_nan :: proc{
+ is_nan_complex32,
+ is_nan_complex64,
+ is_nan_complex128,
+}
+
+
+
+// sqrt_complex32 returns the square root of x.
+// The result r is chosen so that real(r) ≄ 0 and imag(r) has the same sign as imag(x).
+sqrt_complex32 :: proc "contextless" (x: complex32) -> complex32 {
+ return complex32(sqrt_complex128(complex128(x)))
+}
+
+// sqrt_complex64 returns the square root of x.
+// The result r is chosen so that real(r) ≄ 0 and imag(r) has the same sign as imag(x).
+sqrt_complex64 :: proc "contextless" (x: complex64) -> complex64 {
+ return complex64(sqrt_complex128(complex128(x)))
+}
+
+
+// sqrt_complex128 returns the square root of x.
+// The result r is chosen so that real(r) ≄ 0 and imag(r) has the same sign as imag(x).
+sqrt_complex128 :: proc "contextless" (x: complex128) -> complex128 {
+ // The original C code, the long comment, and the constants
+ // below are from http://netlib.sandia.gov/cephes/c9x-complex/clog.c.
+ // The go code is a simplified version of the original C.
+ //
+ // Cephes Math Library Release 2.8: June, 2000
+ // Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
+ //
+ // The readme file at http://netlib.sandia.gov/cephes/ says:
+ // Some software in this archive may be from the book _Methods and
+ // Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
+ // International, 1989) or from the Cephes Mathematical Library, a
+ // commercial product. In either event, it is copyrighted by the author.
+ // What you see here may be used freely but it comes with no support or
+ // guarantee.
+ //
+ // The two known misprints in the book are repaired here in the
+ // source listings for the gamma function and the incomplete beta
+ // integral.
+ //
+ // Stephen L. Moshier
+ // moshier@na-net.ornl.gov
+
+ // Complex square root
+ //
+ // DESCRIPTION:
+ //
+ // If z = x + iy, r = |z|, then
+ //
+ // 1/2
+ // Re w = [ (r + x)/2 ] ,
+ //
+ // 1/2
+ // Im w = [ (r - x)/2 ] .
+ //
+ // Cancellation error in r-x or r+x is avoided by using the
+ // identity 2 Re w Im w = y.
+ //
+ // Note that -w is also a square root of z. The root chosen
+ // is always in the right half plane and Im w has the same sign as y.
+ //
+ // ACCURACY:
+ //
+ // Relative error:
+ // arithmetic domain # trials peak rms
+ // DEC -10,+10 25000 3.2e-17 9.6e-18
+ // IEEE -10,+10 1,000,000 2.9e-16 6.1e-17
+
+ if imag(x) == 0 {
+ // Ensure that imag(r) has the same sign as imag(x) for imag(x) == signed zero.
+ if real(x) == 0 {
+ return complex(0, imag(x))
+ }
+ if real(x) < 0 {
+ return complex(0, math.copy_sign(math.sqrt(-real(x)), imag(x)))
+ }
+ return complex(math.sqrt(real(x)), imag(x))
+ } else if math.is_inf(imag(x), 0) {
+ return complex(math.inf_f64(1.0), imag(x))
+ }
+ if real(x) == 0 {
+ if imag(x) < 0 {
+ r := math.sqrt(-0.5 * imag(x))
+ return complex(r, -r)
+ }
+ r := math.sqrt(0.5 * imag(x))
+ return complex(r, r)
+ }
+ a := real(x)
+ b := imag(x)
+ scale: f64
+ // Rescale to avoid internal overflow or underflow.
+ if abs(a) > 4 || abs(b) > 4 {
+ a *= 0.25
+ b *= 0.25
+ scale = 2
+ } else {
+ a *= 1.8014398509481984e16 // 2**54
+ b *= 1.8014398509481984e16
+ scale = 7.450580596923828125e-9 // 2**-27
+ }
+ r := math.hypot(a, b)
+ t: f64
+ if a > 0 {
+ t = math.sqrt(0.5*r + 0.5*a)
+ r = scale * abs((0.5*b)/t)
+ t *= scale
+ } else {
+ r = math.sqrt(0.5*r - 0.5*a)
+ t = scale * abs((0.5*b)/r)
+ r *= scale
+ }
+ if b < 0 {
+ return complex(t, -r)
+ }
+ return complex(t, r)
+}
+
+ln_complex32 :: proc "contextless" (x: complex32) -> complex32 {
+ return complex(math.ln(abs(x)), phase(x))
+}
+ln_complex64 :: proc "contextless" (x: complex64) -> complex64 {
+ return complex(math.ln(abs(x)), phase(x))
+}
+ln_complex128 :: proc "contextless" (x: complex128) -> complex128 {
+ return complex(math.ln(abs(x)), phase(x))
+}
+
+
+exp_complex32 :: proc "contextless" (x: complex32) -> complex32 {
+ switch re, im := real(x), imag(x); {
+ case math.is_inf(re, 0):
+ switch {
+ case re > 0 && im == 0:
+ return x
+ case math.is_inf(im, 0) || math.is_nan(im):
+ if re < 0 {
+ return complex(0, math.copy_sign(0, im))
+ } else {
+ return complex(math.inf_f16(1.0), math.nan_f16())
+ }
+ }
+ case math.is_nan(re):
+ if im == 0 {
+ return complex(math.nan_f16(), im)
+ }
+ }
+ r := math.exp(real(x))
+ s, c := math.sincos(imag(x))
+ return complex(r*c, r*s)
+}
+exp_complex64 :: proc "contextless" (x: complex64) -> complex64 {
+ switch re, im := real(x), imag(x); {
+ case math.is_inf(re, 0):
+ switch {
+ case re > 0 && im == 0:
+ return x
+ case math.is_inf(im, 0) || math.is_nan(im):
+ if re < 0 {
+ return complex(0, math.copy_sign(0, im))
+ } else {
+ return complex(math.inf_f32(1.0), math.nan_f32())
+ }
+ }
+ case math.is_nan(re):
+ if im == 0 {
+ return complex(math.nan_f32(), im)
+ }
+ }
+ r := math.exp(real(x))
+ s, c := math.sincos(imag(x))
+ return complex(r*c, r*s)
+}
+exp_complex128 :: proc "contextless" (x: complex128) -> complex128 {
+ switch re, im := real(x), imag(x); {
+ case math.is_inf(re, 0):
+ switch {
+ case re > 0 && im == 0:
+ return x
+ case math.is_inf(im, 0) || math.is_nan(im):
+ if re < 0 {
+ return complex(0, math.copy_sign(0, im))
+ } else {
+ return complex(math.inf_f64(1.0), math.nan_f64())
+ }
+ }
+ case math.is_nan(re):
+ if im == 0 {
+ return complex(math.nan_f64(), im)
+ }
+ }
+ r := math.exp(real(x))
+ s, c := math.sincos(imag(x))
+ return complex(r*c, r*s)
+}
+
+
+pow_complex32 :: proc "contextless" (x, y: complex32) -> complex32 {
+ if x == 0 { // Guaranteed also true for x == -0.
+ if is_nan(y) {
+ return nan_complex32()
+ }
+ r, i := real(y), imag(y)
+ switch {
+ case r == 0:
+ return 1
+ case r < 0:
+ if i == 0 {
+ return complex(math.inf_f16(1), 0)
+ }
+ return inf_complex32()
+ case r > 0:
+ return 0
+ }
+ unreachable()
+ }
+ modulus := abs(x)
+ if modulus == 0 {
+ return complex(0, 0)
+ }
+ r := math.pow(modulus, real(y))
+ arg := phase(x)
+ theta := real(y) * arg
+ if imag(y) != 0 {
+ r *= math.exp(-imag(y) * arg)
+ theta += imag(y) * math.ln(modulus)
+ }
+ s, c := math.sincos(theta)
+ return complex(r*c, r*s)
+}
+pow_complex64 :: proc "contextless" (x, y: complex64) -> complex64 {
+ if x == 0 { // Guaranteed also true for x == -0.
+ if is_nan(y) {
+ return nan_complex64()
+ }
+ r, i := real(y), imag(y)
+ switch {
+ case r == 0:
+ return 1
+ case r < 0:
+ if i == 0 {
+ return complex(math.inf_f32(1), 0)
+ }
+ return inf_complex64()
+ case r > 0:
+ return 0
+ }
+ unreachable()
+ }
+ modulus := abs(x)
+ if modulus == 0 {
+ return complex(0, 0)
+ }
+ r := math.pow(modulus, real(y))
+ arg := phase(x)
+ theta := real(y) * arg
+ if imag(y) != 0 {
+ r *= math.exp(-imag(y) * arg)
+ theta += imag(y) * math.ln(modulus)
+ }
+ s, c := math.sincos(theta)
+ return complex(r*c, r*s)
+}
+pow_complex128 :: proc "contextless" (x, y: complex128) -> complex128 {
+ if x == 0 { // Guaranteed also true for x == -0.
+ if is_nan(y) {
+ return nan_complex128()
+ }
+ r, i := real(y), imag(y)
+ switch {
+ case r == 0:
+ return 1
+ case r < 0:
+ if i == 0 {
+ return complex(math.inf_f64(1), 0)
+ }
+ return inf_complex128()
+ case r > 0:
+ return 0
+ }
+ unreachable()
+ }
+ modulus := abs(x)
+ if modulus == 0 {
+ return complex(0, 0)
+ }
+ r := math.pow(modulus, real(y))
+ arg := phase(x)
+ theta := real(y) * arg
+ if imag(y) != 0 {
+ r *= math.exp(-imag(y) * arg)
+ theta += imag(y) * math.ln(modulus)
+ }
+ s, c := math.sincos(theta)
+ return complex(r*c, r*s)
+}
+
+
+
+log10_complex32 :: proc "contextless" (x: complex32) -> complex32 {
+ return ln(x)/math.LN10
+}
+log10_complex64 :: proc "contextless" (x: complex64) -> complex64 {
+ return ln(x)/math.LN10
+}
+log10_complex128 :: proc "contextless" (x: complex128) -> complex128 {
+ return ln(x)/math.LN10
+}
+
+
+phase_complex32 :: proc "contextless" (x: complex32) -> f16 {
+ return math.atan2(imag(x), real(x))
+}
+phase_complex64 :: proc "contextless" (x: complex64) -> f32 {
+ return math.atan2(imag(x), real(x))
+}
+phase_complex128 :: proc "contextless" (x: complex128) -> f64 {
+ return math.atan2(imag(x), real(x))
+}
+
+
+rect_complex32 :: proc "contextless" (r, Īø: f16) -> complex32 {
+ s, c := math.sincos(Īø)
+ return complex(r*c, r*s)
+}
+rect_complex64 :: proc "contextless" (r, Īø: f32) -> complex64 {
+ s, c := math.sincos(Īø)
+ return complex(r*c, r*s)
+}
+rect_complex128 :: proc "contextless" (r, Īø: f64) -> complex128 {
+ s, c := math.sincos(Īø)
+ return complex(r*c, r*s)
+}
+
+polar_complex32 :: proc "contextless" (x: complex32) -> (r, Īø: f16) {
+ return abs(x), phase(x)
+}
+polar_complex64 :: proc "contextless" (x: complex64) -> (r, Īø: f32) {
+ return abs(x), phase(x)
+}
+polar_complex128 :: proc "contextless" (x: complex128) -> (r, Īø: f64) {
+ return abs(x), phase(x)
+}
+
+
+
+
+nan_complex32 :: proc "contextless" () -> complex32 {
+ return complex(math.nan_f16(), math.nan_f16())
+}
+nan_complex64 :: proc "contextless" () -> complex64 {
+ return complex(math.nan_f32(), math.nan_f32())
+}
+nan_complex128 :: proc "contextless" () -> complex128 {
+ return complex(math.nan_f64(), math.nan_f64())
+}
+
+
+inf_complex32 :: proc "contextless" () -> complex32 {
+ inf := math.inf_f16(1)
+ return complex(inf, inf)
+}
+inf_complex64 :: proc "contextless" () -> complex64 {
+ inf := math.inf_f32(1)
+ return complex(inf, inf)
+}
+inf_complex128 :: proc "contextless" () -> complex128 {
+ inf := math.inf_f64(1)
+ return complex(inf, inf)
+}
+
+
+is_inf_complex32 :: proc "contextless" (x: complex32) -> bool {
+ return math.is_inf(real(x), 0) || math.is_inf(imag(x), 0)
+}
+is_inf_complex64 :: proc "contextless" (x: complex64) -> bool {
+ return math.is_inf(real(x), 0) || math.is_inf(imag(x), 0)
+}
+is_inf_complex128 :: proc "contextless" (x: complex128) -> bool {
+ return math.is_inf(real(x), 0) || math.is_inf(imag(x), 0)
+}
+
+
+is_nan_complex32 :: proc "contextless" (x: complex32) -> bool {
+ if math.is_inf(real(x), 0) || math.is_inf(imag(x), 0) {
+ return false
+ }
+ return math.is_nan(real(x)) || math.is_nan(imag(x))
+}
+is_nan_complex64 :: proc "contextless" (x: complex64) -> bool {
+ if math.is_inf(real(x), 0) || math.is_inf(imag(x), 0) {
+ return false
+ }
+ return math.is_nan(real(x)) || math.is_nan(imag(x))
+}
+is_nan_complex128 :: proc "contextless" (x: complex128) -> bool {
+ if math.is_inf(real(x), 0) || math.is_inf(imag(x), 0) {
+ return false
+ }
+ return math.is_nan(real(x)) || math.is_nan(imag(x))
+}
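A quick usage sketch for the new package (illustrative `main`; the import is aliased to `cmplx` for brevity since the declared package name is math_cmplx):

    import "core:fmt"
    import cmplx "core:math/cmplx"

    main :: proc() {
        z := complex(3.0, 4.0)       // complex128
        fmt.println(cmplx.abs(z))    // 5
        fmt.println(cmplx.sqrt(z))   // 2+1i, real part >= 0 as documented
        r, theta := cmplx.polar(z)
        fmt.println(r, theta)        // modulus and phase, i.e. atan2(4, 3)
    }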
diff --git a/core/math/cmplx/cmplx_invtrig.odin b/core/math/cmplx/cmplx_invtrig.odin
new file mode 100644
index 000000000..a746a370f
--- /dev/null
+++ b/core/math/cmplx/cmplx_invtrig.odin
@@ -0,0 +1,273 @@
+package math_cmplx
+
+import "core:builtin"
+import "core:math"
+
+// The original C code, the long comment, and the constants
+// below are from http://netlib.sandia.gov/cephes/c9x-complex/clog.c.
+// The go code is a simplified version of the original C.
+//
+// Cephes Math Library Release 2.8: June, 2000
+// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
+//
+// The readme file at http://netlib.sandia.gov/cephes/ says:
+// Some software in this archive may be from the book _Methods and
+// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
+// International, 1989) or from the Cephes Mathematical Library, a
+// commercial product. In either event, it is copyrighted by the author.
+// What you see here may be used freely but it comes with no support or
+// guarantee.
+//
+// The two known misprints in the book are repaired here in the
+// source listings for the gamma function and the incomplete beta
+// integral.
+//
+// Stephen L. Moshier
+// moshier@na-net.ornl.gov
+
+acos :: proc{
+ acos_complex32,
+ acos_complex64,
+ acos_complex128,
+}
+acosh :: proc{
+ acosh_complex32,
+ acosh_complex64,
+ acosh_complex128,
+}
+
+asin :: proc{
+ asin_complex32,
+ asin_complex64,
+ asin_complex128,
+}
+asinh :: proc{
+ asinh_complex32,
+ asinh_complex64,
+ asinh_complex128,
+}
+
+atan :: proc{
+ atan_complex32,
+ atan_complex64,
+ atan_complex128,
+}
+
+atanh :: proc{
+ atanh_complex32,
+ atanh_complex64,
+ atanh_complex128,
+}
+
+
+acos_complex32 :: proc "contextless" (x: complex32) -> complex32 {
+ w := asin(x)
+ return complex(math.PI/2 - real(w), -imag(w))
+}
+acos_complex64 :: proc "contextless" (x: complex64) -> complex64 {
+ w := asin(x)
+ return complex(math.PI/2 - real(w), -imag(w))
+}
+acos_complex128 :: proc "contextless" (x: complex128) -> complex128 {
+ w := asin(x)
+ return complex(math.PI/2 - real(w), -imag(w))
+}
+
+
+acosh_complex32 :: proc "contextless" (x: complex32) -> complex32 {
+ if x == 0 {
+ return complex(0, math.copy_sign(math.PI/2, imag(x)))
+ }
+ w := acos(x)
+ if imag(w) <= 0 {
+ return complex(-imag(w), real(w))
+ }
+ return complex(imag(w), -real(w))
+}
+acosh_complex64 :: proc "contextless" (x: complex64) -> complex64 {
+ if x == 0 {
+ return complex(0, math.copy_sign(math.PI/2, imag(x)))
+ }
+ w := acos(x)
+ if imag(w) <= 0 {
+ return complex(-imag(w), real(w))
+ }
+ return complex(imag(w), -real(w))
+}
+acosh_complex128 :: proc "contextless" (x: complex128) -> complex128 {
+ if x == 0 {
+ return complex(0, math.copy_sign(math.PI/2, imag(x)))
+ }
+ w := acos(x)
+ if imag(w) <= 0 {
+ return complex(-imag(w), real(w))
+ }
+ return complex(imag(w), -real(w))
+}
+
+asin_complex32 :: proc "contextless" (x: complex32) -> complex32 {
+ return complex32(asin_complex128(complex128(x)))
+}
+asin_complex64 :: proc "contextless" (x: complex64) -> complex64 {
+ return complex64(asin_complex128(complex128(x)))
+}
+asin_complex128 :: proc "contextless" (x: complex128) -> complex128 {
+ switch re, im := real(x), imag(x); {
+ case im == 0 && abs(re) <= 1:
+ return complex(math.asin(re), im)
+ case re == 0 && abs(im) <= 1:
+ return complex(re, math.asinh(im))
+ case math.is_nan(im):
+ switch {
+ case re == 0:
+ return complex(re, math.nan_f64())
+ case math.is_inf(re, 0):
+ return complex(math.nan_f64(), re)
+ case:
+ return nan_complex128()
+ }
+ case math.is_inf(im, 0):
+ switch {
+ case math.is_nan(re):
+ return x
+ case math.is_inf(re, 0):
+ return complex(math.copy_sign(math.PI/4, re), im)
+ case:
+ return complex(math.copy_sign(0, re), im)
+ }
+ case math.is_inf(re, 0):
+ return complex(math.copy_sign(math.PI/2, re), math.copy_sign(re, im))
+ }
+ ct := complex(-imag(x), real(x)) // i * x
+ xx := x * x
+ x1 := complex(1-real(xx), -imag(xx)) // 1 - x*x
+ x2 := sqrt(x1) // x2 = sqrt(1 - x*x)
+ w := ln(ct + x2)
+ return complex(imag(w), -real(w)) // -i * w
+}
+
+asinh_complex32 :: proc "contextless" (x: complex32) -> complex32 {
+ return complex32(asinh_complex128(complex128(x)))
+}
+asinh_complex64 :: proc "contextless" (x: complex64) -> complex64 {
+ return complex64(asinh_complex128(complex128(x)))
+}
+asinh_complex128 :: proc "contextless" (x: complex128) -> complex128 {
+ switch re, im := real(x), imag(x); {
+ case im == 0 && abs(re) <= 1:
+ return complex(math.asinh(re), im)
+ case re == 0 && abs(im) <= 1:
+ return complex(re, math.asin(im))
+ case math.is_inf(re, 0):
+ switch {
+ case math.is_inf(im, 0):
+ return complex(re, math.copy_sign(math.PI/4, im))
+ case math.is_nan(im):
+ return x
+ case:
+ return complex(re, math.copy_sign(0.0, im))
+ }
+ case math.is_nan(re):
+ switch {
+ case im == 0:
+ return x
+ case math.is_inf(im, 0):
+ return complex(im, re)
+ case:
+ return nan_complex128()
+ }
+ case math.is_inf(im, 0):
+ return complex(math.copy_sign(im, re), math.copy_sign(math.PI/2, im))
+ }
+ xx := x * x
+ x1 := complex(1+real(xx), imag(xx)) // 1 + x*x
+ return ln(x + sqrt(x1)) // log(x + sqrt(1 + x*x))
+}
+
+
+atan_complex32 :: proc "contextless" (x: complex32) -> complex32 {
+ return complex32(atan_complex128(complex128(x)))
+}
+atan_complex64 :: proc "contextless" (x: complex64) -> complex64 {
+ return complex64(atan_complex128(complex128(x)))
+}
+atan_complex128 :: proc "contextless" (x: complex128) -> complex128 {
+ // Complex circular arc tangent
+ //
+ // DESCRIPTION:
+ //
+ // If
+ // z = x + iy,
+ //
+ // then
+ // 1 ( 2x )
+ // Re w = - arctan(-----------) + k PI
+ // 2 ( 2 2)
+ // (1 - x - y )
+ //
+ // ( 2 2)
+ // 1 (x + (y+1) )
+ // Im w = - log(------------)
+ // 4 ( 2 2)
+ // (x + (y-1) )
+ //
+ // Where k is an arbitrary integer.
+ //
+ // catan(z) = -i catanh(iz).
+ //
+ // ACCURACY:
+ //
+ // Relative error:
+ // arithmetic domain # trials peak rms
+ // DEC -10,+10 5900 1.3e-16 7.8e-18
+ // IEEE -10,+10 30000 2.3e-15 8.5e-17
+ // The check catan( ctan(z) ) = z, with |x| and |y| < PI/2,
+ // had peak relative error 1.5e-16, rms relative error
+ // 2.9e-17. See also clog().
+
+ switch re, im := real(x), imag(x); {
+ case im == 0:
+ return complex(math.atan(re), im)
+ case re == 0 && abs(im) <= 1:
+ return complex(re, math.atanh(im))
+ case math.is_inf(im, 0) || math.is_inf(re, 0):
+ if math.is_nan(re) {
+ return complex(math.nan_f64(), math.copy_sign(0, im))
+ }
+ return complex(math.copy_sign(math.PI/2, re), math.copy_sign(0, im))
+ case math.is_nan(re) || math.is_nan(im):
+ return nan_complex128()
+ }
+ x2 := real(x) * real(x)
+ a := 1 - x2 - imag(x)*imag(x)
+ if a == 0 {
+ return nan_complex128()
+ }
+ t := 0.5 * math.atan2(2*real(x), a)
+ w := _reduce_pi_f64(t)
+
+ t = imag(x) - 1
+ b := x2 + t*t
+ if b == 0 {
+ return nan_complex128()
+ }
+ t = imag(x) + 1
+ c := (x2 + t*t) / b
+ return complex(w, 0.25*math.ln(c))
+}
+
+atanh_complex32 :: proc "contextless" (x: complex32) -> complex32 {
+ z := complex(-imag(x), real(x)) // z = i * x
+ z = atan(z)
+ return complex(imag(z), -real(z)) // z = -i * z
+}
+atanh_complex64 :: proc "contextless" (x: complex64) -> complex64 {
+ z := complex(-imag(x), real(x)) // z = i * x
+ z = atan(z)
+ return complex(imag(z), -real(z)) // z = -i * z
+}
+atanh_complex128 :: proc "contextless" (x: complex128) -> complex128 {
+ z := complex(-imag(x), real(x)) // z = i * x
+ z = atan(z)
+ return complex(imag(z), -real(z)) // z = -i * z
+}
\ No newline at end of file
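A small sanity-check sketch for the inverse-trig procedures (same imports as the previous sketch; the proc name is hypothetical):

    check_complement :: proc() {
        z := complex(0.3, -0.2)
        // acos is defined above as PI/2 - asin, so the sum is PI/2 + 0i (up to rounding)
        fmt.println(cmplx.acos(z) + cmplx.asin(z))
    }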
diff --git a/core/math/cmplx/cmplx_trig.odin b/core/math/cmplx/cmplx_trig.odin
new file mode 100644
index 000000000..7ca404fab
--- /dev/null
+++ b/core/math/cmplx/cmplx_trig.odin
@@ -0,0 +1,409 @@
+package math_cmplx
+
+import "core:math"
+import "core:math/bits"
+
+// The original C code, the long comment, and the constants
+// below are from http://netlib.sandia.gov/cephes/c9x-complex/clog.c.
+// The go code is a simplified version of the original C.
+//
+// Cephes Math Library Release 2.8: June, 2000
+// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
+//
+// The readme file at http://netlib.sandia.gov/cephes/ says:
+// Some software in this archive may be from the book _Methods and
+// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
+// International, 1989) or from the Cephes Mathematical Library, a
+// commercial product. In either event, it is copyrighted by the author.
+// What you see here may be used freely but it comes with no support or
+// guarantee.
+//
+// The two known misprints in the book are repaired here in the
+// source listings for the gamma function and the incomplete beta
+// integral.
+//
+// Stephen L. Moshier
+// moshier@na-net.ornl.gov
+
+sin_complex128 :: proc "contextless" (x: complex128) -> complex128 {
+ // Complex circular sine
+ //
+ // DESCRIPTION:
+ //
+ // If
+ // z = x + iy,
+ //
+ // then
+ //
+ // w = sin x cosh y + i cos x sinh y.
+ //
+ // csin(z) = -i csinh(iz).
+ //
+ // ACCURACY:
+ //
+ // Relative error:
+ // arithmetic domain # trials peak rms
+ // DEC -10,+10 8400 5.3e-17 1.3e-17
+ // IEEE -10,+10 30000 3.8e-16 1.0e-16
+ // Also tested by csin(casin(z)) = z.
+
+ switch re, im := real(x), imag(x); {
+ case im == 0 && (math.is_inf(re, 0) || math.is_nan(re)):
+ return complex(math.nan_f64(), im)
+ case math.is_inf(im, 0):
+ switch {
+ case re == 0:
+ return x
+ case math.is_inf(re, 0) || math.is_nan(re):
+ return complex(math.nan_f64(), im)
+ }
+ case re == 0 && math.is_nan(im):
+ return x
+ }
+ s, c := math.sincos(real(x))
+ sh, ch := _sinhcosh_f64(imag(x))
+ return complex(s*ch, c*sh)
+}
+
+cos_complex128 :: proc "contextless" (x: complex128) -> complex128 {
+ // Complex circular cosine
+ //
+ // DESCRIPTION:
+ //
+ // If
+ // z = x + iy,
+ //
+ // then
+ //
+ // w = cos x cosh y - i sin x sinh y.
+ //
+ // ACCURACY:
+ //
+ // Relative error:
+ // arithmetic domain # trials peak rms
+ // DEC -10,+10 8400 4.5e-17 1.3e-17
+ // IEEE -10,+10 30000 3.8e-16 1.0e-16
+
+ switch re, im := real(x), imag(x); {
+ case im == 0 && (math.is_inf(re, 0) || math.is_nan(re)):
+ return complex(math.nan_f64(), -im*math.copy_sign(0, re))
+ case math.is_inf(im, 0):
+ switch {
+ case re == 0:
+ return complex(math.inf_f64(1), -re*math.copy_sign(0, im))
+ case math.is_inf(re, 0) || math.is_nan(re):
+ return complex(math.inf_f64(1), math.nan_f64())
+ }
+ case re == 0 && math.is_nan(im):
+ return complex(math.nan_f64(), 0)
+ }
+ s, c := math.sincos(real(x))
+ sh, ch := _sinhcosh_f64(imag(x))
+ return complex(c*ch, -s*sh)
+}
+
+sinh_complex128 :: proc "contextless" (x: complex128) -> complex128 {
+ // Complex hyperbolic sine
+ //
+ // DESCRIPTION:
+ //
+ // csinh z = (cexp(z) - cexp(-z))/2
+ // = sinh x * cos y + i cosh x * sin y .
+ //
+ // ACCURACY:
+ //
+ // Relative error:
+ // arithmetic domain # trials peak rms
+ // IEEE -10,+10 30000 3.1e-16 8.2e-17
+
+ switch re, im := real(x), imag(x); {
+ case re == 0 && (math.is_inf(im, 0) || math.is_nan(im)):
+ return complex(re, math.nan_f64())
+ case math.is_inf(re, 0):
+ switch {
+ case im == 0:
+ return complex(re, im)
+ case math.is_inf(im, 0) || math.is_nan(im):
+ return complex(re, math.nan_f64())
+ }
+ case im == 0 && math.is_nan(re):
+ return complex(math.nan_f64(), im)
+ }
+ s, c := math.sincos(imag(x))
+ sh, ch := _sinhcosh_f64(real(x))
+ return complex(c*sh, s*ch)
+}
+
+cosh_complex128 :: proc "contextless" (x: complex128) -> complex128 {
+ // Complex hyperbolic cosine
+ //
+ // DESCRIPTION:
+ //
+ // ccosh(z) = cosh x cos y + i sinh x sin y .
+ //
+ // ACCURACY:
+ //
+ // Relative error:
+ // arithmetic domain # trials peak rms
+ // IEEE -10,+10 30000 2.9e-16 8.1e-17
+
+ switch re, im := real(x), imag(x); {
+ case re == 0 && (math.is_inf(im, 0) || math.is_nan(im)):
+ return complex(math.nan_f64(), re*math.copy_sign(0, im))
+ case math.is_inf(re, 0):
+ switch {
+ case im == 0:
+ return complex(math.inf_f64(1), im*math.copy_sign(0, re))
+ case math.is_inf(im, 0) || math.is_nan(im):
+ return complex(math.inf_f64(1), math.nan_f64())
+ }
+ case im == 0 && math.is_nan(re):
+ return complex(math.nan_f64(), im)
+ }
+ s, c := math.sincos(imag(x))
+ sh, ch := _sinhcosh_f64(real(x))
+ return complex(c*ch, s*sh)
+}
+
+tan_complex128 :: proc "contextless" (x: complex128) -> complex128 {
+ // Complex circular tangent
+ //
+ // DESCRIPTION:
+ //
+ // If
+ // z = x + iy,
+ //
+ // then
+ //
+ // sin 2x + i sinh 2y
+ // w = --------------------.
+ // cos 2x + cosh 2y
+ //
+ // On the real axis the denominator is zero at odd multiples
+ // of PI/2. The denominator is evaluated by its Taylor
+ // series near these points.
+ //
+ // ctan(z) = -i ctanh(iz).
+ //
+ // ACCURACY:
+ //
+ // Relative error:
+ // arithmetic domain # trials peak rms
+ // DEC -10,+10 5200 7.1e-17 1.6e-17
+ // IEEE -10,+10 30000 7.2e-16 1.2e-16
+ // Also tested by ctan * ccot = 1 and catan(ctan(z)) = z.
+
+ switch re, im := real(x), imag(x); {
+ case math.is_inf(im, 0):
+ switch {
+ case math.is_inf(re, 0) || math.is_nan(re):
+ return complex(math.copy_sign(0, re), math.copy_sign(1, im))
+ }
+ return complex(math.copy_sign(0, math.sin(2*re)), math.copy_sign(1, im))
+ case re == 0 && math.is_nan(im):
+ return x
+ }
+ d := math.cos(2*real(x)) + math.cosh(2*imag(x))
+ if abs(d) < 0.25 {
+ d = _tan_series_f64(x)
+ }
+ if d == 0 {
+ return inf_complex128()
+ }
+ return complex(math.sin(2*real(x))/d, math.sinh(2*imag(x))/d)
+}
+
+tanh_complex128 :: proc "contextless" (x: complex128) -> complex128 {
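+	// Complex hyperbolic tangent
+	//
+	// DESCRIPTION:
+	//
+	//           sinh 2x  +  i sin 2y
+	//     w  =  --------------------.
+	//           cosh 2x  +  cos 2y
+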
+ switch re, im := real(x), imag(x); {
+ case math.is_inf(re, 0):
+ switch {
+ case math.is_inf(im, 0) || math.is_nan(im):
+ return complex(math.copy_sign(1, re), math.copy_sign(0, im))
+ }
+ return complex(math.copy_sign(1, re), math.copy_sign(0, math.sin(2*im)))
+ case im == 0 && math.is_nan(re):
+ return x
+ }
+ d := math.cosh(2*real(x)) + math.cos(2*imag(x))
+ if d == 0 {
+ return inf_complex128()
+ }
+ return complex(math.sinh(2*real(x))/d, math.sin(2*imag(x))/d)
+}
+
+cot_complex128 :: proc "contextless" (x: complex128) -> complex128 {
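+	// Complex circular cotangent
+	//
+	// DESCRIPTION:
+	//
+	//           sin 2x  -  i sinh 2y
+	//     w  =  --------------------.
+	//           cosh 2y  -  cos 2x
+	//
+	// On the real axis, the denominator has zeros at even multiples of PI.
+	// Near these points it is evaluated by the Taylor series in _tan_series_f64.
+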
+ d := math.cosh(2*imag(x)) - math.cos(2*real(x))
+ if abs(d) < 0.25 {
+ d = _tan_series_f64(x)
+ }
+ if d == 0 {
+ return inf_complex128()
+ }
+ return complex(math.sin(2*real(x))/d, -math.sinh(2*imag(x))/d)
+}
+
+
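+// _sinhcosh_f64 returns sinh(x) and cosh(x) together. For |x| > 0.5 both are
+// derived from a single exp call, using sinh x = (e^x - 1/e^x)/2 and
+// cosh x = (e^x + 1/e^x)/2.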
+@(private="file")
+_sinhcosh_f64 :: proc "contextless" (x: f64) -> (sh, ch: f64) {
+ if abs(x) <= 0.5 {
+ return math.sinh(x), math.cosh(x)
+ }
+ e := math.exp(x)
+ ei := 0.5 / e
+ e *= 0.5
+ return e - ei, e + ei
+}
+
+
+// Taylor series of cosh(2y) - cos(2x)
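+//
+// cosh(2y) - cos(2x) = ((2x)^2 + (2y)^2)/2! + ((2y)^4 - (2x)^4)/4!
+//                    + ((2x)^6 + (2y)^6)/6! + ((2y)^8 - (2x)^8)/8! + ...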
+@(private)
+_tan_series_f64 :: proc "contextless" (z: complex128) -> f64 {
+ MACH_EPSILON :: 1.0 / (1 << 53)
+
+ x := abs(2 * real(z))
+ y := abs(2 * imag(z))
+ x = _reduce_pi_f64(x)
+ x, y = x * x, y * y
+ x2, y2 := 1.0, 1.0
+ f, rn, d := 1.0, 0.0, 0.0
+
+ for {
+ rn += 1
+ f *= rn
+ rn += 1
+ f *= rn
+ x2 *= x
+ y2 *= y
+ t := y2 + x2
+ t /= f
+ d += t
+
+ rn += 1
+ f *= rn
+ rn += 1
+ f *= rn
+ x2 *= x
+ y2 *= y
+ t = y2 - x2
+ t /= f
+ d += t
+ if !(abs(t/d) > MACH_EPSILON) { // don't use <=, because of floating point nonsense and NaN
+ break
+ }
+ }
+ return d
+}
+
+// _reduce_pi_f64 reduces the input argument x to the range (-PI/2, PI/2].
+// x must be greater than or equal to 0. For small arguments it
+// uses Cody-Waite reduction in 3 f64 parts based on:
+// "Elementary Function Evaluation: Algorithms and Implementation"
+// Jean-Michel Muller, 1997.
+// For very large arguments it uses Payne-Hanek range reduction based on:
+// "ARGUMENT REDUCTION FOR HUGE ARGUMENTS: Good to the Last Bit"
+// K. C. Ng et al, March 24, 1992.
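+//
+// For example, _reduce_pi_f64(10) returns 10 - 3*PI ≈ 0.57522.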
+@(private)
+_reduce_pi_f64 :: proc "contextless" (x: f64) -> f64 #no_bounds_check {
+ x := x
+
+ // REDUCE_THRESHOLD is the maximum value of x where the reduction using
+ // Cody-Waite reduction still gives accurate results. This threshold
+ // is set by t*PIn being representable as a f64 without error
+ // where t is given by t = floor(x * (1 / PI)) and PIn are the leading partial
+ // terms of PI. Since the leading terms, PI1 and PI2 below, have 30 and 32
+ // trailing zero bits respectively, t should have less than 30 significant bits.
+ // t < 1<<30 -> floor(x*(1/PI)+0.5) < 1<<30 -> x < (1<<30-1) * PI - 0.5
+ // So, conservatively we can take x < 1<<30.
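+	// Numerically, (1<<30 - 1)*PI - 0.5 ≈ 3.37e9, so the bound x < 1<<30 ≈ 1.07e9
+	// is comfortably inside it.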
+ REDUCE_THRESHOLD :: f64(1 << 30)
+
+ if abs(x) < REDUCE_THRESHOLD {
+ // Use Cody-Waite reduction in three parts.
+ // PI1, PI2 and PI3 comprise an extended precision value of PI
+ // such that PI ~= PI1 + PI2 + PI3. The parts are chosen so
+ // that PI1 and PI2 have an approximately equal number of trailing
+ // zero bits. This ensures that t*PI1 and t*PI2 are exact for
+ // large integer values of t. The full precision PI3 ensures the
+ // approximation of PI is accurate to 102 bits to handle cancellation
+ // during subtraction.
+ PI1 :: 0h400921fb40000000 // 3.141592502593994
+ PI2 :: 0h3e84442d00000000 // 1.5099578831723193e-07
+ PI3 :: 0h3d08469898cc5170 // 1.0780605716316238e-14
+
+ t := x / math.PI
+ t += 0.5
+ t = f64(i64(t)) // i64(t) = the multiple
+ return ((x - t*PI1) - t*PI2) - t*PI3
+ }
+ // Must apply Payne-Hanek range reduction
+ MASK :: 0x7FF
+ SHIFT :: 64 - 11 - 1
+ BIAS :: 1023
+	FRAC_MASK :: 1<<SHIFT - 1
+
+	// Extract out the integer and exponent such that,
+	// x = ix * 2 ** exp.
+	ix := transmute(u64)x
+	exp := int(ix>>SHIFT&MASK) - BIAS - SHIFT
+ ix &= FRAC_MASK
+ ix |= 1 << SHIFT
+
+ // bdpi is the binary digits of 1/PI as a u64 array,
+ // that is, 1/PI = SUM bdpi[i]*2^(-64*i).
+ // 19 64-bit digits give 1216 bits of precision
+ // to handle the largest possible f64 exponent.
+ @static bdpi := [?]u64{
+ 0x0000000000000000,
+ 0x517cc1b727220a94,
+ 0xfe13abe8fa9a6ee0,
+ 0x6db14acc9e21c820,
+ 0xff28b1d5ef5de2b0,
+ 0xdb92371d2126e970,
+ 0x0324977504e8c90e,
+ 0x7f0ef58e5894d39f,
+ 0x74411afa975da242,
+ 0x74ce38135a2fbf20,
+ 0x9cc8eb1cc1a99cfa,
+ 0x4e422fc5defc941d,
+ 0x8ffc4bffef02cc07,
+ 0xf79788c5ad05368f,
+ 0xb69b3f6793e584db,
+ 0xa7a31fb34f2ff516,
+ 0xba93dd63f5f2f8bd,
+ 0x9e839cfbc5294975,
+ 0x35fdafd88fc6ae84,
+ 0x2b0198237e3db5d5,
+ }
+
+ // Use the exponent to extract the 3 appropriate u64 digits from bdpi,
+ // B ~ (z0, z1, z2), such that the product leading digit has the exponent -64.
+ // Note, exp >= 50 since x >= REDUCE_THRESHOLD and exp < 971 for maximum f64.
+ digit, bitshift := uint(exp+64)/64, uint(exp+64)%64
+ z0 := (bdpi[digit] << bitshift) | (bdpi[digit+1] >> (64 - bitshift))
+ z1 := (bdpi[digit+1] << bitshift) | (bdpi[digit+2] >> (64 - bitshift))
+ z2 := (bdpi[digit+2] << bitshift) | (bdpi[digit+3] >> (64 - bitshift))
+
+ // Multiply mantissa by the digits and extract the upper two digits (hi, lo).
+ z2hi, _ := bits.mul(z2, ix)
+ z1hi, z1lo := bits.mul(z1, ix)
+ z0lo := z0 * ix
+ lo, c := bits.add(z1lo, z2hi, 0)
+ hi, _ := bits.add(z0lo, z1hi, c)
+
+ // Find the magnitude of the fraction.
+ lz := uint(bits.leading_zeros(hi))
+ e := u64(BIAS - (lz + 1))
+
+ // Clear implicit mantissa bit and shift into place.
+ hi = (hi << (lz + 1)) | (lo >> (64 - (lz + 1)))
+ hi >>= 64 - SHIFT
+
+ // Include the exponent and convert to a float.
+ hi |= e << SHIFT
+ x = transmute(f64)(hi)
+
+ // map to (-PI/2, PI/2]
+ if x > 0.5 {
+ x -= 1
+ }
+ return math.PI * x
+}
+
diff --git a/core/math/ease/ease.odin b/core/math/ease/ease.odin
index d5cb85dd8..0e6569bca 100644
--- a/core/math/ease/ease.odin
+++ b/core/math/ease/ease.odin
@@ -450,7 +450,7 @@ flux_tween_init :: proc(tween: ^Flux_Tween($T), duration: time.Duration) where i
flux_update :: proc(flux: ^Flux_Map($T), dt: f64) where intrinsics.type_is_float(T) {
clear(&flux.keys_to_be_deleted)
- for key, tween in &flux.values {
+ for key, &tween in flux.values {
delay_remainder := f64(0)
// Update delay if necessary.
diff --git a/core/math/math.odin b/core/math/math.odin
index 05177378f..6f7a36bab 100644
--- a/core/math/math.odin
+++ b/core/math/math.odin
@@ -2158,6 +2158,80 @@ signbit :: proc{
}
+@(require_results)
+hypot_f16 :: proc "contextless" (x, y: f16) -> (r: f16) {
+ p, q := abs(x), abs(y)
+ switch {
+ case is_inf(p, 1) || is_inf(q, 1):
+ return inf_f16(1)
+ case is_nan(p) || is_nan(q):
+ return nan_f16()
+ }
+ if p < q {
+ p, q = q, p
+ }
+ if p == 0 {
+ return 0
+ }
+ q = q / p
+ return p * sqrt(1+q*q)
+}
+@(require_results)
+hypot_f32 :: proc "contextless" (x, y: f32) -> (r: f32) {
+ p, q := abs(x), abs(y)
+ switch {
+ case is_inf(p, 1) || is_inf(q, 1):
+ return inf_f32(1)
+ case is_nan(p) || is_nan(q):
+ return nan_f32()
+ }
+ if p < q {
+ p, q = q, p
+ }
+ if p == 0 {
+ return 0
+ }
+ q = q / p
+ return p * sqrt(1+q*q)
+}
+@(require_results)
+hypot_f64 :: proc "contextless" (x, y: f64) -> (r: f64) {
+ p, q := abs(x), abs(y)
+ switch {
+ case is_inf(p, 1) || is_inf(q, 1):
+ return inf_f64(1)
+ case is_nan(p) || is_nan(q):
+ return nan_f64()
+ }
+ if p < q {
+ p, q = q, p
+ }
+ if p == 0 {
+ return 0
+ }
+ q = q / p
+ return p * sqrt(1+q*q)
+}
+@(require_results) hypot_f16le :: proc "contextless" (x, y: f16le) -> (r: f16le) { return f16le(hypot_f16(f16(x), f16(y))) }
+@(require_results) hypot_f16be :: proc "contextless" (x, y: f16be) -> (r: f16be) { return f16be(hypot_f16(f16(x), f16(y))) }
+@(require_results) hypot_f32le :: proc "contextless" (x, y: f32le) -> (r: f32le) { return f32le(hypot_f32(f32(x), f32(y))) }
+@(require_results) hypot_f32be :: proc "contextless" (x, y: f32be) -> (r: f32be) { return f32be(hypot_f32(f32(x), f32(y))) }
+@(require_results) hypot_f64le :: proc "contextless" (x, y: f64le) -> (r: f64le) { return f64le(hypot_f64(f64(x), f64(y))) }
+@(require_results) hypot_f64be :: proc "contextless" (x, y: f64be) -> (r: f64be) { return f64be(hypot_f64(f64(x), f64(y))) }
+
+// hypot returns sqrt(p*p + q*q), taking care to avoid unnecessary overflow and underflow.
+//
+// Special cases:
+// hypot(±Inf, q) = +Inf
+// hypot(p, ±Inf) = +Inf
+// hypot(NaN, q) = NaN
+// hypot(p, NaN) = NaN
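+//
+// For example, hypot(3, 4) == 5, and hypot(1e200, 1e200) ≈ 1.4142e200,
+// where a naive sqrt(p*p + q*q) would overflow to +Inf.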
+hypot :: proc{
+ hypot_f16, hypot_f16le, hypot_f16be,
+ hypot_f32, hypot_f32le, hypot_f32be,
+ hypot_f64, hypot_f64le, hypot_f64be,
+}
+
F16_DIG :: 3
F16_EPSILON :: 0.00097656
F16_GUARD :: 0
diff --git a/core/math/math_basic.odin b/core/math/math_basic.odin
index 785c43b10..95e0a93ec 100644
--- a/core/math/math_basic.odin
+++ b/core/math/math_basic.odin
@@ -3,44 +3,110 @@ package math
import "core:intrinsics"
-@(default_calling_convention="none")
+@(default_calling_convention="none", private="file")
foreign _ {
@(link_name="llvm.sin.f16", require_results)
-	sin_f16 :: proc(θ: f16) -> f16 ---
+	_sin_f16 :: proc(θ: f16) -> f16 ---
 	@(link_name="llvm.sin.f32", require_results)
-	sin_f32 :: proc(θ: f32) -> f32 ---
+	_sin_f32 :: proc(θ: f32) -> f32 ---
 	@(link_name="llvm.sin.f64", require_results)
-	sin_f64 :: proc(θ: f64) -> f64 ---
+	_sin_f64 :: proc(θ: f64) -> f64 ---
 	@(link_name="llvm.cos.f16", require_results)
-	cos_f16 :: proc(θ: f16) -> f16 ---
+	_cos_f16 :: proc(θ: f16) -> f16 ---
 	@(link_name="llvm.cos.f32", require_results)
-	cos_f32 :: proc(θ: f32) -> f32 ---
+	_cos_f32 :: proc(θ: f32) -> f32 ---
 	@(link_name="llvm.cos.f64", require_results)
-	cos_f64 :: proc(θ: f64) -> f64 ---
+	_cos_f64 :: proc(θ: f64) -> f64 ---
@(link_name="llvm.pow.f16", require_results)
- pow_f16 :: proc(x, power: f16) -> f16 ---
+ _pow_f16 :: proc(x, power: f16) -> f16 ---
@(link_name="llvm.pow.f32", require_results)
- pow_f32 :: proc(x, power: f32) -> f32 ---
+ _pow_f32 :: proc(x, power: f32) -> f32 ---
@(link_name="llvm.pow.f64", require_results)
- pow_f64 :: proc(x, power: f64) -> f64 ---
+ _pow_f64 :: proc(x, power: f64) -> f64 ---
@(link_name="llvm.fmuladd.f16", require_results)
- fmuladd_f16 :: proc(a, b, c: f16) -> f16 ---
+ _fmuladd_f16 :: proc(a, b, c: f16) -> f16 ---
@(link_name="llvm.fmuladd.f32", require_results)
- fmuladd_f32 :: proc(a, b, c: f32) -> f32 ---
+ _fmuladd_f32 :: proc(a, b, c: f32) -> f32 ---
@(link_name="llvm.fmuladd.f64", require_results)
- fmuladd_f64 :: proc(a, b, c: f64) -> f64 ---
+ _fmuladd_f64 :: proc(a, b, c: f64) -> f64 ---
@(link_name="llvm.exp.f16", require_results)
- exp_f16 :: proc(x: f16) -> f16 ---
+ _exp_f16 :: proc(x: f16) -> f16 ---
@(link_name="llvm.exp.f32", require_results)
- exp_f32 :: proc(x: f32) -> f32 ---
+ _exp_f32 :: proc(x: f32) -> f32 ---
@(link_name="llvm.exp.f64", require_results)
- exp_f64 :: proc(x: f64) -> f64 ---
+ _exp_f64 :: proc(x: f64) -> f64 ---
}
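+
+// Thin wrappers over the file-private LLVM intrinsics declared above; each
+// simply forwards its arguments under the `contextless` calling convention.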
+@(require_results)
+sin_f16 :: proc "contextless" (θ: f16) -> f16 {
+	return _sin_f16(θ)
+}
+@(require_results)
+sin_f32 :: proc "contextless" (θ: f32) -> f32 {
+	return _sin_f32(θ)
+}
+@(require_results)
+sin_f64 :: proc "contextless" (θ: f64) -> f64 {
+	return _sin_f64(θ)
+}
+
+@(require_results)
+cos_f16 :: proc "contextless" (θ: f16) -> f16 {
+	return _cos_f16(θ)
+}
+@(require_results)
+cos_f32 :: proc "contextless" (θ: f32) -> f32 {
+	return _cos_f32(θ)
+}
+@(require_results)
+cos_f64 :: proc "contextless" (θ: f64) -> f64 {
+	return _cos_f64(θ)
+}
+
+@(require_results)
+pow_f16 :: proc "contextless" (x, power: f16) -> f16 {
+ return _pow_f16(x, power)
+}
+@(require_results)
+pow_f32 :: proc "contextless" (x, power: f32) -> f32 {
+ return _pow_f32(x, power)
+}
+@(require_results)
+pow_f64 :: proc "contextless" (x, power: f64) -> f64 {
+ return _pow_f64(x, power)
+}
+
+@(require_results)
+fmuladd_f16 :: proc "contextless" (a, b, c: f16) -> f16 {
+ return _fmuladd_f16(a, b, c)
+}
+@(require_results)
+fmuladd_f32 :: proc "contextless" (a, b, c: f32) -> f32 {
+ return _fmuladd_f32(a, b, c)
+}
+@(require_results)
+fmuladd_f64 :: proc "contextless" (a, b, c: f64) -> f64 {
+ return _fmuladd_f64(a, b, c)
+}
+
+@(require_results)
+exp_f16 :: proc "contextless" (x: f16) -> f16 {
+ return _exp_f16(x)
+}
+@(require_results)
+exp_f32 :: proc "contextless" (x: f32) -> f32 {
+ return _exp_f32(x)
+}
+@(require_results)
+exp_f64 :: proc "contextless" (x: f64) -> f64 {
+ return _exp_f64(x)
+}
+
+
@(require_results)
sqrt_f16 :: proc "contextless" (x: f16) -> f16 {
return intrinsics.sqrt(x)
diff --git a/core/math/math_sincos.odin b/core/math/math_sincos.odin
new file mode 100644
index 000000000..578876ac5
--- /dev/null
+++ b/core/math/math_sincos.odin
@@ -0,0 +1,308 @@
+package math
+
+import "core:math/bits"
+
+// The original C code, the long comment, and the constants
+// below were from http://netlib.sandia.gov/cephes/cmath/sin.c,
+// available from http://www.netlib.org/cephes/cmath.tgz.
+// The go code is a simplified version of the original C.
+//
+// sin.c
+//
+// Circular sine
+//
+// SYNOPSIS:
+//
+// double x, y, sin();
+// y = sin( x );
+//
+// DESCRIPTION:
+//
+// Range reduction is into intervals of pi/4. The reduction error is nearly
+// eliminated by contriving an extended precision modular arithmetic.
+//
+// Two polynomial approximating functions are employed.
+// Between 0 and pi/4 the sine is approximated by
+// x + x**3 P(x**2).
+// Between pi/4 and pi/2 the cosine is represented as
+// 1 - x**2 Q(x**2).
+//
+// ACCURACY:
+//
+// Relative error:
+// arithmetic domain # trials peak rms
+// DEC 0, 10 150000 3.0e-17 7.8e-18
+// IEEE -1.07e9,+1.07e9 130000 2.1e-16 5.4e-17
+//
+// Partial loss of accuracy begins to occur at x = 2**30 = 1.074e9. The loss
+// is not gradual, but jumps suddenly to about 1 part in 10e7. Results may
+// be meaningless for x > 2**49 = 5.6e14.
+//
+// cos.c
+//
+// Circular cosine
+//
+// SYNOPSIS:
+//
+// double x, y, cos();
+// y = cos( x );
+//
+// DESCRIPTION:
+//
+// Range reduction is into intervals of pi/4. The reduction error is nearly
+// eliminated by contriving an extended precision modular arithmetic.
+//
+// Two polynomial approximating functions are employed.
+// Between 0 and pi/4 the cosine is approximated by
+// 1 - x**2 Q(x**2).
+// Between pi/4 and pi/2 the sine is represented as
+// x + x**3 P(x**2).
+//
+// ACCURACY:
+//
+// Relative error:
+// arithmetic domain # trials peak rms
+// IEEE -1.07e9,+1.07e9 130000 2.1e-16 5.4e-17
+// DEC 0,+1.07e9 17000 3.0e-17 7.2e-18
+//
+// Cephes Math Library Release 2.8: June, 2000
+// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
+//
+// The readme file at http://netlib.sandia.gov/cephes/ says:
+// Some software in this archive may be from the book _Methods and
+// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
+// International, 1989) or from the Cephes Mathematical Library, a
+// commercial product. In either event, it is copyrighted by the author.
+// What you see here may be used freely but it comes with no support or
+// guarantee.
+//
+// The two known misprints in the book are repaired here in the
+// source listings for the gamma function and the incomplete beta
+// integral.
+//
+// Stephen L. Moshier
+// moshier@na-net.ornl.gov
+
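+// sincos returns both the sine and the cosine of x, sharing the argument
+// reduction between the two. For example, sincos(0.5) ≈ (0.47943, 0.87758).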
+sincos :: proc{
+ sincos_f16, sincos_f16le, sincos_f16be,
+ sincos_f32, sincos_f32le, sincos_f32be,
+ sincos_f64, sincos_f64le, sincos_f64be,
+}
+
+sincos_f16 :: proc "contextless" (x: f16) -> (sin, cos: f16) #no_bounds_check {
+ s, c := sincos_f64(f64(x))
+ return f16(s), f16(c)
+}
+sincos_f16le :: proc "contextless" (x: f16le) -> (sin, cos: f16le) #no_bounds_check {
+ s, c := sincos_f64(f64(x))
+ return f16le(s), f16le(c)
+}
+sincos_f16be :: proc "contextless" (x: f16be) -> (sin, cos: f16be) #no_bounds_check {
+ s, c := sincos_f64(f64(x))
+ return f16be(s), f16be(c)
+}
+
+sincos_f32 :: proc "contextless" (x: f32) -> (sin, cos: f32) #no_bounds_check {
+ s, c := sincos_f64(f64(x))
+ return f32(s), f32(c)
+}
+sincos_f32le :: proc "contextless" (x: f32le) -> (sin, cos: f32le) #no_bounds_check {
+ s, c := sincos_f64(f64(x))
+ return f32le(s), f32le(c)
+}
+sincos_f32be :: proc "contextless" (x: f32be) -> (sin, cos: f32be) #no_bounds_check {
+ s, c := sincos_f64(f64(x))
+ return f32be(s), f32be(c)
+}
+
+sincos_f64le :: proc "contextless" (x: f64le) -> (sin, cos: f64le) #no_bounds_check {
+ s, c := sincos_f64(f64(x))
+ return f64le(s), f64le(c)
+}
+sincos_f64be :: proc "contextless" (x: f64be) -> (sin, cos: f64be) #no_bounds_check {
+ s, c := sincos_f64(f64(x))
+ return f64be(s), f64be(c)
+}
+
+sincos_f64 :: proc "contextless" (x: f64) -> (sin, cos: f64) #no_bounds_check {
+ x := x
+
+ PI4A :: 0h3fe921fb40000000 // 7.85398125648498535156e-1 PI/4 split into three parts
+ PI4B :: 0h3e64442d00000000 // 3.77489470793079817668e-8
+ PI4C :: 0h3ce8469898cc5170 // 2.69515142907905952645e-15
+
+ // special cases
+ switch {
+ case x == 0:
+ return x, 1 // return ±0.0, 1.0
+ case is_nan(x) || is_inf(x, 0):
+ return nan_f64(), nan_f64()
+ }
+
+ // make argument positive
+ sin_sign, cos_sign := false, false
+ if x < 0 {
+ x = -x
+ sin_sign = true
+ }
+
+ j: u64
+ y, z: f64
+ if x >= REDUCE_THRESHOLD {
+ j, z = _trig_reduce_f64(x)
+ } else {
+ j = u64(x * (4 / PI)) // integer part of x/(PI/4), as integer for tests on the phase angle
+ y = f64(j) // integer part of x/(PI/4), as float
+
+ if j&1 == 1 { // map zeros to origin
+ j += 1
+ y += 1
+ }
+ j &= 7 // octant modulo TAU radians (360 degrees)
+ z = ((x - y*PI4A) - y*PI4B) - y*PI4C // Extended precision modular arithmetic
+ }
+ if j > 3 { // reflect in x axis
+ j -= 4
+ sin_sign, cos_sign = !sin_sign, !cos_sign
+ }
+ if j > 1 {
+ cos_sign = !cos_sign
+ }
+
+ zz := z * z
+
+ cos = 1.0 - 0.5*zz + zz*zz*((((((_cos[0]*zz)+_cos[1])*zz+_cos[2])*zz+_cos[3])*zz+_cos[4])*zz+_cos[5])
+ sin = z + z*zz*((((((_sin[0]*zz)+_sin[1])*zz+_sin[2])*zz+_sin[3])*zz+_sin[4])*zz+_sin[5])
+
+ if j == 1 || j == 2 {
+ sin, cos = cos, sin
+ }
+ if cos_sign {
+ cos = -cos
+ }
+ if sin_sign {
+ sin = -sin
+ }
+ return
+}
+
+// sin coefficients
+@(private="file")
+_sin := [?]f64{
+ 0h3de5d8fd1fd19ccd, // 1.58962301576546568060e-10
+ 0hbe5ae5e5a9291f5d, // -2.50507477628578072866e-8
+ 0h3ec71de3567d48a1, // 2.75573136213857245213e-6
+ 0hbf2a01a019bfdf03, // -1.98412698295895385996e-4
+ 0h3f8111111110f7d0, // 8.33333333332211858878e-3
+ 0hbfc5555555555548, // -1.66666666666666307295e-1
+}
+
+// cos coefficients
+@(private="file")
+_cos := [?]f64{
+ 0hbda8fa49a0861a9b, // -1.13585365213876817300e-11,
+ 0h3e21ee9d7b4e3f05, // 2.08757008419747316778e-9,
+ 0hbe927e4f7eac4bc6, // -2.75573141792967388112e-7,
+ 0h3efa01a019c844f5, // 2.48015872888517045348e-5,
+ 0hbf56c16c16c14f91, // -1.38888888888730564116e-3,
+ 0h3fa555555555554b, // 4.16666666666665929218e-2,
+}
+
+// REDUCE_THRESHOLD is the maximum value of x where the reduction using Pi/4
+// in 3 f64 parts still gives accurate results. This threshold
+// is set by y*C being representable as a f64 without error
+// where y is given by y = floor(x * (4 / Pi)) and C is the leading partial
+// terms of 4/Pi. Since the leading terms (PI4A and PI4B in sin.go) have 30
+// and 32 trailing zero bits, y should have less than 30 significant bits.
+//
+// y < 1<<30 -> floor(x*4/Pi) < 1<<30 -> x < (1<<30 - 1) * Pi/4
+//
+// So, conservatively we can take x < 1<<29.
+// Above this threshold Payne-Hanek range reduction must be used.
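+// Numerically, (1<<30 - 1) * Pi/4 ≈ 8.43e8, so 1<<29 ≈ 5.37e8 is comfortably inside it.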
+@(private="file")
+REDUCE_THRESHOLD :: 1 << 29
+
+// _trig_reduce_f64 implements Payne-Hanek range reduction by Pi/4
+// for x > 0. It returns the integer part mod 8 (j) and
+// the fractional part (z) of x / (Pi/4).
+// The implementation is based on:
+// "ARGUMENT REDUCTION FOR HUGE ARGUMENTS: Good to the Last Bit"
+// K. C. Ng et al, March 24, 1992
+// The simulated multi-precision calculation of x*B uses 64-bit integer arithmetic.
+_trig_reduce_f64 :: proc "contextless" (x: f64) -> (j: u64, z: f64) #no_bounds_check {
+ // bd_pi4 is the binary digits of 4/pi as a u64 array,
+ // that is, 4/pi = Sum bd_pi4[i]*2^(-64*i)
+ // 19 64-bit digits and the leading one bit give 1217 bits
+ // of precision to handle the largest possible f64 exponent.
+ @static bd_pi4 := [?]u64{
+ 0x0000000000000001,
+ 0x45f306dc9c882a53,
+ 0xf84eafa3ea69bb81,
+ 0xb6c52b3278872083,
+ 0xfca2c757bd778ac3,
+ 0x6e48dc74849ba5c0,
+ 0x0c925dd413a32439,
+ 0xfc3bd63962534e7d,
+ 0xd1046bea5d768909,
+ 0xd338e04d68befc82,
+ 0x7323ac7306a673e9,
+ 0x3908bf177bf25076,
+ 0x3ff12fffbc0b301f,
+ 0xde5e2316b414da3e,
+ 0xda6cfd9e4f96136e,
+ 0x9e8c7ecd3cbfd45a,
+ 0xea4f758fd7cbe2f6,
+ 0x7a0e73ef14a525d4,
+ 0xd7f6bf623f1aba10,
+ 0xac06608df8f6d757,
+ }
+
+ PI4 :: PI / 4
+ if x < PI4 {
+ return 0, x
+ }
+
+ MASK :: 0x7FF
+ SHIFT :: 64 - 11 - 1
+ BIAS :: 1023
+
+ // Extract out the integer and exponent such that,
+ // x = ix * 2 ** exp.
+ ix := transmute(u64)x
+ exp := int(ix>>SHIFT&MASK) - BIAS - SHIFT
+ ix &~= MASK << SHIFT
+ ix |= 1 << SHIFT
+ // Use the exponent to extract the 3 appropriate u64 digits from bd_pi4,
+ // B ~ (z0, z1, z2), such that the product leading digit has the exponent -61.
+ // Note, exp >= -53 since x >= PI4 and exp < 971 for maximum f64.
+ digit, bitshift := uint(exp+61)/64, uint(exp+61)%64
+ z0 := (bd_pi4[digit] << bitshift) | (bd_pi4[digit+1] >> (64 - bitshift))
+ z1 := (bd_pi4[digit+1] << bitshift) | (bd_pi4[digit+2] >> (64 - bitshift))
+ z2 := (bd_pi4[digit+2] << bitshift) | (bd_pi4[digit+3] >> (64 - bitshift))
+ // Multiply mantissa by the digits and extract the upper two digits (hi, lo).
+ z2hi, _ := bits.mul(z2, ix)
+ z1hi, z1lo := bits.mul(z1, ix)
+ z0lo := z0 * ix
+ lo, c := bits.add(z1lo, z2hi, 0)
+ hi, _ := bits.add(z0lo, z1hi, c)
+ // The top 3 bits are j.
+ j = hi >> 61
+ // Extract the fraction and find its magnitude.
+ hi = hi<<3 | lo>>61
+ lz := uint(bits.leading_zeros(hi))
+ e := u64(BIAS - (lz + 1))
+ // Clear implicit mantissa bit and shift into place.
+ hi = (hi << (lz + 1)) | (lo >> (64 - (lz + 1)))
+ hi >>= 64 - SHIFT
+ // Include the exponent and convert to a float.
+ hi |= e << SHIFT
+ z = transmute(f64)hi
+ // Map zeros to origin.
+ if j&1 == 1 {
+ j += 1
+ j &= 7
+ z -= 1
+ }
+ // Multiply the fractional part by pi/4.
+ return j, z * PI4
+}
diff --git a/core/mem/allocators.odin b/core/mem/allocators.odin
index 603c2a6c7..7767740c9 100644
--- a/core/mem/allocators.odin
+++ b/core/mem/allocators.odin
@@ -813,22 +813,22 @@ panic_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
switch mode {
case .Alloc:
if size > 0 {
- panic("mem: panic allocator, .Alloc called")
+ panic("mem: panic allocator, .Alloc called", loc=loc)
}
case .Alloc_Non_Zeroed:
if size > 0 {
- panic("mem: panic allocator, .Alloc_Non_Zeroed called")
+ panic("mem: panic allocator, .Alloc_Non_Zeroed called", loc=loc)
}
case .Resize:
if size > 0 {
- panic("mem: panic allocator, .Resize called")
+ panic("mem: panic allocator, .Resize called", loc=loc)
}
case .Free:
if old_memory != nil {
- panic("mem: panic allocator, .Free called")
+ panic("mem: panic allocator, .Free called", loc=loc)
}
case .Free_All:
- panic("mem: panic allocator, .Free_All called")
+ panic("mem: panic allocator, .Free_All called", loc=loc)
case .Query_Features:
set := (^Allocator_Mode_Set)(old_memory)
@@ -838,7 +838,7 @@ panic_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
return nil, nil
case .Query_Info:
- panic("mem: panic allocator, .Query_Info called")
+ panic("mem: panic allocator, .Query_Info called", loc=loc)
}
return nil, nil
diff --git a/core/mem/virtual/arena.odin b/core/mem/virtual/arena.odin
index 027a6ce6e..cfd35ab05 100644
--- a/core/mem/virtual/arena.odin
+++ b/core/mem/virtual/arena.odin
@@ -120,7 +120,7 @@ arena_alloc :: proc(arena: ^Arena, size: uint, alignment: uint, loc := #caller_l
if arena.minimum_block_size == 0 {
arena.minimum_block_size = DEFAULT_ARENA_STATIC_RESERVE_SIZE
}
- arena_init_static(arena=arena, reserved=arena.minimum_block_size, commit_size=DEFAULT_ARENA_STATIC_COMMIT_SIZE) or_return
+ arena_init_static(arena, reserved=arena.minimum_block_size, commit_size=DEFAULT_ARENA_STATIC_COMMIT_SIZE) or_return
}
fallthrough
case .Buffer:
@@ -242,7 +242,7 @@ arena_growing_bootstrap_new_by_name :: proc($T: typeid, $field_name: string, min
return arena_growing_bootstrap_new_by_offset(T, offset_of_by_string(T, field_name), minimum_block_size)
}
-// Ability to bootstrap allocate a struct with an arena within the struct itself using the growing variant strategy.
+// Ability to bootstrap allocate a struct with an arena within the struct itself using the static variant strategy.
@(require_results)
arena_static_bootstrap_new_by_offset :: proc($T: typeid, offset_to_arena: uintptr, reserved: uint) -> (ptr: ^T, err: Allocator_Error) {
bootstrap: Arena
@@ -258,7 +258,7 @@ arena_static_bootstrap_new_by_offset :: proc($T: typeid, offset_to_arena: uintpt
return
}
-// Ability to bootstrap allocate a struct with an arena within the struct itself using the growing variant strategy.
+// Ability to bootstrap allocate a struct with an arena within the struct itself using the static variant strategy.
@(require_results)
arena_static_bootstrap_new_by_name :: proc($T: typeid, $field_name: string, reserved: uint) -> (ptr: ^T, err: Allocator_Error) {
return arena_static_bootstrap_new_by_offset(T, offset_of_by_string(T, field_name), reserved)
@@ -271,7 +271,7 @@ arena_allocator :: proc(arena: ^Arena) -> mem.Allocator {
return mem.Allocator{arena_allocator_proc, arena}
}
-// The allocator procedured by an `Allocator` produced by `arena_allocator`
+// The allocator procedure used by an `Allocator` produced by `arena_allocator`
arena_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode,
size, alignment: int,
old_memory: rawptr, old_size: int,
@@ -328,7 +328,7 @@ arena_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode,
-// An `Arena_Temp` is a way to produce temporary watermarks to reset a arena to a previous state.
+// An `Arena_Temp` is a way to produce temporary watermarks to reset an arena to a previous state.
// All uses of an `Arena_Temp` must be handled by ending them with `arena_temp_end` or ignoring them with `arena_temp_ignore`.
Arena_Temp :: struct {
arena: ^Arena,
diff --git a/core/mem/virtual/virtual_bsd.odin b/core/mem/virtual/virtual_bsd.odin
new file mode 100644
index 000000000..103e48074
--- /dev/null
+++ b/core/mem/virtual/virtual_bsd.odin
@@ -0,0 +1,24 @@
+//+build freebsd, openbsd
+//+private
+package mem_virtual
+
+
+
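+// NOTE: these are placeholder stubs; virtual memory support for FreeBSD and
+// OpenBSD is not implemented here yet, so every procedure below is a no-op.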
+_reserve :: proc "contextless" (size: uint) -> (data: []byte, err: Allocator_Error) {
+ return nil, nil
+}
+
+_commit :: proc "contextless" (data: rawptr, size: uint) -> Allocator_Error {
+ return nil
+}
+_decommit :: proc "contextless" (data: rawptr, size: uint) {
+}
+_release :: proc "contextless" (data: rawptr, size: uint) {
+}
+_protect :: proc "contextless" (data: rawptr, size: uint, flags: Protect_Flags) -> bool {
+ return false
+}
+
+_platform_memory_init :: proc() {
+
+}
diff --git a/core/net/dns_unix.odin b/core/net/dns_unix.odin
index bbecc7476..e9b7bd066 100644
--- a/core/net/dns_unix.odin
+++ b/core/net/dns_unix.odin
@@ -44,9 +44,6 @@ _get_dns_records_os :: proc(hostname: string, type: DNS_Record_Type, allocator :
if !hosts_ok {
return nil, .Invalid_Hosts_Config_Error
}
- if len(hosts) == 0 {
- return
- }
host_overrides := make([dynamic]DNS_Record)
for host in hosts {
@@ -80,4 +77,4 @@ _get_dns_records_os :: proc(hostname: string, type: DNS_Record_Type, allocator :
}
return get_dns_records_from_nameservers(hostname, type, name_servers, host_overrides[:])
-}
\ No newline at end of file
+}
diff --git a/core/net/socket_darwin.odin b/core/net/socket_darwin.odin
index f00be9915..081892afd 100644
--- a/core/net/socket_darwin.odin
+++ b/core/net/socket_darwin.odin
@@ -268,9 +268,9 @@ _set_option :: proc(s: Any_Socket, option: Socket_Option, value: any, loc := #ca
t, ok := value.(time.Duration)
if !ok do panic("set_option() value must be a time.Duration here", loc)
- nanos := time.duration_nanoseconds(t)
- timeval_value.nanoseconds = int(nanos % 1e9)
- timeval_value.seconds = (nanos - i64(timeval_value.nanoseconds)) / 1e9
+ micros := i64(time.duration_microseconds(t))
+ timeval_value.microseconds = int(micros % 1e6)
+ timeval_value.seconds = (micros - i64(timeval_value.microseconds)) / 1e6
ptr = &timeval_value
len = size_of(timeval_value)
@@ -368,4 +368,4 @@ _sockaddr_to_endpoint :: proc(native_addr: ^os.SOCKADDR_STORAGE_LH) -> (ep: Endp
panic("native_addr is neither IP4 or IP6 address")
}
return
-}
\ No newline at end of file
+}
diff --git a/core/net/socket_linux.odin b/core/net/socket_linux.odin
index 690e09ab7..b7141e8ba 100644
--- a/core/net/socket_linux.odin
+++ b/core/net/socket_linux.odin
@@ -283,9 +283,9 @@ _set_option :: proc(s: Any_Socket, option: Socket_Option, value: any, loc := #ca
t, ok := value.(time.Duration)
if !ok do panic("set_option() value must be a time.Duration here", loc)
- nanos := time.duration_nanoseconds(t)
- timeval_value.nanoseconds = int(nanos % 1e9)
- timeval_value.seconds = (nanos - i64(timeval_value.nanoseconds)) / 1e9
+ micros := i64(time.duration_microseconds(t))
+ timeval_value.microseconds = int(micros % 1e6)
+ timeval_value.seconds = (micros - i64(timeval_value.microseconds)) / 1e6
ptr = &timeval_value
len = size_of(timeval_value)
@@ -404,4 +404,4 @@ _sockaddr_basic_to_endpoint :: proc(native_addr: ^os.SOCKADDR) -> (ep: Endpoint)
panic("native_addr is neither IP4 or IP6 address")
}
return
-}
\ No newline at end of file
+}
diff --git a/core/os/os2/errors_windows.odin b/core/os/os2/errors_windows.odin
index 27c16e72e..6500e7ccc 100644
--- a/core/os/os2/errors_windows.odin
+++ b/core/os/os2/errors_windows.odin
@@ -37,14 +37,18 @@ _get_platform_error :: proc() -> Error {
case win32.ERROR_NOT_SUPPORTED:
return .Unsupported
+ case win32.ERROR_HANDLE_EOF:
+ return .EOF
+
+ case win32.ERROR_INVALID_HANDLE:
+ return .Invalid_File
+
case
win32.ERROR_BAD_ARGUMENTS,
win32.ERROR_INVALID_PARAMETER,
win32.ERROR_NOT_ENOUGH_MEMORY,
- win32.ERROR_INVALID_HANDLE,
win32.ERROR_NO_MORE_FILES,
win32.ERROR_LOCK_VIOLATION,
- win32.ERROR_HANDLE_EOF,
win32.ERROR_BROKEN_PIPE,
win32.ERROR_CALL_NOT_IMPLEMENTED,
win32.ERROR_INSUFFICIENT_BUFFER,
diff --git a/core/os/os2/file.odin b/core/os/os2/file.odin
index eb6d9e366..da822374a 100644
--- a/core/os/os2/file.odin
+++ b/core/os/os2/file.odin
@@ -8,12 +8,6 @@ File :: struct {
impl: _File,
}
-Seek_From :: enum {
- Start = 0, // seek relative to the origin of the file
- Current = 1, // seek relative to the current offset
- End = 2, // seek relative to the end
-}
-
File_Mode :: distinct u32
File_Mode_Dir :: File_Mode(1<<16)
File_Mode_Named_Pipe :: File_Mode(1<<17)
@@ -72,54 +66,68 @@ fd :: proc(f: ^File) -> uintptr {
return _fd(f)
}
-
-close :: proc(f: ^File) -> Error {
- return _close(f)
-}
-
name :: proc(f: ^File) -> string {
return _name(f)
}
-seek :: proc(f: ^File, offset: i64, whence: Seek_From) -> (ret: i64, err: Error) {
- return _seek(f, offset, whence)
+close :: proc(f: ^File) -> Error {
+ if f != nil {
+ return io.close(f.impl.stream)
+ }
+ return nil
+}
+
+seek :: proc(f: ^File, offset: i64, whence: io.Seek_From) -> (ret: i64, err: Error) {
+ if f != nil {
+ return io.seek(f.impl.stream, offset, whence)
+ }
+ return 0, .Invalid_File
}
read :: proc(f: ^File, p: []byte) -> (n: int, err: Error) {
- return _read(f, p)
+ if f != nil {
+ return io.read(f.impl.stream, p)
+ }
+ return 0, .Invalid_File
}
read_at :: proc(f: ^File, p: []byte, offset: i64) -> (n: int, err: Error) {
- return _read_at(f, p, offset)
-}
-
-read_from :: proc(f: ^File, r: io.Reader) -> (n: i64, err: Error) {
- return _read_from(f, r)
+ if f != nil {
+ return io.read_at(f.impl.stream, p, offset)
+ }
+ return 0, .Invalid_File
}
write :: proc(f: ^File, p: []byte) -> (n: int, err: Error) {
- return _write(f, p)
+ if f != nil {
+ return io.write(f.impl.stream, p)
+ }
+ return 0, .Invalid_File
}
write_at :: proc(f: ^File, p: []byte, offset: i64) -> (n: int, err: Error) {
- return _write_at(f, p, offset)
-}
-
-write_to :: proc(f: ^File, w: io.Writer) -> (n: i64, err: Error) {
- return _write_to(f, w)
+ if f != nil {
+ return io.write_at(f.impl.stream, p, offset)
+ }
+ return 0, .Invalid_File
}
file_size :: proc(f: ^File) -> (n: i64, err: Error) {
- return _file_size(f)
-}
-
-
-sync :: proc(f: ^File) -> Error {
- return _sync(f)
+ if f != nil {
+ return io.size(f.impl.stream)
+ }
+ return 0, .Invalid_File
}
flush :: proc(f: ^File) -> Error {
- return _flush(f)
+ if f != nil {
+ return io.flush(f.impl.stream)
+ }
+ return nil
+}
+
+sync :: proc(f: ^File) -> Error {
+ return _sync(f)
}
truncate :: proc(f: ^File, size: i64) -> Error {
diff --git a/core/os/os2/file_linux.odin b/core/os/os2/file_linux.odin
index 890bbfc43..ddd827bce 100644
--- a/core/os/os2/file_linux.odin
+++ b/core/os/os2/file_linux.odin
@@ -33,6 +33,8 @@ _File :: struct {
name: string,
fd: int,
allocator: runtime.Allocator,
+
+ stream: io.Stream,
}
_file_allocator :: proc() -> runtime.Allocator {
@@ -73,6 +75,10 @@ _new_file :: proc(fd: uintptr, _: string) -> ^File {
file.impl.fd = int(fd)
file.impl.allocator = _file_allocator()
file.impl.name = _get_full_path(file.impl.fd, file.impl.allocator)
+ file.impl.stream = {
+ data = file,
+ procedure = _file_stream_proc,
+ }
return file
}
@@ -102,7 +108,7 @@ _name :: proc(f: ^File) -> string {
return f.impl.name if f != nil else ""
}
-_seek :: proc(f: ^File, offset: i64, whence: Seek_From) -> (ret: i64, err: Error) {
+_seek :: proc(f: ^File, offset: i64, whence: io.Seek_From) -> (ret: i64, err: Error) {
res := unix.sys_lseek(f.impl.fd, offset, int(whence))
if res < 0 {
return -1, _get_platform_error(int(res))
@@ -110,18 +116,18 @@ _seek :: proc(f: ^File, offset: i64, whence: Seek_From) -> (ret: i64, err: Error
return res, nil
}
-_read :: proc(f: ^File, p: []byte) -> (n: int, err: Error) {
+_read :: proc(f: ^File, p: []byte) -> (i64, Error) {
if len(p) == 0 {
return 0, nil
}
- n = unix.sys_read(f.impl.fd, &p[0], len(p))
+ n := unix.sys_read(f.impl.fd, &p[0], len(p))
if n < 0 {
return -1, _get_platform_error(n)
}
- return n, nil
+ return i64(n), nil
}
-_read_at :: proc(f: ^File, p: []byte, offset: i64) -> (n: int, err: Error) {
+_read_at :: proc(f: ^File, p: []byte, offset: i64) -> (n: i64, err: Error) {
if offset < 0 {
return 0, .Invalid_Offset
}
@@ -132,30 +138,25 @@ _read_at :: proc(f: ^File, p: []byte, offset: i64) -> (n: int, err: Error) {
if m < 0 {
return -1, _get_platform_error(m)
}
- n += m
+ n += i64(m)
b = b[m:]
offset += i64(m)
}
return
}
-_read_from :: proc(f: ^File, r: io.Reader) -> (n: i64, err: Error) {
- //TODO
- return
-}
-
-_write :: proc(f: ^File, p: []byte) -> (n: int, err: Error) {
+_write :: proc(f: ^File, p: []byte) -> (i64, Error) {
if len(p) == 0 {
return 0, nil
}
- n = unix.sys_write(f.impl.fd, &p[0], uint(len(p)))
+ n := unix.sys_write(f.impl.fd, &p[0], uint(len(p)))
if n < 0 {
return -1, _get_platform_error(n)
}
- return int(n), nil
+ return i64(n), nil
}
-_write_at :: proc(f: ^File, p: []byte, offset: i64) -> (n: int, err: Error) {
+_write_at :: proc(f: ^File, p: []byte, offset: i64) -> (n: i64, err: Error) {
if offset < 0 {
return 0, .Invalid_Offset
}
@@ -166,18 +167,13 @@ _write_at :: proc(f: ^File, p: []byte, offset: i64) -> (n: int, err: Error) {
if m < 0 {
return -1, _get_platform_error(m)
}
- n += m
+ n += i64(m)
b = b[m:]
offset += i64(m)
}
return
}
-_write_to :: proc(f: ^File, w: io.Writer) -> (n: i64, err: Error) {
- //TODO
- return
-}
-
_file_size :: proc(f: ^File) -> (n: i64, err: Error) {
s: _Stat = ---
res := unix.sys_fstat(f.impl.fd, &s)
@@ -366,3 +362,49 @@ _is_dir_fd :: proc(fd: int) -> bool {
_temp_name_to_cstring :: proc(name: string) -> (cname: cstring) {
return strings.clone_to_cstring(name, context.temp_allocator)
}
+
+
+@(private="package")
+_file_stream_proc :: proc(stream_data: rawptr, mode: io.Stream_Mode, p: []byte, offset: i64, whence: io.Seek_From) -> (n: i64, err: io.Error) {
+ f := (^File)(stream_data)
+ ferr: Error
+ i: int
+ switch mode {
+ case .Read:
+ n, ferr = _read(f, p)
+ err = error_to_io_error(ferr)
+ return
+ case .Read_At:
+ n, ferr = _read_at(f, p, offset)
+ err = error_to_io_error(ferr)
+ return
+ case .Write:
+ n, ferr = _write(f, p)
+ err = error_to_io_error(ferr)
+ return
+ case .Write_At:
+ n, ferr = _write_at(f, p, offset)
+ err = error_to_io_error(ferr)
+ return
+ case .Seek:
+ n, ferr = _seek(f, offset, whence)
+ err = error_to_io_error(ferr)
+ return
+ case .Size:
+ n, ferr = _file_size(f)
+ err = error_to_io_error(ferr)
+ return
+ case .Flush:
+ ferr = _flush(f)
+ err = error_to_io_error(ferr)
+ return
+ case .Close, .Destroy:
+ ferr = _close(f)
+ err = error_to_io_error(ferr)
+ return
+ case .Query:
+ return io.query_utility({.Read, .Read_At, .Write, .Write_At, .Seek, .Size, .Flush, .Close, .Destroy, .Query})
+ }
+ return 0, .Empty
+}
+
diff --git a/core/os/os2/file_stream.odin b/core/os/os2/file_stream.odin
index 7edbd68fa..da1e3344f 100644
--- a/core/os/os2/file_stream.odin
+++ b/core/os/os2/file_stream.odin
@@ -3,17 +3,15 @@ package os2
import "core:io"
to_stream :: proc(f: ^File) -> (s: io.Stream) {
- s.stream_data = f
- s.stream_vtable = &_file_stream_vtable
+ if f != nil {
+ assert(f.impl.stream.procedure != nil)
+ s = f.impl.stream
+ }
return
}
-to_writer :: proc(f: ^File) -> (s: io.Writer) {
- return {to_stream(f)}
-}
-to_reader :: proc(f: ^File) -> (s: io.Reader) {
- return {to_stream(f)}
-}
+to_writer :: to_stream
+to_reader :: to_stream
@(private)
@@ -23,71 +21,3 @@ error_to_io_error :: proc(ferr: Error) -> io.Error {
}
return ferr.(io.Error) or_else .Unknown
}
-
-
-@(private)
-_file_stream_vtable := io.Stream_VTable{
- impl_read = proc(s: io.Stream, p: []byte) -> (n: int, err: io.Error) {
- f := (^File)(s.stream_data)
- ferr: Error
- n, ferr = read(f, p)
- err = error_to_io_error(ferr)
- return
- },
- impl_read_at = proc(s: io.Stream, p: []byte, offset: i64) -> (n: int, err: io.Error) {
- f := (^File)(s.stream_data)
- ferr: Error
- n, ferr = read_at(f, p, offset)
- err = error_to_io_error(ferr)
- return
- },
- impl_write_to = proc(s: io.Stream, w: io.Writer) -> (n: i64, err: io.Error) {
- f := (^File)(s.stream_data)
- ferr: Error
- n, ferr = write_to(f, w)
- err = error_to_io_error(ferr)
- return
- },
- impl_write = proc(s: io.Stream, p: []byte) -> (n: int, err: io.Error) {
- f := (^File)(s.stream_data)
- ferr: Error
- n, ferr = write(f, p)
- err = error_to_io_error(ferr)
- return
- },
- impl_write_at = proc(s: io.Stream, p: []byte, offset: i64) -> (n: int, err: io.Error) {
- f := (^File)(s.stream_data)
- ferr: Error
- n, ferr = write_at(f, p, offset)
- err = error_to_io_error(ferr)
- return
- },
- impl_read_from = proc(s: io.Stream, r: io.Reader) -> (n: i64, err: io.Error) {
- f := (^File)(s.stream_data)
- ferr: Error
- n, ferr = read_from(f, r)
- err = error_to_io_error(ferr)
- return
- },
- impl_seek = proc(s: io.Stream, offset: i64, whence: io.Seek_From) -> (i64, io.Error) {
- f := (^File)(s.stream_data)
- n, ferr := seek(f, offset, Seek_From(whence))
- err := error_to_io_error(ferr)
- return n, err
- },
- impl_size = proc(s: io.Stream) -> i64 {
- f := (^File)(s.stream_data)
- sz, _ := file_size(f)
- return sz
- },
- impl_flush = proc(s: io.Stream) -> io.Error {
- f := (^File)(s.stream_data)
- ferr := flush(f)
- return error_to_io_error(ferr)
- },
- impl_close = proc(s: io.Stream) -> io.Error {
- f := (^File)(s.stream_data)
- ferr := close(f)
- return error_to_io_error(ferr)
- },
-}
diff --git a/core/os/os2/file_windows.odin b/core/os/os2/file_windows.odin
index e4ae4856a..600ecde21 100644
--- a/core/os/os2/file_windows.odin
+++ b/core/os/os2/file_windows.odin
@@ -38,6 +38,8 @@ _File :: struct {
wname: win32.wstring,
kind: _File_Kind,
+ stream: io.Stream,
+
allocator: runtime.Allocator,
rw_mutex: sync.RW_Mutex, // read write calls
@@ -144,6 +146,11 @@ _new_file :: proc(handle: uintptr, name: string) -> ^File {
}
f.impl.kind = kind
+ f.impl.stream = {
+ data = f,
+ procedure = _file_stream_proc,
+ }
+
return f
}
@@ -181,7 +188,7 @@ _name :: proc(f: ^File) -> string {
return f.impl.name if f != nil else ""
}
-_seek :: proc(f: ^File, offset: i64, whence: Seek_From) -> (ret: i64, err: Error) {
+_seek :: proc(f: ^File, offset: i64, whence: io.Seek_From) -> (ret: i64, err: Error) {
handle := _handle(f)
if handle == win32.INVALID_HANDLE {
return 0, .Invalid_File
@@ -208,7 +215,7 @@ _seek :: proc(f: ^File, offset: i64, whence: Seek_From) -> (ret: i64, err: Error
return i64(hi)<<32 + i64(dw_ptr), nil
}
-_read :: proc(f: ^File, p: []byte) -> (n: int, err: Error) {
+_read :: proc(f: ^File, p: []byte) -> (n: i64, err: Error) {
read_console :: proc(handle: win32.HANDLE, b: []byte) -> (n: int, err: Error) {
if len(b) == 0 {
return 0, nil
@@ -274,7 +281,7 @@ _read :: proc(f: ^File, p: []byte) -> (n: int, err: Error) {
n, err := read_console(handle, p[total_read:][:to_read])
total_read += n
if err != nil {
- return int(total_read), err
+ return i64(total_read), err
}
} else {
ok = win32.ReadFile(handle, &p[total_read], to_read, &single_read_length, nil)
@@ -287,11 +294,11 @@ _read :: proc(f: ^File, p: []byte) -> (n: int, err: Error) {
}
}
- return int(total_read), nil
+ return i64(total_read), err
}
-_read_at :: proc(f: ^File, p: []byte, offset: i64) -> (n: int, err: Error) {
- pread :: proc(f: ^File, data: []byte, offset: i64) -> (n: int, err: Error) {
+_read_at :: proc(f: ^File, p: []byte, offset: i64) -> (n: i64, err: Error) {
+ pread :: proc(f: ^File, data: []byte, offset: i64) -> (n: i64, err: Error) {
buf := data
if len(buf) > MAX_RW {
buf = buf[:MAX_RW]
@@ -313,7 +320,7 @@ _read_at :: proc(f: ^File, p: []byte, offset: i64) -> (n: int, err: Error) {
err = _get_platform_error()
done = 0
}
- n = int(done)
+ n = i64(done)
return
}
@@ -329,12 +336,7 @@ _read_at :: proc(f: ^File, p: []byte, offset: i64) -> (n: int, err: Error) {
return
}
-_read_from :: proc(f: ^File, r: io.Reader) -> (n: i64, err: Error) {
- // TODO(bill)
- return
-}
-
-_write :: proc(f: ^File, p: []byte) -> (n: int, err: Error) {
+_write :: proc(f: ^File, p: []byte) -> (n: i64, err: Error) {
if len(p) == 0 {
return
}
@@ -352,17 +354,17 @@ _write :: proc(f: ^File, p: []byte) -> (n: int, err: Error) {
e := win32.WriteFile(handle, &p[total_write], to_write, &single_write_length, nil)
if single_write_length <= 0 || !e {
- n = int(total_write)
+ n = i64(total_write)
err = _get_platform_error()
return
}
total_write += i64(single_write_length)
}
- return int(total_write), nil
+ return i64(total_write), nil
}
-_write_at :: proc(f: ^File, p: []byte, offset: i64) -> (n: int, err: Error) {
- pwrite :: proc(f: ^File, data: []byte, offset: i64) -> (n: int, err: Error) {
+_write_at :: proc(f: ^File, p: []byte, offset: i64) -> (n: i64, err: Error) {
+ pwrite :: proc(f: ^File, data: []byte, offset: i64) -> (n: i64, err: Error) {
buf := data
if len(buf) > MAX_RW {
buf = buf[:MAX_RW]
@@ -382,7 +384,7 @@ _write_at :: proc(f: ^File, p: []byte, offset: i64) -> (n: int, err: Error) {
err = _get_platform_error()
done = 0
}
- n = int(done)
+ n = i64(done)
return
}
@@ -397,11 +399,6 @@ _write_at :: proc(f: ^File, p: []byte, offset: i64) -> (n: int, err: Error) {
return
}
-_write_to :: proc(f: ^File, w: io.Writer) -> (n: i64, err: Error) {
- // TODO(bill)
- return
-}
-
_file_size :: proc(f: ^File) -> (n: i64, err: Error) {
length: win32.LARGE_INTEGER
handle := _handle(f)
@@ -727,3 +724,51 @@ _is_dir :: proc(path: string) -> bool {
}
return false
}
+
+
+@(private="package")
+_file_stream_proc :: proc(stream_data: rawptr, mode: io.Stream_Mode, p: []byte, offset: i64, whence: io.Seek_From) -> (n: i64, err: io.Error) {
+ f := (^File)(stream_data)
+ ferr: Error
+ i: int
+ switch mode {
+ case .Read:
+ n, ferr = _read(f, p)
+ err = error_to_io_error(ferr)
+ return
+ case .Read_At:
+ n, ferr = _read_at(f, p, offset)
+ err = error_to_io_error(ferr)
+ return
+ case .Write:
+ n, ferr = _write(f, p)
+ err = error_to_io_error(ferr)
+ return
+ case .Write_At:
+ n, ferr = _write_at(f, p, offset)
+ err = error_to_io_error(ferr)
+ return
+ case .Seek:
+ n, ferr = _seek(f, offset, whence)
+ err = error_to_io_error(ferr)
+ return
+ case .Size:
+ n, ferr = _file_size(f)
+ err = error_to_io_error(ferr)
+ return
+ case .Flush:
+ ferr = _flush(f)
+ err = error_to_io_error(ferr)
+ return
+ case .Close:
+ ferr = _close(f)
+ err = error_to_io_error(ferr)
+ return
+ case .Query:
+ return io.query_utility({.Read, .Read_At, .Write, .Write_At, .Seek, .Size, .Flush, .Close, .Query})
+ case .Destroy:
+ return 0, .Empty
+ }
+ return 0, .Empty
+}
+
diff --git a/core/os/os2/path_windows.odin b/core/os/os2/path_windows.odin
index 2dc667822..a2306784e 100644
--- a/core/os/os2/path_windows.odin
+++ b/core/os/os2/path_windows.odin
@@ -23,7 +23,7 @@ _mkdir_all :: proc(path: string, perm: File_Mode) -> Error {
fix_root_directory :: proc(p: string) -> (s: string, allocated: bool, err: runtime.Allocator_Error) {
if len(p) == len(`\\?\c:`) {
if is_path_separator(p[0]) && is_path_separator(p[1]) && p[2] == '?' && is_path_separator(p[3]) && p[5] == ':' {
- s = strings.concatenate_safe({p, `\`}, _file_allocator()) or_return
+ s = strings.concatenate({p, `\`}, _file_allocator()) or_return
allocated = true
return
}
diff --git a/core/os/os_darwin.odin b/core/os/os_darwin.odin
index a2d68aeed..d8ba40fd0 100644
--- a/core/os/os_darwin.odin
+++ b/core/os/os_darwin.odin
@@ -314,15 +314,16 @@ Dirent :: struct {
Dir :: distinct rawptr // DIR*
+ADDRESS_FAMILY :: c.char
SOCKADDR :: struct #packed {
len: c.char,
- family: c.char,
+ family: ADDRESS_FAMILY,
sa_data: [14]c.char,
}
SOCKADDR_STORAGE_LH :: struct #packed {
len: c.char,
- family: c.char,
+ family: ADDRESS_FAMILY,
__ss_pad1: [6]c.char,
__ss_align: i64,
__ss_pad2: [112]c.char,
@@ -330,7 +331,7 @@ SOCKADDR_STORAGE_LH :: struct #packed {
sockaddr_in :: struct #packed {
sin_len: c.char,
- sin_family: c.char,
+ sin_family: ADDRESS_FAMILY,
sin_port: u16be,
sin_addr: in_addr,
sin_zero: [8]c.char,
@@ -338,7 +339,7 @@ sockaddr_in :: struct #packed {
sockaddr_in6 :: struct #packed {
sin6_len: c.char,
- sin6_family: c.char,
+ sin6_family: ADDRESS_FAMILY,
sin6_port: u16be,
sin6_flowinfo: c.uint,
sin6_addr: in6_addr,
@@ -355,7 +356,7 @@ in6_addr :: struct #packed {
Timeval :: struct {
seconds: i64,
- nanoseconds: int,
+ microseconds: int,
}
Linger :: struct {
@@ -440,7 +441,7 @@ foreign libc {
@(link_name="closedir") _unix_closedir :: proc(dirp: Dir) -> c.int ---
@(link_name="rewinddir") _unix_rewinddir :: proc(dirp: Dir) ---
- @(link_name="__fcntl") _unix__fcntl :: proc(fd: Handle, cmd: c.int, #c_vararg args: ..any) -> c.int ---
+ @(link_name="__fcntl") _unix__fcntl :: proc(fd: Handle, cmd: c.int, arg: uintptr) -> c.int ---
@(link_name="rename") _unix_rename :: proc(old: cstring, new: cstring) -> c.int ---
@(link_name="remove") _unix_remove :: proc(path: cstring) -> c.int ---
@@ -794,14 +795,14 @@ _readlink :: proc(path: string) -> (string, Errno) {
}
absolute_path_from_handle :: proc(fd: Handle) -> (string, Errno) {
- buf : [256]byte
- res := _unix__fcntl(fd, F_GETPATH, &buf[0])
- if res != 0 {
- return "", Errno(get_last_error())
+ buf: [DARWIN_MAXPATHLEN]byte
+ _, err := fcntl(int(fd), F_GETPATH, int(uintptr(&buf[0])))
+ if err != ERROR_NONE {
+ return "", err
}
path := strings.clone_from_cstring(cstring(&buf[0]))
- return path, ERROR_NONE
+ return path, err
}
absolute_path_from_relative :: proc(rel: string) -> (path: string, err: Errno) {
@@ -1068,7 +1069,7 @@ shutdown :: proc(sd: Socket, how: int) -> (Errno) {
}
fcntl :: proc(fd: int, cmd: int, arg: int) -> (int, Errno) {
- result := _unix__fcntl(Handle(fd), c.int(cmd), c.int(arg))
+ result := _unix__fcntl(Handle(fd), c.int(cmd), uintptr(arg))
if result < 0 {
return 0, Errno(get_last_error())
}
diff --git a/core/os/os_linux.odin b/core/os/os_linux.odin
index 3dc48087a..1a4c1fddb 100644
--- a/core/os/os_linux.odin
+++ b/core/os/os_linux.odin
@@ -241,7 +241,7 @@ socklen_t :: c.int
Timeval :: struct {
seconds: i64,
- nanoseconds: int,
+ microseconds: int,
}
// "Argv" arguments converted to Odin strings
@@ -432,6 +432,14 @@ AT_FDCWD :: ~uintptr(99) /* -100 */
AT_REMOVEDIR :: uintptr(0x200)
AT_SYMLINK_NOFOLLOW :: uintptr(0x100)
+pollfd :: struct {
+ fd: c.int,
+ events: c.short,
+ revents: c.short,
+}
+
+sigset_t :: distinct u64
+
foreign libc {
@(link_name="__errno_location") __errno_location :: proc() -> ^int ---
@@ -450,6 +458,7 @@ foreign libc {
@(link_name="execvp") _unix_execvp :: proc(path: cstring, argv: [^]cstring) -> int ---
@(link_name="getenv") _unix_getenv :: proc(cstring) -> cstring ---
@(link_name="putenv") _unix_putenv :: proc(cstring) -> c.int ---
+ @(link_name="setenv") _unix_setenv :: proc(key: cstring, value: cstring, overwrite: c.int) -> c.int ---
@(link_name="realpath") _unix_realpath :: proc(path: cstring, resolved_path: rawptr) -> rawptr ---
@(link_name="exit") _unix_exit :: proc(status: c.int) -> ! ---
@@ -885,8 +894,10 @@ get_env :: proc(key: string, allocator := context.allocator) -> (value: string)
set_env :: proc(key, value: string) -> Errno {
runtime.DEFAULT_TEMP_ALLOCATOR_TEMP_GUARD()
- s := strings.concatenate({key, "=", value, "\x00"}, context.temp_allocator)
- res := _unix_putenv(strings.unsafe_string_to_cstring(s))
+ key_cstring := strings.clone_to_cstring(key, context.temp_allocator)
+ value_cstring := strings.clone_to_cstring(value, context.temp_allocator)
+	// NOTE(GoNZooo): `setenv` is used instead of `putenv` because, unlike `putenv`, it copies both the key and the value.
+ res := _unix_setenv(key_cstring, value_cstring, 1)
if res < 0 {
return Errno(get_last_error())
}
@@ -1086,4 +1097,20 @@ fcntl :: proc(fd: int, cmd: int, arg: int) -> (int, Errno) {
return 0, _get_errno(result)
}
return result, ERROR_NONE
-}
\ No newline at end of file
+}
+
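+// poll waits for an event on any of the given file descriptors for up to
+// `timeout` milliseconds; a negative timeout blocks indefinitely.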
+poll :: proc(fds: []pollfd, timeout: int) -> (int, Errno) {
+ result := unix.sys_poll(raw_data(fds), uint(len(fds)), timeout)
+ if result < 0 {
+ return 0, _get_errno(result)
+ }
+ return result, ERROR_NONE
+}
+
+ppoll :: proc(fds: []pollfd, timeout: ^unix.timespec, sigmask: ^sigset_t) -> (int, Errno) {
+ result := unix.sys_ppoll(raw_data(fds), uint(len(fds)), timeout, sigmask, size_of(sigset_t))
+ if result < 0 {
+ return 0, _get_errno(result)
+ }
+ return result, ERROR_NONE
+}
diff --git a/core/os/stream.odin b/core/os/stream.odin
index 9506ed3a3..2b4c83663 100644
--- a/core/os/stream.odin
+++ b/core/os/stream.odin
@@ -4,66 +4,60 @@ import "core:io"
stream_from_handle :: proc(fd: Handle) -> io.Stream {
s: io.Stream
- s.stream_data = rawptr(uintptr(fd))
- s.stream_vtable = &_file_stream_vtable
+ s.data = rawptr(uintptr(fd))
+ s.procedure = _file_stream_proc
return s
}
@(private)
-_file_stream_vtable := io.Stream_VTable{
- impl_read = proc(s: io.Stream, p: []byte) -> (n: int, err: io.Error) {
- fd := Handle(uintptr(s.stream_data))
- os_err: Errno
- n, os_err = read(fd, p)
- return
- },
- impl_read_at = proc(s: io.Stream, p: []byte, offset: i64) -> (n: int, err: io.Error) {
- when ODIN_OS == .Windows || ODIN_OS == .WASI {
- fd := Handle(uintptr(s.stream_data))
- os_err: Errno
- n, os_err = read_at(fd, p, offset)
- }
- return
- },
- impl_write = proc(s: io.Stream, p: []byte) -> (n: int, err: io.Error) {
- fd := Handle(uintptr(s.stream_data))
- os_err: Errno
- n, os_err = write(fd, p)
- return
- },
- impl_write_at = proc(s: io.Stream, p: []byte, offset: i64) -> (n: int, err: io.Error) {
- when ODIN_OS == .Windows || ODIN_OS == .WASI {
- fd := Handle(uintptr(s.stream_data))
- os_err: Errno
- n, os_err = write_at(fd, p, offset)
- _ = os_err
- }
- return
- },
- impl_seek = proc(s: io.Stream, offset: i64, whence: io.Seek_From) -> (i64, io.Error) {
- fd := Handle(uintptr(s.stream_data))
- n, os_err := seek(fd, offset, int(whence))
- _ = os_err
- return n, nil
- },
- impl_size = proc(s: io.Stream) -> i64 {
- fd := Handle(uintptr(s.stream_data))
- sz, _ := file_size(fd)
- return sz
- },
- impl_flush = proc(s: io.Stream) -> io.Error {
+_file_stream_proc :: proc(stream_data: rawptr, mode: io.Stream_Mode, p: []byte, offset: i64, whence: io.Seek_From) -> (n: i64, err: io.Error) {
+ fd := Handle(uintptr(stream_data))
+ n_int: int
+ os_err: Errno
+ switch mode {
+ case .Close:
+ close(fd)
+ case .Flush:
when ODIN_OS == .Windows {
- fd := Handle(uintptr(s.stream_data))
flush(fd)
} else {
 		// TODO(bill): other operating systems
}
- return nil
- },
- impl_close = proc(s: io.Stream) -> io.Error {
- fd := Handle(uintptr(s.stream_data))
- close(fd)
- return nil
- },
+ case .Read:
+ n_int, os_err = read(fd, p)
+ n = i64(n_int)
+ if os_err != 0 {
+ err = .Unknown
+ }
+ case .Read_At:
+ when !(ODIN_OS == .FreeBSD || ODIN_OS == .OpenBSD) {
+ n_int, os_err = read_at(fd, p, offset)
+ n = i64(n_int)
+ }
+ case .Write:
+ n_int, os_err = write(fd, p)
+ n = i64(n_int)
+ case .Write_At:
+ when !(ODIN_OS == .FreeBSD || ODIN_OS == .OpenBSD) {
+ n_int, os_err = write_at(fd, p, offset)
+ n = i64(n_int)
+ }
+ case .Seek:
+ n, os_err = seek(fd, offset, int(whence))
+ case .Size:
+ n, os_err = file_size(fd)
+ case .Destroy:
+ err = .Empty
+ case .Query:
+ when ODIN_OS == .FreeBSD || ODIN_OS == .OpenBSD {
+ return io.query_utility({.Close, .Flush, .Read, .Write, .Seek, .Size, .Query})
+ } else {
+ return io.query_utility({.Close, .Flush, .Read, .Read_At, .Write, .Write_At, .Seek, .Size, .Query})
+ }
+ }
+ if err == nil && os_err != 0 {
+ err = .Unknown
+ }
+ return
}
diff --git a/core/reflect/reflect.odin b/core/reflect/reflect.odin
index f8343ead2..a88557e0e 100644
--- a/core/reflect/reflect.odin
+++ b/core/reflect/reflect.odin
@@ -132,7 +132,7 @@ type_info_core :: runtime.type_info_core
type_info_base_without_enum :: type_info_core
-when !ODIN_DISALLOW_RTTI {
+when !ODIN_NO_RTTI {
typeid_base :: runtime.typeid_base
typeid_core :: runtime.typeid_core
typeid_base_without_enum :: typeid_core
@@ -781,7 +781,7 @@ set_union_variant_raw_tag :: proc(a: any, tag: i64) {
tag_ptr := uintptr(a.data) + info.tag_offset
tag_any := any{rawptr(tag_ptr), info.tag_type.id}
- switch i in &tag_any {
+ switch &i in tag_any {
case u8: i = u8(tag)
case i8: i = i8(tag)
case u16: i = u16(tag)
@@ -1312,7 +1312,7 @@ relative_pointer_to_absolute_raw :: proc(data: rawptr, base_integer_id: typeid)
ptr_any := any{data, base_integer_id}
ptr: rawptr
- switch i in &ptr_any {
+ switch &i in ptr_any {
case u8: ptr = _handle(&i)
case u16: ptr = _handle(&i)
case u32: ptr = _handle(&i)
diff --git a/core/runtime/core.odin b/core/runtime/core.odin
index 058ca6161..83504c9ee 100644
--- a/core/runtime/core.odin
+++ b/core/runtime/core.odin
@@ -566,7 +566,7 @@ __type_info_of :: proc "contextless" (id: typeid) -> ^Type_Info #no_bounds_check
return &type_table[n]
}
-when !ODIN_DISALLOW_RTTI {
+when !ODIN_NO_RTTI {
typeid_base :: proc "contextless" (id: typeid) -> typeid {
ti := type_info_of(id)
ti = type_info_base(ti)
diff --git a/core/runtime/core_builtin.odin b/core/runtime/core_builtin.odin
index c5cb8cc07..9f2899bcc 100644
--- a/core/runtime/core_builtin.odin
+++ b/core/runtime/core_builtin.odin
@@ -112,7 +112,7 @@ remove_range :: proc(array: ^$D/[dynamic]$T, lo, hi: int, loc := #caller_locatio
// Note: If the dynamic array as no elements (`len(array) == 0`), this procedure will panic.
@builtin
pop :: proc(array: ^$T/[dynamic]$E, loc := #caller_location) -> (res: E) #no_bounds_check {
- assert(len(array) > 0, "", loc)
+ assert(len(array) > 0, loc=loc)
res = array[len(array)-1]
(^Raw_Dynamic_Array)(array).len -= 1
return res
@@ -136,7 +136,7 @@ pop_safe :: proc(array: ^$T/[dynamic]$E) -> (res: E, ok: bool) #no_bounds_check
// Note: If the dynamic array as no elements (`len(array) == 0`), this procedure will panic.
@builtin
pop_front :: proc(array: ^$T/[dynamic]$E, loc := #caller_location) -> (res: E) #no_bounds_check {
- assert(len(array) > 0, "", loc)
+ assert(len(array) > 0, loc=loc)
res = array[0]
if len(array) > 1 {
copy(array[0:], array[1:])
@@ -424,7 +424,7 @@ append_elem :: proc(array: ^$T/[dynamic]$E, arg: E, loc := #caller_location) ->
a := (^Raw_Dynamic_Array)(array)
when size_of(E) != 0 {
data := ([^]E)(a.data)
- assert(condition=data != nil, loc=loc)
+ assert(data != nil, loc=loc)
data[a.len] = arg
}
a.len += 1
@@ -459,7 +459,7 @@ append_elems :: proc(array: ^$T/[dynamic]$E, args: ..E, loc := #caller_location)
a := (^Raw_Dynamic_Array)(array)
when size_of(E) != 0 {
data := ([^]E)(a.data)
- assert(condition=data != nil, loc=loc)
+ assert(data != nil, loc=loc)
intrinsics.mem_copy(&data[a.len], raw_data(args), size_of(E) * arg_len)
}
a.len += arg_len
@@ -472,7 +472,7 @@ append_elems :: proc(array: ^$T/[dynamic]$E, args: ..E, loc := #caller_location)
@builtin
append_elem_string :: proc(array: ^$T/[dynamic]$E/u8, arg: $A/string, loc := #caller_location) -> (n: int, err: Allocator_Error) #optional_allocator_error {
args := transmute([]E)arg
- return append_elems(array=array, args=args, loc=loc)
+ return append_elems(array, ..args, loc=loc)
}
@@ -481,7 +481,7 @@ append_elem_string :: proc(array: ^$T/[dynamic]$E/u8, arg: $A/string, loc := #ca
append_string :: proc(array: ^$T/[dynamic]$E/u8, args: ..string, loc := #caller_location) -> (n: int, err: Allocator_Error) #optional_allocator_error {
n_arg: int
for arg in args {
- n_arg, err = append(array = array, args = transmute([]E)(arg), loc = loc)
+ n_arg, err = append(array, ..transmute([]E)(arg), loc=loc)
n += n_arg
if err != nil {
return
diff --git a/core/runtime/default_allocators_js.odin b/core/runtime/default_allocators_js.odin
index cc70b963a..715073f08 100644
--- a/core/runtime/default_allocators_js.odin
+++ b/core/runtime/default_allocators_js.odin
@@ -1,5 +1,5 @@
//+build js
package runtime
-default_allocator_proc :: nil_allocator_proc
-default_allocator :: nil_allocator
+default_allocator_proc :: panic_allocator_proc
+default_allocator :: panic_allocator
diff --git a/core/runtime/default_allocators_nil.odin b/core/runtime/default_allocators_nil.odin
index f86990581..a340050eb 100644
--- a/core/runtime/default_allocators_nil.odin
+++ b/core/runtime/default_allocators_nil.odin
@@ -35,4 +35,52 @@ nil_allocator :: proc() -> Allocator {
when ODIN_OS == .Freestanding {
default_allocator_proc :: nil_allocator_proc
default_allocator :: nil_allocator
-}
\ No newline at end of file
+}
+
+
+
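+// The panic allocator panics on any non-trivial allocation, resize, or free request.
+// It replaces the nil allocator as the default allocator on targets without an OS-backed
+// heap (js, wasi) so that accidental allocations fail loudly instead of silently returning nil.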
+panic_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
+ size, alignment: int,
+ old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
+ switch mode {
+ case .Alloc:
+ if size > 0 {
+ panic("panic allocator, .Alloc called", loc=loc)
+ }
+ case .Alloc_Non_Zeroed:
+ if size > 0 {
+ panic("panic allocator, .Alloc_Non_Zeroed called", loc=loc)
+ }
+ case .Resize:
+ if size > 0 {
+ panic("panic allocator, .Resize called", loc=loc)
+ }
+ case .Free:
+ if old_memory != nil {
+ panic("panic allocator, .Free called", loc=loc)
+ }
+ case .Free_All:
+ panic("panic allocator, .Free_All called", loc=loc)
+
+ case .Query_Features:
+ set := (^Allocator_Mode_Set)(old_memory)
+ if set != nil {
+ set^ = {.Query_Features}
+ }
+ return nil, nil
+
+ case .Query_Info:
+ panic("panic allocator, .Query_Info called", loc=loc)
+ }
+
+ return nil, nil
+}
+
+panic_allocator :: proc() -> Allocator {
+ return Allocator{
+ procedure = panic_allocator_proc,
+ data = nil,
+ }
+}
+
+
diff --git a/core/runtime/default_allocators_wasi.odin b/core/runtime/default_allocators_wasi.odin
index 2e475e055..a7e6842a6 100644
--- a/core/runtime/default_allocators_wasi.odin
+++ b/core/runtime/default_allocators_wasi.odin
@@ -1,5 +1,5 @@
//+build wasi
package runtime
-default_allocator_proc :: nil_allocator_proc
-default_allocator :: nil_allocator
+default_allocator_proc :: panic_allocator_proc
+default_allocator :: panic_allocator
diff --git a/core/runtime/dynamic_map_internal.odin b/core/runtime/dynamic_map_internal.odin
index 05c03028f..d34c29d4b 100644
--- a/core/runtime/dynamic_map_internal.odin
+++ b/core/runtime/dynamic_map_internal.odin
@@ -414,68 +414,21 @@ map_insert_hash_dynamic :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^
tk := map_cell_index_dynamic(sk, info.ks, 1)
tv := map_cell_index_dynamic(sv, info.vs, 1)
- for {
- hp := &hs[pos]
- element_hash := hp^
+ swap_loop: for {
+ element_hash := hs[pos]
if map_hash_is_empty(element_hash) {
- kp := map_cell_index_dynamic(ks, info.ks, pos)
- vp := map_cell_index_dynamic(vs, info.vs, pos)
- intrinsics.mem_copy_non_overlapping(rawptr(kp), rawptr(k), size_of_k)
- intrinsics.mem_copy_non_overlapping(rawptr(vp), rawptr(v), size_of_v)
- hp^ = h
+ k_dst := map_cell_index_dynamic(ks, info.ks, pos)
+ v_dst := map_cell_index_dynamic(vs, info.vs, pos)
+ intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k), size_of_k)
+ intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v), size_of_v)
+ hs[pos] = h
- return result if result != 0 else vp
+ return result if result != 0 else v_dst
}
if map_hash_is_deleted(element_hash) {
- next_pos := (pos + 1) & mask
-
- // backward shift
- for !map_hash_is_empty(hs[next_pos]) {
- probe_distance := map_probe_distance(m^, hs[next_pos], next_pos)
- if probe_distance == 0 {
- break
- }
- probe_distance -= 1
-
- kp := map_cell_index_dynamic(ks, info.ks, pos)
- vp := map_cell_index_dynamic(vs, info.vs, pos)
- kn := map_cell_index_dynamic(ks, info.ks, next_pos)
- vn := map_cell_index_dynamic(vs, info.vs, next_pos)
-
- if distance > probe_distance {
- if result == 0 {
- result = vp
- }
- // move stored into pos; store next
- intrinsics.mem_copy_non_overlapping(rawptr(kp), rawptr(k), size_of_k)
- intrinsics.mem_copy_non_overlapping(rawptr(vp), rawptr(v), size_of_v)
- hs[pos] = h
-
- intrinsics.mem_copy_non_overlapping(rawptr(k), rawptr(kn), size_of_k)
- intrinsics.mem_copy_non_overlapping(rawptr(v), rawptr(vn), size_of_v)
- h = hs[next_pos]
- } else {
- // move next back 1
- intrinsics.mem_copy_non_overlapping(rawptr(kp), rawptr(kn), size_of_k)
- intrinsics.mem_copy_non_overlapping(rawptr(vp), rawptr(vn), size_of_v)
- hs[pos] = hs[next_pos]
- distance = probe_distance
- }
- hs[next_pos] = 0
- pos = (pos + 1) & mask
- next_pos = (next_pos + 1) & mask
- distance += 1
- }
-
- kp := map_cell_index_dynamic(ks, info.ks, pos)
- vp := map_cell_index_dynamic(vs, info.vs, pos)
- intrinsics.mem_copy_non_overlapping(rawptr(kp), rawptr(k), size_of_k)
- intrinsics.mem_copy_non_overlapping(rawptr(vp), rawptr(v), size_of_v)
- hs[pos] = h
-
- return result if result != 0 else vp
+ break swap_loop
}
if probe_distance := map_probe_distance(m^, element_hash, pos); distance > probe_distance {
@@ -495,8 +448,8 @@ map_insert_hash_dynamic :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^
intrinsics.mem_copy_non_overlapping(rawptr(vp), rawptr(tv), size_of_v)
th := h
- h = hp^
- hp^ = th
+ h = hs[pos]
+ hs[pos] = th
distance = probe_distance
}
@@ -504,6 +457,103 @@ map_insert_hash_dynamic :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^
pos = (pos + 1) & mask
distance += 1
}
+
+ // backward shift loop
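+ // The element at `pos` was a tombstone. Scan ahead with `look_ahead`, clearing any further
+ // tombstones, until either an empty slot is found (place the saved entry and stop) or a live
+ // entry is found that can be shifted backwards towards its ideal slot, filling the gap left
+ // by the deleted entries.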
+ hs[pos] = 0
+ look_ahead: uintptr = 1
+ for {
+ la_pos := (pos + look_ahead) & mask
+ element_hash := hs[la_pos]
+
+ if map_hash_is_deleted(element_hash) {
+ look_ahead += 1
+ hs[la_pos] = 0
+ continue
+ }
+
+ k_dst := map_cell_index_dynamic(ks, info.ks, pos)
+ v_dst := map_cell_index_dynamic(vs, info.vs, pos)
+
+ if map_hash_is_empty(element_hash) {
+ intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k), size_of_k)
+ intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v), size_of_v)
+ hs[pos] = h
+
+ return result if result != 0 else v_dst
+ }
+
+ k_src := map_cell_index_dynamic(ks, info.ks, la_pos)
+ v_src := map_cell_index_dynamic(vs, info.vs, la_pos)
+ probe_distance := map_probe_distance(m^, element_hash, la_pos)
+
+ if probe_distance < look_ahead {
+ // probed can be made ideal while placing saved (ending condition)
+ if result == 0 {
+ result = v_dst
+ }
+ intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k), size_of_k)
+ intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v), size_of_v)
+ hs[pos] = h
+
+ // This will be an ideal move
+ pos = (la_pos - probe_distance) & mask
+ look_ahead -= probe_distance
+
+ // shift until we hit ideal/empty
+ for probe_distance != 0 {
+ k_dst = map_cell_index_dynamic(ks, info.ks, pos)
+ v_dst = map_cell_index_dynamic(vs, info.vs, pos)
+
+ intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k_src), size_of_k)
+ intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v_src), size_of_v)
+ hs[pos] = element_hash
+ hs[la_pos] = 0
+
+ pos = (pos + 1) & mask
+ la_pos = (la_pos + 1) & mask
+ look_ahead = (la_pos - pos) & mask
+ element_hash = hs[la_pos]
+ if map_hash_is_empty(element_hash) {
+ return
+ }
+
+ probe_distance = map_probe_distance(m^, element_hash, la_pos)
+ if probe_distance == 0 {
+ return
+ }
+ // can be ideal?
+ if probe_distance < look_ahead {
+ pos = (la_pos - probe_distance) & mask
+ }
+ k_src = map_cell_index_dynamic(ks, info.ks, la_pos)
+ v_src = map_cell_index_dynamic(vs, info.vs, la_pos)
+ }
+ return
+ } else if distance < probe_distance - look_ahead {
+ // shift back probed
+ intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k_src), size_of_k)
+ intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v_src), size_of_v)
+ hs[pos] = element_hash
+ hs[la_pos] = 0
+ } else {
+ // place saved, save probed
+ if result == 0 {
+ result = v_dst
+ }
+ intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k), size_of_k)
+ intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v), size_of_v)
+ hs[pos] = h
+
+ intrinsics.mem_copy_non_overlapping(rawptr(k), rawptr(k_src), size_of_k)
+ intrinsics.mem_copy_non_overlapping(rawptr(v), rawptr(v_src), size_of_v)
+ h = hs[la_pos]
+ hs[la_pos] = 0
+ distance = probe_distance - look_ahead
+ }
+
+ pos = (pos + 1) & mask
+ distance += 1
+ }
}
@(require_results)
@@ -696,49 +746,19 @@ map_erase_dynamic :: #force_inline proc "contextless" (#no_alias m: ^Raw_Map, #n
m.len -= 1
ok = true
- { // coalesce tombstones
- // HACK NOTE(bill): This is an ugly bodge but it is coalescing the tombstone slots
[... the rest of the removed tombstone-coalescing code and the header of the next file's diff are elided here ...]
@(cold)
- handle_error :: proc "contextless" (file: string, line, column: i32, index, count: int) {
+ handle_error :: proc "contextless" (file: string, line, column: i32, index, count: int) -> ! {
print_caller_location(Source_Code_Location{file, line, column, ""})
print_string(" Index ")
print_i64(i64(index))
@@ -83,7 +83,7 @@ dynamic_array_expr_error :: proc "contextless" (file: string, line, column: i32,
return
}
@(cold)
- handle_error :: proc "contextless" (file: string, line, column: i32, low, high, max: int) {
+ handle_error :: proc "contextless" (file: string, line, column: i32, low, high, max: int) -> ! {
print_caller_location(Source_Code_Location{file, line, column, ""})
print_string(" Invalid dynamic array indices ")
print_i64(i64(low))
@@ -104,7 +104,7 @@ matrix_bounds_check_error :: proc "contextless" (file: string, line, column: i32
return
}
@(cold)
- handle_error :: proc "contextless" (file: string, line, column: i32, row_index, column_index, row_count, column_count: int) {
+ handle_error :: proc "contextless" (file: string, line, column: i32, row_index, column_index, row_count, column_count: int) -> ! {
print_caller_location(Source_Code_Location{file, line, column, ""})
print_string(" Matrix indices [")
print_i64(i64(row_index))
@@ -122,13 +122,13 @@ matrix_bounds_check_error :: proc "contextless" (file: string, line, column: i32
}
-when ODIN_DISALLOW_RTTI {
+when ODIN_NO_RTTI {
type_assertion_check :: proc "contextless" (ok: bool, file: string, line, column: i32) {
if ok {
return
}
@(cold)
- handle_error :: proc "contextless" (file: string, line, column: i32) {
+ handle_error :: proc "contextless" (file: string, line, column: i32) -> ! {
print_caller_location(Source_Code_Location{file, line, column, ""})
print_string(" Invalid type assertion\n")
type_assertion_trap()
@@ -141,7 +141,7 @@ when ODIN_DISALLOW_RTTI {
return
}
@(cold)
- handle_error :: proc "contextless" (file: string, line, column: i32) {
+ handle_error :: proc "contextless" (file: string, line, column: i32) -> ! {
print_caller_location(Source_Code_Location{file, line, column, ""})
print_string(" Invalid type assertion\n")
type_assertion_trap()
@@ -154,7 +154,7 @@ when ODIN_DISALLOW_RTTI {
return
}
@(cold)
- handle_error :: proc "contextless" (file: string, line, column: i32, from, to: typeid) {
+ handle_error :: proc "contextless" (file: string, line, column: i32, from, to: typeid) -> ! {
print_caller_location(Source_Code_Location{file, line, column, ""})
print_string(" Invalid type assertion from ")
print_typeid(from)
@@ -199,7 +199,7 @@ when ODIN_DISALLOW_RTTI {
}
@(cold)
- handle_error :: proc "contextless" (file: string, line, column: i32, from, to: typeid, from_data: rawptr) {
+ handle_error :: proc "contextless" (file: string, line, column: i32, from, to: typeid, from_data: rawptr) -> ! {
actual := variant_type(from, from_data)
@@ -225,7 +225,7 @@ make_slice_error_loc :: #force_inline proc "contextless" (loc := #caller_locatio
return
}
@(cold)
- handle_error :: proc "contextless" (loc: Source_Code_Location, len: int) {
+ handle_error :: proc "contextless" (loc: Source_Code_Location, len: int) -> ! {
print_caller_location(loc)
print_string(" Invalid slice length for make: ")
print_i64(i64(len))
@@ -240,7 +240,7 @@ make_dynamic_array_error_loc :: #force_inline proc "contextless" (using loc := #
return
}
@(cold)
- handle_error :: proc "contextless" (loc: Source_Code_Location, len, cap: int) {
+ handle_error :: proc "contextless" (loc: Source_Code_Location, len, cap: int) -> ! {
print_caller_location(loc)
print_string(" Invalid dynamic array parameters for make: ")
print_i64(i64(len))
@@ -257,7 +257,7 @@ make_map_expr_error_loc :: #force_inline proc "contextless" (loc := #caller_loca
return
}
@(cold)
- handle_error :: proc "contextless" (loc: Source_Code_Location, cap: int) {
+ handle_error :: proc "contextless" (loc: Source_Code_Location, cap: int) -> ! {
print_caller_location(loc)
print_string(" Invalid map capacity for make: ")
print_i64(i64(cap))
diff --git a/core/runtime/internal.odin b/core/runtime/internal.odin
index 71ad9386a..ad8a40acf 100644
--- a/core/runtime/internal.odin
+++ b/core/runtime/internal.odin
@@ -566,16 +566,37 @@ max_f64 :: #force_inline proc "contextless" (a, b: f64) -> f64 {
}
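+// The procedures below compute |x| as p*sqrt(1 + (q/p)*(q/p)), where p is the larger and q the
+// smaller of |real(x)| and |imag(x)|, so squaring the components cannot overflow for large magnitudes.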
abs_complex32 :: #force_inline proc "contextless" (x: complex32) -> f16 {
- r, i := real(x), imag(x)
- return f16(intrinsics.sqrt(f32(r*r + i*i)))
+ p, q := abs(real(x)), abs(imag(x))
+ if p < q {
+ p, q = q, p
+ }
+ if p == 0 {
+ return 0
+ }
+ q = q / p
+ return p * f16(intrinsics.sqrt(f32(1 + q*q)))
}
abs_complex64 :: #force_inline proc "contextless" (x: complex64) -> f32 {
- r, i := real(x), imag(x)
- return intrinsics.sqrt(r*r + i*i)
+ p, q := abs(real(x)), abs(imag(x))
+ if p < q {
+ p, q = q, p
+ }
+ if p == 0 {
+ return 0
+ }
+ q = q / p
+ return p * intrinsics.sqrt(1 + q*q)
}
abs_complex128 :: #force_inline proc "contextless" (x: complex128) -> f64 {
- r, i := real(x), imag(x)
- return intrinsics.sqrt(r*r + i*i)
+ p, q := abs(real(x)), abs(imag(x))
+ if p < q {
+ p, q = q, p
+ }
+ if p == 0 {
+ return 0
+ }
+ q = q / p
+ return p * intrinsics.sqrt(1 + q*q)
}
abs_quaternion64 :: #force_inline proc "contextless" (x: quaternion64) -> f16 {
r, i, j, k := real(x), imag(x), jmag(x), kmag(x)
diff --git a/core/runtime/print.odin b/core/runtime/print.odin
index 326a29667..732ed9c12 100644
--- a/core/runtime/print.odin
+++ b/core/runtime/print.odin
@@ -5,7 +5,7 @@ _INTEGER_DIGITS :: "0123456789abcdefghijklmnopqrstuvwxyz"
@(private="file")
_INTEGER_DIGITS_VAR := _INTEGER_DIGITS
-when !ODIN_DISALLOW_RTTI {
+when !ODIN_NO_RTTI {
print_any_single :: proc "contextless" (arg: any) {
x := arg
if loc, ok := x.(Source_Code_Location); ok {
@@ -234,7 +234,7 @@ print_caller_location :: proc "contextless" (using loc: Source_Code_Location) {
}
}
print_typeid :: proc "contextless" (id: typeid) {
- when ODIN_DISALLOW_RTTI {
+ when ODIN_NO_RTTI {
if id == nil {
print_string("nil")
} else {
diff --git a/core/slice/ptr.odin b/core/slice/ptr.odin
index e2f1c3e7b..b17a27dc8 100644
--- a/core/slice/ptr.odin
+++ b/core/slice/ptr.odin
@@ -1,7 +1,7 @@
package slice
import "core:builtin"
-import "core:mem"
+import "core:runtime"
ptr_add :: proc(p: $P/^$T, x: int) -> ^T {
return ([^]T)(p)[x:]
@@ -27,9 +27,9 @@ ptr_swap_non_overlapping :: proc(x, y: rawptr, len: int) {
a := rawptr(uintptr(x) + uintptr(i))
b := rawptr(uintptr(y) + uintptr(i))
- mem.copy(t, a, BLOCK_SIZE)
- mem.copy(a, b, BLOCK_SIZE)
- mem.copy(b, t, BLOCK_SIZE)
+ runtime.mem_copy(t, a, BLOCK_SIZE)
+ runtime.mem_copy(a, b, BLOCK_SIZE)
+ runtime.mem_copy(b, t, BLOCK_SIZE)
}
if i < len {
@@ -38,9 +38,9 @@ ptr_swap_non_overlapping :: proc(x, y: rawptr, len: int) {
a := rawptr(uintptr(x) + uintptr(i))
b := rawptr(uintptr(y) + uintptr(i))
- mem.copy(t, a, rem)
- mem.copy(a, b, rem)
- mem.copy(b, t, rem)
+ runtime.mem_copy(t, a, rem)
+ runtime.mem_copy(a, b, rem)
+ runtime.mem_copy(b, t, rem)
}
}
@@ -59,9 +59,9 @@ ptr_swap_overlapping :: proc(x, y: rawptr, len: int) {
for n := len; n > 0; n -= N {
m := builtin.min(n, N)
- mem.copy(&buffer, a, m)
- mem.copy(a, b, m)
- mem.copy(b, &buffer, m)
+ runtime.mem_copy(&buffer, a, m)
+ runtime.mem_copy(a, b, m)
+ runtime.mem_copy(b, &buffer, m)
a, b = a[N:], b[N:]
}
diff --git a/core/slice/slice.odin b/core/slice/slice.odin
index 412c90fc8..9a810141d 100644
--- a/core/slice/slice.odin
+++ b/core/slice/slice.odin
@@ -3,12 +3,12 @@ package slice
import "core:intrinsics"
import "core:builtin"
import "core:math/bits"
-import "core:mem"
+import "core:runtime"
_ :: intrinsics
_ :: builtin
_ :: bits
-_ :: mem
+_ :: runtime
/*
Turn a pointer and a length into a slice.
@@ -164,7 +164,7 @@ equal :: proc(a, b: $T/[]$E) -> bool where intrinsics.type_is_comparable(E) {
return false
}
when intrinsics.type_is_simple_compare(E) {
- return mem.compare_ptrs(raw_data(a), raw_data(b), len(a)*size_of(E)) == 0
+ return runtime.memory_compare(raw_data(a), raw_data(b), len(a)*size_of(E)) == 0
} else {
for i in 0..<len(a) {
[... the rest of this hunk is elided here ...]
@@ ... @@ simple_equal :: proc(a, b: $T/[]$E) -> bool where intrinsics.type_is_simple_comp
if len(a) != len(b) {
return false
}
- return mem.compare_ptrs(raw_data(a), raw_data(b), len(a)*size_of(E)) == 0
+ return runtime.memory_compare(raw_data(a), raw_data(b), len(a)*size_of(E)) == 0
}
/*
@@ -220,6 +220,12 @@ has_suffix :: proc(array: $T/[]$E, needle: E) -> bool where intrinsics.type_is_c
return false
}
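+// zero zeroes the memory of every element of the slice.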
+zero :: proc(array: $T/[]$E) #no_bounds_check {
+ if len(array) > 0 {
+ intrinsics.mem_zero(raw_data(array), size_of(E)*len(array))
+ }
+}
+
fill :: proc(array: $T/[]$E, value: E) #no_bounds_check {
if len(array) <= 0 {
return
@@ -250,7 +256,7 @@ swap_with_slice :: proc(a, b: $T/[]$E, loc := #caller_location) {
}
@(require_results)
-concatenate :: proc(a: []$T/[]$E, allocator := context.allocator) -> (res: T, err: mem.Allocator_Error) #optional_allocator_error {
+concatenate :: proc(a: []$T/[]$E, allocator := context.allocator) -> (res: T, err: runtime.Allocator_Error) #optional_allocator_error {
if len(a) == 0 {
return
}
@@ -268,7 +274,7 @@ concatenate :: proc(a: []$T/[]$E, allocator := context.allocator) -> (res: T, er
// copies a slice into a new slice
@(require_results)
-clone :: proc(a: $T/[]$E, allocator := context.allocator) -> ([]E, mem.Allocator_Error) #optional_allocator_error {
+clone :: proc(a: $T/[]$E, allocator := context.allocator) -> ([]E, runtime.Allocator_Error) #optional_allocator_error {
d, err := make([]E, len(a), allocator)
copy(d[:], a)
return d, err
@@ -276,7 +282,7 @@ clone :: proc(a: $T/[]$E, allocator := context.allocator) -> ([]E, mem.Allocator
// copies slice into a new dynamic array
-clone_to_dynamic :: proc(a: $T/[]$E, allocator := context.allocator) -> ([dynamic]E, mem.Allocator_Error) #optional_allocator_error {
+clone_to_dynamic :: proc(a: $T/[]$E, allocator := context.allocator) -> ([dynamic]E, runtime.Allocator_Error) #optional_allocator_error {
d, err := make([dynamic]E, len(a), allocator)
copy(d[:], a)
return d, err
@@ -286,12 +292,12 @@ to_dynamic :: clone_to_dynamic
// Converts slice into a dynamic array without cloning or allocating memory
@(require_results)
into_dynamic :: proc(a: $T/[]$E) -> [dynamic]E {
- s := transmute(mem.Raw_Slice)a
- d := mem.Raw_Dynamic_Array{
+ s := transmute(runtime.Raw_Slice)a
+ d := runtime.Raw_Dynamic_Array{
data = s.data,
len = 0,
cap = s.len,
- allocator = mem.nil_allocator(),
+ allocator = runtime.nil_allocator(),
}
return transmute([dynamic]E)d
}
@@ -373,7 +379,7 @@ as_ptr :: proc(array: $T/[]$E) -> [^]E {
@(require_results)
-mapper :: proc(s: $S/[]$U, f: proc(U) -> $V, allocator := context.allocator) -> (r: []V, err: mem.Allocator_Error) #optional_allocator_error {
+mapper :: proc(s: $S/[]$U, f: proc(U) -> $V, allocator := context.allocator) -> (r: []V, err: runtime.Allocator_Error) #optional_allocator_error {
r = make([]V, len(s), allocator) or_return
for v, i in s {
r[i] = f(v)
@@ -402,7 +408,7 @@ filter :: proc(s: $S/[]$U, f: proc(U) -> bool, allocator := context.allocator) -
}
@(require_results)
-scanner :: proc (s: $S/[]$U, initializer: $V, f: proc(V, U) -> V, allocator := context.allocator) -> (res: []V, err: mem.Allocator_Error) #optional_allocator_error {
+scanner :: proc (s: $S/[]$U, initializer: $V, f: proc(V, U) -> V, allocator := context.allocator) -> (res: []V, err: runtime.Allocator_Error) #optional_allocator_error {
if len(s) == 0 { return }
res = make([]V, len(s), allocator) or_return
diff --git a/core/strings/builder.odin b/core/strings/builder.odin
index edde4b297..28c56f6f9 100644
--- a/core/strings/builder.odin
+++ b/core/strings/builder.odin
@@ -164,36 +164,27 @@ builder_init :: proc{
builder_init_len_cap,
}
@(private)
-_builder_stream_vtable_obj := io.Stream_VTable{
- impl_write = proc(s: io.Stream, p: []byte) -> (n: int, err: io.Error) {
- b := (^Builder)(s.stream_data)
- n = write_bytes(b, p)
- if n < len(p) {
+_builder_stream_proc :: proc(stream_data: rawptr, mode: io.Stream_Mode, p: []byte, offset: i64, whence: io.Seek_From) -> (n: i64, err: io.Error) {
+ b := (^Builder)(stream_data)
+ #partial switch mode {
+ case .Write:
+ n = i64(write_bytes(b, p))
+ if n < i64(len(p)) {
err = .EOF
}
return
- },
- impl_write_byte = proc(s: io.Stream, c: byte) -> (err: io.Error) {
- b := (^Builder)(s.stream_data)
- n := write_byte(b, c)
- if n == 0 {
- err = .EOF
- }
+ case .Size:
+ n = i64(len(b.buf))
return
- },
- impl_size = proc(s: io.Stream) -> i64 {
- b := (^Builder)(s.stream_data)
- return i64(len(b.buf))
- },
- impl_destroy = proc(s: io.Stream) -> io.Error {
- b := (^Builder)(s.stream_data)
+ case .Destroy:
builder_destroy(b)
- return .None
- },
+ return
+ case .Query:
+ return io.query_utility({.Write, .Size, .Destroy, .Query})
+ }
+ return 0, .Empty
}
-// NOTE(dweiler): Work around a miscompilation bug on Linux still.
-@(private)
-_builder_stream_vtable := &_builder_stream_vtable_obj
+
/*
Returns an io.Stream from a Builder
@@ -204,7 +195,7 @@ Returns:
- res: the io.Stream
*/
to_stream :: proc(b: ^Builder) -> (res: io.Stream) {
- return io.Stream{stream_vtable=_builder_stream_vtable, stream_data=b}
+ return io.Stream{procedure=_builder_stream_proc, data=b}
}
/*
Returns an io.Writer from a Builder
diff --git a/core/strings/reader.odin b/core/strings/reader.odin
index 081e59b4b..bb49bf917 100644
--- a/core/strings/reader.odin
+++ b/core/strings/reader.odin
@@ -35,8 +35,8 @@ Returns:
- s: An io.Stream for the given Reader
*/
reader_to_stream :: proc(r: ^Reader) -> (s: io.Stream) {
- s.stream_data = r
- s.stream_vtable = &_reader_vtable
+ s.data = r
+ s.procedure = _reader_proc
return
}
/*
@@ -294,41 +294,21 @@ This VTable is used by the Reader struct to provide its functionality
as an `io.Stream`.
*/
@(private)
-_reader_vtable := io.Stream_VTable{
- impl_size = proc(s: io.Stream) -> i64 {
- r := (^Reader)(s.stream_data)
- return reader_size(r)
- },
- impl_read = proc(s: io.Stream, p: []byte) -> (n: int, err: io.Error) {
- r := (^Reader)(s.stream_data)
- return reader_read(r, p)
- },
- impl_read_at = proc(s: io.Stream, p: []byte, off: i64) -> (n: int, err: io.Error) {
- r := (^Reader)(s.stream_data)
- return reader_read_at(r, p, off)
- },
- impl_read_byte = proc(s: io.Stream) -> (byte, io.Error) {
- r := (^Reader)(s.stream_data)
- return reader_read_byte(r)
- },
- impl_unread_byte = proc(s: io.Stream) -> io.Error {
- r := (^Reader)(s.stream_data)
- return reader_unread_byte(r)
- },
- impl_read_rune = proc(s: io.Stream) -> (ch: rune, size: int, err: io.Error) {
- r := (^Reader)(s.stream_data)
- return reader_read_rune(r)
- },
- impl_unread_rune = proc(s: io.Stream) -> io.Error {
- r := (^Reader)(s.stream_data)
- return reader_unread_rune(r)
- },
- impl_seek = proc(s: io.Stream, offset: i64, whence: io.Seek_From) -> (i64, io.Error) {
- r := (^Reader)(s.stream_data)
- return reader_seek(r, offset, whence)
- },
- impl_write_to = proc(s: io.Stream, w: io.Writer) -> (n: i64, err: io.Error) {
- r := (^Reader)(s.stream_data)
- return reader_write_to(r, w)
- },
+_reader_proc :: proc(stream_data: rawptr, mode: io.Stream_Mode, p: []byte, offset: i64, whence: io.Seek_From) -> (n: i64, err: io.Error) {
+ r := (^Reader)(stream_data)
+ #partial switch mode {
+ case .Size:
+ n = reader_size(r)
+ return
+ case .Read:
+ return io._i64_err(reader_read(r, p))
+ case .Read_At:
+ return io._i64_err(reader_read_at(r, p, offset))
+ case .Seek:
+ n, err = reader_seek(r, offset, whence)
+ return
+ case .Query:
+ return io.query_utility({.Size, .Read, .Read_At, .Seek, .Query})
+ }
+ return 0, .Empty
}
diff --git a/core/strings/strings.odin b/core/strings/strings.odin
index 6daa5f9c9..66a75f96a 100644
--- a/core/strings/strings.odin
+++ b/core/strings/strings.odin
@@ -1194,7 +1194,7 @@ Output:
split_lines :: proc(s: string, allocator := context.allocator) -> (res: []string, err: mem.Allocator_Error) #optional_allocator_error {
sep :: "\n"
lines := _split(s, sep, 0, -1, allocator) or_return
- for line in &lines {
+ for &line in lines {
line = _trim_cr(line)
}
return lines, nil
@@ -1234,7 +1234,7 @@ Output:
split_lines_n :: proc(s: string, n: int, allocator := context.allocator) -> (res: []string, err: mem.Allocator_Error) #optional_allocator_error {
sep :: "\n"
lines := _split(s, sep, 0, n, allocator) or_return
- for line in &lines {
+ for &line in lines {
line = _trim_cr(line)
}
return lines, nil
@@ -1273,7 +1273,7 @@ Output:
split_lines_after :: proc(s: string, allocator := context.allocator) -> (res: []string, err: mem.Allocator_Error) #optional_allocator_error {
sep :: "\n"
lines := _split(s, sep, len(sep), -1, allocator) or_return
- for line in &lines {
+ for &line in lines {
line = _trim_cr(line)
}
return lines, nil
@@ -1314,7 +1314,7 @@ Output:
split_lines_after_n :: proc(s: string, n: int, allocator := context.allocator) -> (res: []string, err: mem.Allocator_Error) #optional_allocator_error {
sep :: "\n"
lines := _split(s, sep, len(sep), n, allocator) or_return
- for line in &lines {
+ for &line in lines {
line = _trim_cr(line)
}
return lines, nil
diff --git a/core/sync/primitives.odin b/core/sync/primitives.odin
index b8bcfad70..5e71f6336 100644
--- a/core/sync/primitives.odin
+++ b/core/sync/primitives.odin
@@ -7,10 +7,21 @@ current_thread_id :: proc "contextless" () -> int {
return _current_thread_id()
}
-// A Mutex is a mutual exclusion lock
-// The zero value for a Mutex is an unlocked mutex
+// A Mutex is a [[mutual exclusion lock; https://en.wikipedia.org/wiki/Mutual_exclusion]]
+// It can be used to prevent more than one thread from executing the same piece of code
+// at the same time, and thus to prevent multiple threads from accessing the same memory
+// simultaneously.
//
-// A Mutex must not be copied after first use
+// A Mutex's zero value represents an initial, *unlocked* state.
+//
+// If a thread tries to take the lock while another thread holds it, it will pause
+// until the lock is released. Code or memory that is "surrounded" by a mutex lock is said
+// to be "guarded by a mutex".
+//
+// A Mutex must not be copied after first use (e.g., after locking it the first time).
+// This is because, in order to coordinate with other threads, all threads must watch
+// the same memory address to know when the lock has been released. Trying to use a
+// copy of the lock at a different memory address will result in broken and unsafe
+// behavior. For this reason, Mutexes are marked as `#no_copy`.
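+//
+// Example (illustrative, using this package's mutex_lock/mutex_unlock):
+//
+//	m: Mutex
+//	mutex_lock(&m)
+//	defer mutex_unlock(&m)
+//	// ... code guarded by m ...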
Mutex :: struct #no_copy {
impl: _Mutex,
}
diff --git a/core/sys/unix/syscalls_linux.odin b/core/sys/unix/syscalls_linux.odin
index abdcf0b92..3083c084b 100644
--- a/core/sys/unix/syscalls_linux.odin
+++ b/core/sys/unix/syscalls_linux.odin
@@ -1567,6 +1567,23 @@ MADV_HWPOISON :: 100
// pipe2 flags
O_CLOEXEC :: 0o2000000
+// poll events
+POLLIN :: 0x0001
+POLLPRI :: 0x0002
+POLLOUT :: 0x0004
+POLLERR :: 0x0008
+POLLHUP :: 0x0010
+POLLNVAL :: 0x0020
+POLLRDNORM :: 0x0040
+POLLRDBAND :: 0x0080
+POLLWRNORM :: 0x0100
+POLLWRBAND :: 0x0200
+POLLMSG :: 0x0400
+POLLREMOVE :: 0x1000
+POLLRDHUP :: 0x2000
+POLLFREE :: 0x4000
+POLL_BUSY_LOOP :: 0x8000
+
// perf event data
Perf_Sample :: struct #raw_union {
period: u64,
@@ -2057,6 +2074,23 @@ sys_fcntl :: proc "contextless" (fd: int, cmd: int, arg: int) -> int {
return int(intrinsics.syscall(SYS_fcntl, uintptr(fd), uintptr(cmd), uintptr(arg)))
}
+sys_poll :: proc "contextless" (fds: rawptr, nfds: uint, timeout: int) -> int {
+ // NOTE: specialcased here because `arm64` does not have `poll`
+ when ODIN_ARCH == .arm64 {
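+ // ppoll takes a timespec rather than a millisecond timeout, so convert the timeout here.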
+ seconds := i64(timeout / 1_000)
+ nanoseconds := i64((timeout % 1_000) * 1_000_000)
+ timeout_spec := timespec{seconds, nanoseconds}
+
+ return int(intrinsics.syscall(SYS_ppoll, uintptr(fds), uintptr(nfds), uintptr(&timeout_spec), uintptr(0), uintptr(8)))
+ } else {
+ return int(intrinsics.syscall(SYS_poll, uintptr(fds), uintptr(nfds), uintptr(timeout)))
+ }
+}
+
+sys_ppoll :: proc "contextless" (fds: rawptr, nfds: uint, timeout: rawptr, sigmask: rawptr, sigsetsize: uint) -> int {
+ return int(intrinsics.syscall(SYS_ppoll, uintptr(fds), uintptr(nfds), uintptr(timeout), uintptr(sigmask), uintptr(sigsetsize)))
+}
+
get_errno :: proc "contextless" (res: int) -> i32 {
if res < 0 && res > -4096 {
return i32(-res)
diff --git a/core/sys/windows/kernel32.odin b/core/sys/windows/kernel32.odin
index beed3a7e5..fcd9e55ed 100644
--- a/core/sys/windows/kernel32.odin
+++ b/core/sys/windows/kernel32.odin
@@ -159,6 +159,11 @@ foreign kernel32 {
WaitForSingleObject :: proc(hHandle: HANDLE, dwMilliseconds: DWORD) -> DWORD ---
Sleep :: proc(dwMilliseconds: DWORD) ---
GetProcessId :: proc(handle: HANDLE) -> DWORD ---
+ CopyFileW :: proc(
+ lpExistingFileName: LPCWSTR,
+ lpNewFileName: LPCWSTR,
+ bFailIfExists: BOOL,
+ ) -> BOOL ---
CopyFileExW :: proc(
lpExistingFileName: LPCWSTR,
lpNewFileName: LPCWSTR,
diff --git a/core/sys/windows/ws2_32.odin b/core/sys/windows/ws2_32.odin
index 631ef4241..7b9cf1b89 100644
--- a/core/sys/windows/ws2_32.odin
+++ b/core/sys/windows/ws2_32.odin
@@ -206,4 +206,14 @@ foreign ws2_32 {
optval: ^c_char,
optlen: ^c_int,
) -> c_int ---
+ // [MS-Docs](https://learn.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-ntohl)
+ ntohl :: proc(netlong: c_ulong) -> c_ulong ---
+ // [MS-Docs](https://learn.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-ntohs)
+ ntohs :: proc(netshort: c_ushort) -> c_ushort ---
+ // [MS-Docs](https://learn.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-htonl)
+ @(deprecated="Use endian specific integers instead, https://odin-lang.org/docs/overview/#basic-types")
+ htonl :: proc(hostlong: c_ulong) -> c_ulong ---
+ // [MS-Docs](https://learn.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-htons)
+ @(deprecated="Use endian specific integers instead, https://odin-lang.org/docs/overview/#basic-types")
+ htons :: proc(hostshort: c_ushort) -> c_ushort ---
}
diff --git a/core/testing/runner_windows.odin b/core/testing/runner_windows.odin
index 525eae685..17bcfce26 100644
--- a/core/testing/runner_windows.odin
+++ b/core/testing/runner_windows.odin
@@ -191,7 +191,7 @@ run_internal_test :: proc(t: ^T, it: Internal_Test) {
global_exception_handler = win32.AddVectoredExceptionHandler(0, exception_handler_proc)
context.assertion_failure_proc = proc(prefix, message: string, loc: runtime.Source_Code_Location) -> ! {
- errorf(t=global_current_t, format="%s %s", args={prefix, message}, loc=loc)
+ errorf(global_current_t, "%s %s", prefix, message, loc=loc)
intrinsics.trap()
}
diff --git a/core/testing/testing.odin b/core/testing/testing.odin
index 37f9fe4d9..1ba05315c 100644
--- a/core/testing/testing.odin
+++ b/core/testing/testing.odin
@@ -4,6 +4,9 @@ import "core:fmt"
import "core:io"
import "core:time"
import "core:intrinsics"
+import "core:reflect"
+
+_ :: reflect // alias reflect to nothing to force visibility for -vet
// IMPORTANT NOTE: Compiler requires this layout
Test_Signature :: proc(^T)
@@ -46,15 +49,15 @@ errorf :: proc(t: ^T, format: string, args: ..any, loc := #caller_location) {
}
fail :: proc(t: ^T, loc := #caller_location) {
- error(t=t, args={"FAIL"}, loc=loc)
+ error(t, "FAIL", loc=loc)
t.error_count += 1
}
fail_now :: proc(t: ^T, msg := "", loc := #caller_location) {
if msg != "" {
- error(t=t, args={"FAIL:", msg}, loc=loc)
+ error(t, "FAIL:", msg, loc=loc)
} else {
- error(t=t, args={"FAIL"}, loc=loc)
+ error(t, "FAIL", loc=loc)
}
t.error_count += 1
if t._fail_now != nil {
@@ -84,14 +87,14 @@ cleanup :: proc(t: ^T, procedure: proc(rawptr), user_data: rawptr) {
expect :: proc(t: ^T, ok: bool, msg: string = "", loc := #caller_location) -> bool {
if !ok {
- error(t=t, args={msg}, loc=loc)
+ error(t, msg, loc=loc)
}
return ok
}
expect_value :: proc(t: ^T, value, expected: $T, loc := #caller_location) -> bool where intrinsics.type_is_comparable(T) {
- ok := value == expected
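+ // Also treat the values as equal when both are nil according to reflect.is_nil.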
+ ok := value == expected || reflect.is_nil(value) && reflect.is_nil(expected)
if !ok {
- errorf(t=t, format="expected %v, got %v", args={expected, value}, loc=loc)
+ errorf(t, "expected %v, got %v", expected, value, loc=loc)
}
return ok
}
@@ -100,4 +103,4 @@ expect_value :: proc(t: ^T, value, expected: $T, loc := #caller_location) -> boo
set_fail_timeout :: proc(t: ^T, duration: time.Duration, loc := #caller_location) {
_fail_timeout(t, duration, loc)
-}
\ No newline at end of file
+}
diff --git a/core/text/edit/text_edit.odin b/core/text/edit/text_edit.odin
index c49a5d0d1..8520ba674 100644
--- a/core/text/edit/text_edit.odin
+++ b/core/text/edit/text_edit.odin
@@ -113,15 +113,16 @@ set_text :: proc(s: ^State, text: string) {
}
-undo_state_push :: proc(s: ^State, undo: ^[dynamic]^Undo_State) {
+undo_state_push :: proc(s: ^State, undo: ^[dynamic]^Undo_State) -> mem.Allocator_Error {
text := string(s.builder.buf[:])
- item := (^Undo_State)(mem.alloc(size_of(Undo_State) + len(text), align_of(Undo_State), s.undo_text_allocator))
+ item := (^Undo_State)(mem.alloc(size_of(Undo_State) + len(text), align_of(Undo_State), s.undo_text_allocator) or_return)
item.selection = s.selection
item.len = len(text)
#no_bounds_check {
runtime.copy(item.text[:len(text)], text)
}
- append(undo, item)
+ append(undo, item) or_return
+ return nil
}
undo :: proc(s: ^State, undo, redo: ^[dynamic]^Undo_State) {
diff --git a/core/text/i18n/i18n.odin b/core/text/i18n/i18n.odin
index 9d030db16..8513f30c8 100644
--- a/core/text/i18n/i18n.odin
+++ b/core/text/i18n/i18n.odin
@@ -170,8 +170,8 @@ destroy :: proc(catalog: ^Translation = ACTIVE, allocator := context.allocator)
return
}
- for section in &catalog.k_v {
- for key in &catalog.k_v[section] {
+ for section in catalog.k_v {
+ for key in catalog.k_v[section] {
delete(catalog.k_v[section][key])
}
delete(catalog.k_v[section])
diff --git a/core/text/match/strlib.odin b/core/text/match/strlib.odin
index b8c2861fa..654996bc7 100644
--- a/core/text/match/strlib.odin
+++ b/core/text/match/strlib.odin
@@ -266,6 +266,7 @@ match_balance :: proc(ms: ^Match_State, s, p: int) -> (unused: int, err: Error)
return INVALID, .Invalid_Pattern_Capture
}
+
schar, ssize := utf8_peek(ms.src[s:]) or_return
pchar, psize := utf8_peek(ms.pattern[p:]) or_return
@@ -274,9 +275,9 @@ match_balance :: proc(ms: ^Match_State, s, p: int) -> (unused: int, err: Error)
return INVALID, .OK
}
- s_begin := s
cont := 1
- s := s + ssize
+ s := s
+ s += ssize
begin := pchar
end, _ := utf8_peek(ms.pattern[p + psize:]) or_return
diff --git a/core/text/table/table.odin b/core/text/table/table.odin
index df93ee44e..8d96cb26f 100644
--- a/core/text/table/table.odin
+++ b/core/text/table/table.odin
@@ -9,12 +9,10 @@
package text_table
import "core:io"
-import "core:os"
import "core:fmt"
import "core:mem"
import "core:mem/virtual"
import "core:runtime"
-import "core:strings"
Cell :: struct {
text: string,
@@ -116,7 +114,7 @@ set_cell_alignment :: proc(tbl: ^Table, row, col: int, alignment: Cell_Alignment
format :: proc(tbl: ^Table, _fmt: string, args: ..any, loc := #caller_location) -> string {
context.allocator = tbl.format_allocator
- return fmt.aprintf(fmt = _fmt, args = args)
+ return fmt.aprintf(_fmt, ..args)
}
header :: proc(tbl: ^Table, values: ..any, loc := #caller_location) {
diff --git a/core/thread/thread_pool.odin b/core/thread/thread_pool.odin
index 820de8ad4..1a4119e5f 100644
--- a/core/thread/thread_pool.odin
+++ b/core/thread/thread_pool.odin
@@ -81,7 +81,7 @@ pool_destroy :: proc(pool: ^Pool) {
delete(pool.tasks)
delete(pool.tasks_done)
- for t in &pool.threads {
+ for &t in pool.threads {
destroy(t)
}
diff --git a/core/unicode/tools/generate_entity_table.odin b/core/unicode/tools/generate_entity_table.odin
index 328ba9091..fb4e4c2a4 100644
--- a/core/unicode/tools/generate_entity_table.odin
+++ b/core/unicode/tools/generate_entity_table.odin
@@ -221,7 +221,7 @@ named_xml_entity_to_rune :: proc(name: string) -> (decoded: rune, ok: bool) {
delete(entity_map)
delete(names)
- for name in &names {
+ for &name in names {
free(&name)
}
}
diff --git a/examples/all/all_experimental.odin b/examples/all/all_experimental.odin
index 9679e0ca4..cd60c269c 100644
--- a/examples/all/all_experimental.odin
+++ b/examples/all/all_experimental.odin
@@ -3,8 +3,6 @@ package all
import c_tokenizer "core:c/frontend/tokenizer"
import c_preprocessor "core:c/frontend/preprocessor"
-import virtual "core:mem/virtual"
_ :: c_tokenizer
_ :: c_preprocessor
-_ :: virtual
diff --git a/examples/all/all_main.odin b/examples/all/all_main.odin
index 9515d2a00..c6b727e42 100644
--- a/examples/all/all_main.odin
+++ b/examples/all/all_main.odin
@@ -19,6 +19,8 @@ import priority_queue "core:container/priority_queue"
import queue "core:container/queue"
import small_array "core:container/small_array"
import lru "core:container/lru"
+import list "core:container/intrusive/list"
+import topological_sort "core:container/topological_sort"
import crypto "core:crypto"
import blake "core:crypto/blake"
@@ -48,6 +50,8 @@ import crypto_util "core:crypto/util"
import whirlpool "core:crypto/whirlpool"
import x25519 "core:crypto/x25519"
+import pe "core:debug/pe"
+
import dynlib "core:dynlib"
import net "core:net"
@@ -58,9 +62,11 @@ import hxa "core:encoding/hxa"
import json "core:encoding/json"
import varint "core:encoding/varint"
import xml "core:encoding/xml"
+import endian "core:encoding/endian"
import fmt "core:fmt"
import hash "core:hash"
+import xxhash "core:hash/xxhash"
import image "core:image"
import netpbm "core:image/netpbm"
@@ -80,9 +86,11 @@ import glm "core:math/linalg/glsl"
import hlm "core:math/linalg/hlsl"
import noise "core:math/noise"
import rand "core:math/rand"
+import ease "core:math/ease"
+import cmplx "core:math/cmplx"
import mem "core:mem"
-// import virtual "core:mem/virtual"
+import virtual "core:mem/virtual"
import ast "core:odin/ast"
import doc_format "core:odin/doc-format"
@@ -91,6 +99,8 @@ import odin_parser "core:odin/parser"
import odin_printer "core:odin/printer"
import odin_tokenizer "core:odin/tokenizer"
+import spall "core:prof/spall"
+
import os "core:os"
import slashpath "core:path/slashpath"
@@ -108,6 +118,9 @@ import sync "core:sync"
import testing "core:testing"
import scanner "core:text/scanner"
import i18n "core:text/i18n"
+import match "core:text/match"
+import table "core:text/table"
+import edit "core:text/edit"
import thread "core:thread"
import time "core:time"
@@ -134,6 +147,8 @@ _ :: priority_queue
_ :: queue
_ :: small_array
_ :: lru
+_ :: list
+_ :: topological_sort
_ :: crypto
_ :: blake
_ :: blake2b
@@ -161,6 +176,7 @@ _ :: tiger2
_ :: crypto_util
_ :: whirlpool
_ :: x25519
+_ :: pe
_ :: dynlib
_ :: net
_ :: base32
@@ -170,8 +186,10 @@ _ :: hxa
_ :: json
_ :: varint
_ :: xml
+_ :: endian
_ :: fmt
_ :: hash
+_ :: xxhash
_ :: image
_ :: netpbm
_ :: png
@@ -188,7 +206,10 @@ _ :: glm
_ :: hlm
_ :: noise
_ :: rand
+_ :: ease
+_ :: cmplx
_ :: mem
+_ :: virtual
_ :: ast
_ :: doc_format
_ :: odin_format
@@ -196,6 +217,7 @@ _ :: odin_parser
_ :: odin_printer
_ :: odin_tokenizer
_ :: os
+_ :: spall
_ :: slashpath
_ :: filepath
_ :: reflect
@@ -210,6 +232,9 @@ _ :: sync
_ :: testing
_ :: scanner
_ :: i18n
+_ :: match
+_ :: table
+_ :: edit
_ :: thread
_ :: time
_ :: sysinfo
diff --git a/examples/all/all_vendor.odin b/examples/all/all_vendor.odin
index 22c55c14e..fa1e8d995 100644
--- a/examples/all/all_vendor.odin
+++ b/examples/all/all_vendor.odin
@@ -1,6 +1,23 @@
package all
-import botan "vendor:botan"
+import botan_bindings "vendor:botan/bindings"
+import botan_blake2b "vendor:botan/blake2b"
+import gost "vendor:botan/gost"
+import keccak "vendor:botan/keccak"
+import md4 "vendor:botan/md4"
+import md5 "vendor:botan/md5"
+import ripemd "vendor:botan/ripemd"
+import sha1 "vendor:botan/sha1"
+import sha2 "vendor:botan/sha2"
+import sha3 "vendor:botan/sha3"
+import shake "vendor:botan/shake"
+import siphash "vendor:botan/siphash"
+import skein512 "vendor:botan/skein512"
+import sm3 "vendor:botan/sm3"
+import streebog "vendor:botan/streebog"
+import tiger "vendor:botan/tiger"
+import whirlpool "vendor:botan/whirlpool"
+
import cgltf "vendor:cgltf"
// import commonmark "vendor:commonmark"
import ENet "vendor:ENet"
@@ -30,7 +47,29 @@ import CA "vendor:darwin/QuartzCore"
// NOTE(bill): only one can be checked at a time
import lua_5_4 "vendor:lua/5.4"
-_ :: botan
+import nvg "vendor:nanovg"
+import nvg_gl "vendor:nanovg/gl"
+import fontstash "vendor:fontstash"
+
+_ :: botan_bindings
+_ :: botan_blake2b
+_ :: gost
+_ :: keccak
+_ :: md4
+_ :: md5
+_ :: ripemd
+_ :: sha1
+_ :: sha2
+_ :: sha3
+_ :: shake
+_ :: siphash
+_ :: skein512
+_ :: sm3
+_ :: streebog
+_ :: tiger
+_ :: whirlpool
+
+
_ :: cgltf
// _ :: commonmark
_ :: ENet
@@ -57,4 +96,8 @@ _ :: MTL
_ :: MTK
_ :: CA
-_ :: lua_5_4
\ No newline at end of file
+_ :: lua_5_4
+
+_ :: nvg
+_ :: nvg_gl
+_ :: fontstash
\ No newline at end of file
diff --git a/examples/demo/demo.odin b/examples/demo/demo.odin
index 3b9aa73ca..7c98ca728 100644
--- a/examples/demo/demo.odin
+++ b/examples/demo/demo.odin
@@ -352,7 +352,7 @@ control_flow :: proc() {
if false {
f, err := os.open("my_file.txt")
- if err != 0 {
+ if err != os.ERROR_NONE {
// handle error
}
defer os.close(f)
@@ -1175,13 +1175,13 @@ threading_example :: proc() {
N :: 3
pool: thread.Pool
- thread.pool_init(pool=&pool, thread_count=N, allocator=context.allocator)
+ thread.pool_init(&pool, allocator=context.allocator, thread_count=N)
defer thread.pool_destroy(&pool)
for i in 0..<30 {
// be mindful of the allocator used for tasks. The allocator needs to be thread safe, or be owned by the task for exclusive use
- thread.pool_add_task(pool=&pool, procedure=task_proc, data=nil, user_index=i, allocator=context.allocator)
+ thread.pool_add_task(&pool, allocator=context.allocator, procedure=task_proc, data=nil, user_index=i)
}
thread.pool_start(&pool)
diff --git a/misc/shell.bat b/misc/shell.bat
index 60f603bc1..bfb444396 100644
--- a/misc/shell.bat
+++ b/misc/shell.bat
@@ -7,5 +7,7 @@ rem call "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Auxil
rem call "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Auxiliary\Build\vcvarsall.bat" x64 1> NUL
set _NO_DEBUG_HEAP=1
+set ODIN_IGNORE_MSVC_CHECK=1
+
rem set path=w:\Odin\misc;%path%
cls
diff --git a/src/array.cpp b/src/array.cpp
index f1a1f93e2..d8e25d25d 100644
--- a/src/array.cpp
+++ b/src/array.cpp
@@ -80,7 +80,9 @@ gb_internal Slice slice_make(gbAllocator const &allocator, isize count) {
GB_ASSERT(count >= 0);
Slice s = {};
s.data = gb_alloc_array(allocator, T, count);
- GB_ASSERT(s.data != nullptr);
+ if (count > 0) {
+ GB_ASSERT(s.data != nullptr);
+ }
s.count = count;
return s;
}
diff --git a/src/build_settings.cpp b/src/build_settings.cpp
index 92e0df38b..866631f9a 100644
--- a/src/build_settings.cpp
+++ b/src/build_settings.cpp
@@ -309,7 +309,7 @@ struct BuildContext {
bool copy_file_contents;
- bool disallow_rtti;
+ bool no_rtti;
bool dynamic_map_calls;
@@ -1227,8 +1227,8 @@ gb_internal void init_build_context(TargetMetrics *cross_target) {
if (bc->metrics.os == TargetOs_freestanding) {
bc->no_entry_point = true;
} else {
- if (bc->disallow_rtti) {
- gb_printf_err("-disallow-rtti is only allowed on freestanding targets\n");
+ if (bc->no_rtti) {
+ gb_printf_err("-no-rtti is only allowed on freestanding targets\n");
gb_exit(1);
}
}
diff --git a/src/check_builtin.cpp b/src/check_builtin.cpp
index 46ee6b7f9..269a0ec48 100644
--- a/src/check_builtin.cpp
+++ b/src/check_builtin.cpp
@@ -2063,7 +2063,7 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As
if (c->scope->flags&ScopeFlag_Global) {
compiler_error("'type_info_of' Cannot be declared within the runtime package due to how the internals of the compiler works");
}
- if (build_context.disallow_rtti) {
+ if (build_context.no_rtti) {
error(call, "'%.*s' has been disallowed", LIT(builtin_name));
return false;
}
@@ -2106,7 +2106,7 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As
if (c->scope->flags&ScopeFlag_Global) {
compiler_error("'typeid_of' Cannot be declared within the runtime package due to how the internals of the compiler works");
}
- if (build_context.disallow_rtti) {
+ if (build_context.no_rtti) {
error(call, "'%.*s' has been disallowed", LIT(builtin_name));
return false;
}
diff --git a/src/check_decl.cpp b/src/check_decl.cpp
index b651e33e6..2b2fb867c 100644
--- a/src/check_decl.cpp
+++ b/src/check_decl.cpp
@@ -757,6 +757,66 @@ gb_internal String handle_link_name(CheckerContext *ctx, Token token, String lin
return link_name;
}
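+// Validates the @(objc_name), @(objc_type), and @(objc_is_class_method) attribute combination
+// on an entity and registers it in the Objective-C metadata of the named type.
+// Factored out of check_proc_decl so that procedure groups can reuse the same checks.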
+gb_internal void check_objc_methods(CheckerContext *ctx, Entity *e, AttributeContext const &ac) {
+ if (!(ac.objc_name.len || ac.objc_is_class_method || ac.objc_type)) {
+ return;
+ }
+ if (ac.objc_name.len == 0 && ac.objc_is_class_method) {
+ error(e->token, "@(objc_name) is required with @(objc_is_class_method)");
+ } else if (ac.objc_type == nullptr) {
+ error(e->token, "@(objc_name) requires that @(objc_type) to be set");
+ } else if (ac.objc_name.len == 0 && ac.objc_type) {
+ error(e->token, "@(objc_name) is required with @(objc_type)");
+ } else {
+ Type *t = ac.objc_type;
+ if (t->kind == Type_Named) {
+ Entity *tn = t->Named.type_name;
+
+ GB_ASSERT(tn->kind == Entity_TypeName);
+
+ if (tn->scope != e->scope) {
+ error(e->token, "@(objc_name) attribute may only be applied to procedures and types within the same scope");
+ } else {
+ mutex_lock(&global_type_name_objc_metadata_mutex);
+ defer (mutex_unlock(&global_type_name_objc_metadata_mutex));
+
+ if (!tn->TypeName.objc_metadata) {
+ tn->TypeName.objc_metadata = create_type_name_obj_c_metadata();
+ }
+ auto *md = tn->TypeName.objc_metadata;
+ mutex_lock(md->mutex);
+ defer (mutex_unlock(md->mutex));
+
+ if (!ac.objc_is_class_method) {
+ bool ok = true;
+ for (TypeNameObjCMetadataEntry const &entry : md->value_entries) {
+ if (entry.name == ac.objc_name) {
+ error(e->token, "Previous declaration of @(objc_name=\"%.*s\")", LIT(ac.objc_name));
+ ok = false;
+ break;
+ }
+ }
+ if (ok) {
+ array_add(&md->value_entries, TypeNameObjCMetadataEntry{ac.objc_name, e});
+ }
+ } else {
+ bool ok = true;
+ for (TypeNameObjCMetadataEntry const &entry : md->type_entries) {
+ if (entry.name == ac.objc_name) {
+ error(e->token, "Previous declaration of @(objc_name=\"%.*s\")", LIT(ac.objc_name));
+ ok = false;
+ break;
+ }
+ }
+ if (ok) {
+ array_add(&md->type_entries, TypeNameObjCMetadataEntry{ac.objc_name, e});
+ }
+ }
+ }
+ }
+ }
+}
+
gb_internal void check_proc_decl(CheckerContext *ctx, Entity *e, DeclInfo *d) {
GB_ASSERT(e->type == nullptr);
if (d->proc_lit->kind != Ast_ProcLit) {
@@ -840,62 +900,7 @@ gb_internal void check_proc_decl(CheckerContext *ctx, Entity *e, DeclInfo *d) {
}
e->Procedure.optimization_mode = cast(ProcedureOptimizationMode)ac.optimization_mode;
- if (ac.objc_name.len || ac.objc_is_class_method || ac.objc_type) {
- if (ac.objc_name.len == 0 && ac.objc_is_class_method) {
- error(e->token, "@(objc_name) is required with @(objc_is_class_method)");
- } else if (ac.objc_type == nullptr) {
- error(e->token, "@(objc_name) requires that @(objc_type) to be set");
- } else if (ac.objc_name.len == 0 && ac.objc_type) {
- error(e->token, "@(objc_name) is required with @(objc_type)");
- } else {
- Type *t = ac.objc_type;
- if (t->kind == Type_Named) {
- Entity *tn = t->Named.type_name;
-
- GB_ASSERT(tn->kind == Entity_TypeName);
-
- if (tn->scope != e->scope) {
- error(e->token, "@(objc_name) attribute may only be applied to procedures and types within the same scope");
- } else {
- mutex_lock(&global_type_name_objc_metadata_mutex);
- defer (mutex_unlock(&global_type_name_objc_metadata_mutex));
-
- if (!tn->TypeName.objc_metadata) {
- tn->TypeName.objc_metadata = create_type_name_obj_c_metadata();
- }
- auto *md = tn->TypeName.objc_metadata;
- mutex_lock(md->mutex);
- defer (mutex_unlock(md->mutex));
-
- if (!ac.objc_is_class_method) {
- bool ok = true;
- for (TypeNameObjCMetadataEntry const &entry : md->value_entries) {
- if (entry.name == ac.objc_name) {
- error(e->token, "Previous declaration of @(objc_name=\"%.*s\")", LIT(ac.objc_name));
- ok = false;
- break;
- }
- }
- if (ok) {
- array_add(&md->value_entries, TypeNameObjCMetadataEntry{ac.objc_name, e});
- }
- } else {
- bool ok = true;
- for (TypeNameObjCMetadataEntry const &entry : md->type_entries) {
- if (entry.name == ac.objc_name) {
- error(e->token, "Previous declaration of @(objc_name=\"%.*s\")", LIT(ac.objc_name));
- ok = false;
- break;
- }
- }
- if (ok) {
- array_add(&md->type_entries, TypeNameObjCMetadataEntry{ac.objc_name, e});
- }
- }
- }
- }
- }
- }
+ check_objc_methods(ctx, e, ac);
if (ac.require_target_feature.len != 0 && ac.enable_target_feature.len != 0) {
error(e->token, "Attributes @(require_target_feature=...) and @(enable_target_feature=...) cannot be used together");
@@ -1241,7 +1246,7 @@ gb_internal void check_global_variable_decl(CheckerContext *ctx, Entity *&e, Ast
check_rtti_type_disallowed(e->token, e->type, "A variable declaration is using a type, %s, which has been disallowed");
}
-gb_internal void check_proc_group_decl(CheckerContext *ctx, Entity *&pg_entity, DeclInfo *d) {
+gb_internal void check_proc_group_decl(CheckerContext *ctx, Entity *pg_entity, DeclInfo *d) {
GB_ASSERT(pg_entity->kind == Entity_ProcGroup);
auto *pge = &pg_entity->ProcGroup;
String proc_group_name = pg_entity->token.string;
@@ -1366,6 +1371,11 @@ gb_internal void check_proc_group_decl(CheckerContext *ctx, Entity *&pg_entity,
}
}
+ AttributeContext ac = {};
+ check_decl_attributes(ctx, d->attributes, proc_group_attribute, &ac);
+ check_objc_methods(ctx, pg_entity, ac);
+
+
}
gb_internal void check_entity_decl(CheckerContext *ctx, Entity *e, DeclInfo *d, Type *named_type) {
diff --git a/src/check_expr.cpp b/src/check_expr.cpp
index 830b5315d..98154f33d 100644
--- a/src/check_expr.cpp
+++ b/src/check_expr.cpp
@@ -14,6 +14,7 @@ enum CallArgumentError {
CallArgumentError_ParameterMissing,
CallArgumentError_DuplicateParameter,
CallArgumentError_NoneConstantParameter,
+ CallArgumentError_OutOfOrderParameters,
CallArgumentError_MAX,
};
@@ -33,12 +34,13 @@ gb_global char const *CallArgumentError_strings[CallArgumentError_MAX] = {
"ParameterMissing",
"DuplicateParameter",
"NoneConstantParameter",
+ "OutOfOrderParameters",
};
-enum CallArgumentErrorMode {
- CallArgumentMode_NoErrors,
- CallArgumentMode_ShowErrors,
+enum struct CallArgumentErrorMode {
+ NoErrors,
+ ShowErrors,
};
struct CallArgumentData {
@@ -65,11 +67,6 @@ gb_internal int valid_index_and_score_cmp(void const *a, void const *b) {
-#define CALL_ARGUMENT_CHECKER(name) CallArgumentError name(CheckerContext *c, Ast *call, Type *proc_type, Entity *entity, Array operands, CallArgumentErrorMode show_error_mode, CallArgumentData *data)
-typedef CALL_ARGUMENT_CHECKER(CallArgumentCheckerType);
-
-
-
gb_internal void check_expr (CheckerContext *c, Operand *operand, Ast *expression);
gb_internal void check_multi_expr (CheckerContext *c, Operand *operand, Ast *expression);
gb_internal void check_multi_expr_or_type (CheckerContext *c, Operand *operand, Ast *expression);
@@ -94,14 +91,13 @@ gb_internal void check_stmt (CheckerContext *c, Ast *nod
gb_internal void check_stmt_list (CheckerContext *c, Slice const &stmts, u32 flags);
gb_internal void check_init_constant (CheckerContext *c, Entity *e, Operand *operand);
gb_internal bool check_representable_as_constant(CheckerContext *c, ExactValue in_value, Type *type, ExactValue *out_value);
-gb_internal bool check_procedure_type (CheckerContext *c, Type *type, Ast *proc_type_node, Array *operands = nullptr);
+gb_internal bool check_procedure_type (CheckerContext *c, Type *type, Ast *proc_type_node, Array const *operands = nullptr);
gb_internal void check_struct_type (CheckerContext *c, Type *struct_type, Ast *node, Array *poly_operands,
Type *named_type = nullptr, Type *original_type_for_poly = nullptr);
gb_internal void check_union_type (CheckerContext *c, Type *union_type, Ast *node, Array *poly_operands,
Type *named_type = nullptr, Type *original_type_for_poly = nullptr);
-gb_internal CallArgumentData check_call_arguments (CheckerContext *c, Operand *operand, Type *proc_type, Ast *call);
-gb_internal Type * check_init_variable (CheckerContext *c, Entity *e, Operand *operand, String context_name);
+gb_internal Type * check_init_variable (CheckerContext *c, Entity *e, Operand *operand, String context_name);
gb_internal void check_assignment_error_suggestion(CheckerContext *c, Operand *o, Type *type);
@@ -121,6 +117,7 @@ gb_internal void check_or_return_split_types(CheckerContext *c, Operand *x, Stri
gb_internal bool is_diverging_expr(Ast *expr);
+gb_internal isize get_procedure_param_count_excluding_defaults(Type *pt, isize *param_count_);
enum LoadDirectiveResult {
LoadDirective_Success = 0,
@@ -335,7 +332,7 @@ gb_internal void check_scope_decls(CheckerContext *c, Slice const &nodes,
}
gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, Entity *base_entity, Type *type,
- Array *param_operands, Ast *poly_def_node, PolyProcData *poly_proc_data) {
+ Array const *param_operands, Ast *poly_def_node, PolyProcData *poly_proc_data) {
///////////////////////////////////////////////////////////////////////////////
// //
// TODO CLEANUP(bill): This procedure is very messy and hacky. Clean this!!! //
@@ -602,7 +599,7 @@ gb_internal bool check_polymorphic_procedure_assignment(CheckerContext *c, Opera
return find_or_generate_polymorphic_procedure(c, base_entity, type, nullptr, poly_def_node, poly_proc_data);
}
-gb_internal bool find_or_generate_polymorphic_procedure_from_parameters(CheckerContext *c, Entity *base_entity, Array *operands, Ast *poly_def_node, PolyProcData *poly_proc_data) {
+gb_internal bool find_or_generate_polymorphic_procedure_from_parameters(CheckerContext *c, Entity *base_entity, Array const *operands, Ast *poly_def_node, PolyProcData *poly_proc_data) {
return find_or_generate_polymorphic_procedure(c, base_entity, nullptr, operands, poly_def_node, poly_proc_data);
}
@@ -667,6 +664,11 @@ gb_internal i64 check_distance_between_types(CheckerContext *c, Operand *operand
if (check_representable_as_constant(c, operand->value, dst, nullptr)) {
if (is_type_typed(dst) && src->kind == Type_Basic) {
switch (src->Basic.kind) {
+ case Basic_UntypedBool:
+ if (is_type_boolean(dst)) {
+ return 1;
+ }
+ break;
case Basic_UntypedRune:
if (is_type_integer(dst) || is_type_rune(dst)) {
return 1;
@@ -677,6 +679,11 @@ gb_internal i64 check_distance_between_types(CheckerContext *c, Operand *operand
return 1;
}
break;
+ case Basic_UntypedString:
+ if (is_type_string(dst)) {
+ return 1;
+ }
+ break;
case Basic_UntypedFloat:
if (is_type_float(dst)) {
return 1;
@@ -701,23 +708,58 @@ gb_internal i64 check_distance_between_types(CheckerContext *c, Operand *operand
}
return -1;
}
- if (src->kind == Type_Basic && src->Basic.kind == Basic_UntypedRune) {
- if (is_type_integer(dst) || is_type_rune(dst)) {
- if (is_type_typed(type)) {
- return 2;
+ if (src->kind == Type_Basic) {
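+ // Score untyped basic values against the element type of an array-like destination as well
+ // as against the destination itself; the extra +6 penalizes the array-like match relative
+ // to a direct scalar match.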
+ Type *d = base_array_type(dst);
+ i64 score = -1;
+ switch (src->Basic.kind) {
+ case Basic_UntypedBool:
+ if (is_type_boolean(d)) {
+ score = 1;
}
- return 1;
- }
- return -1;
- }
- if (src->kind == Type_Basic && src->Basic.kind == Basic_UntypedBool) {
- if (is_type_boolean(dst)) {
- if (is_type_typed(type)) {
- return 2;
+ break;
+ case Basic_UntypedRune:
+ if (is_type_integer(d) || is_type_rune(d)) {
+ score = 1;
}
- return 1;
+ break;
+ case Basic_UntypedInteger:
+ if (is_type_integer(d) || is_type_rune(d)) {
+ score = 1;
+ }
+ break;
+ case Basic_UntypedString:
+ if (is_type_string(d)) {
+ score = 1;
+ }
+ break;
+ case Basic_UntypedFloat:
+ if (is_type_float(d)) {
+ score = 1;
+ }
+ break;
+ case Basic_UntypedComplex:
+ if (is_type_complex(d)) {
+ score = 1;
+ }
+ if (is_type_quaternion(d)) {
+ score = 2;
+ }
+ break;
+ case Basic_UntypedQuaternion:
+ if (is_type_quaternion(d)) {
+ score = 1;
+ }
+ break;
}
- return -1;
+ if (score > 0) {
+ if (is_type_typed(d)) {
+ score += 1;
+ }
+ if (d != dst) {
+ score += 6;
+ }
+ }
+ return score;
}
}
}
@@ -1677,7 +1719,7 @@ gb_internal bool check_unary_op(CheckerContext *c, Operand *o, Token op) {
break;
case Token_Not:
- if (!is_type_boolean(type)) {
+ if (!is_type_boolean(type) || is_type_array_like(o->type)) {
ERROR_BLOCK();
str = expr_to_string(o->expr);
error(op, "Operator '%.*s' is only allowed on boolean expressions", LIT(op.string));
@@ -2218,6 +2260,37 @@ gb_internal bool check_is_not_addressable(CheckerContext *c, Operand *o) {
return o->mode != Addressing_Variable && o->mode != Addressing_SoaVariable;
}
+gb_internal void check_old_for_or_switch_value_usage(Ast *expr) {
+ if (!build_context.strict_style) {
+ return;
+ }
+
+ Entity *e = entity_of_node(expr);
+ if (e != nullptr && (e->flags & EntityFlag_OldForOrSwitchValue) != 0) {
+ GB_ASSERT(e->kind == Entity_Variable);
+
+ begin_error_block();
+ defer (end_error_block());
+
+ if ((e->flags & EntityFlag_ForValue) != 0) {
+ Type *parent_type = type_deref(e->Variable.for_loop_parent_type);
+
+ error(expr, "Assuming a for-in defined value is addressable as the iterable is passed by value has been disallowed with '-strict-style'.");
+
+ if (is_type_map(parent_type)) {
+ error_line("\tSuggestion: Prefer doing 'for key, &%.*s in ...'\n", LIT(e->token.string));
+ } else {
+ error_line("\tSuggestion: Prefer doing 'for &%.*s in ...'\n", LIT(e->token.string));
+ }
+ } else {
+ GB_ASSERT((e->flags & EntityFlag_SwitchValue) != 0);
+
+ error(expr, "Assuming a switch-in defined value is addressable as the iterable is passed by value has been disallowed with '-strict-style'.");
+ error_line("\tSuggestion: Prefer doing 'switch &%.*s in ...'\n", LIT(e->token.string));
+ }
+ }
+}
+
gb_internal void check_unary_expr(CheckerContext *c, Operand *o, Token op, Ast *node) {
switch (op.kind) {
case Token_And: { // Pointer address
@@ -2227,7 +2300,7 @@ gb_internal void check_unary_expr(CheckerContext *c, Operand *o, Token op, Ast *
gbString str = expr_to_string(ue->expr);
defer (gb_string_free(str));
- Entity *e = entity_of_node(o->expr);
+ Entity *e = entity_of_node(ue->expr);
if (e != nullptr && (e->flags & EntityFlag_Param) != 0) {
error(op, "Cannot take the pointer address of '%s' which is a procedure parameter", str);
} else {
@@ -2245,7 +2318,15 @@ gb_internal void check_unary_expr(CheckerContext *c, Operand *o, Token op, Ast *
defer (end_error_block());
error(op, "Cannot take the pointer address of '%s'", str);
if (e != nullptr && (e->flags & EntityFlag_ForValue) != 0) {
- error_line("\tSuggestion: Did you want to pass the iterable value to the for statement by pointer to get addressable semantics?\n");
+ Type *parent_type = type_deref(e->Variable.for_loop_parent_type);
+
+ if (parent_type != nullptr && is_type_string(parent_type)) {
+ error_line("\tSuggestion: Iterating over a string produces an intermediate 'rune' value which cannot be addressed.\n");
+ } else if (parent_type != nullptr && is_type_tuple(parent_type)) {
+ error_line("\tSuggestion: Iterating over a procedure does not produce values which are addressable.\n");
+ } else {
+ error_line("\tSuggestion: Did you want to pass the iterable value to the for statement by pointer to get addressable semantics?\n");
+ }
}
if (e != nullptr && (e->flags & EntityFlag_SwitchValue) != 0) {
error_line("\tSuggestion: Did you want to pass the value to the switch statement by pointer to get addressable semantics?\n");
@@ -2270,6 +2351,11 @@ gb_internal void check_unary_expr(CheckerContext *c, Operand *o, Token op, Ast *
o->type = alloc_type_pointer(o->type);
}
} else {
+ if (build_context.strict_style && ast_node_expect(node, Ast_UnaryExpr)) {
+ ast_node(ue, UnaryExpr, node);
+ check_old_for_or_switch_value_usage(ue->expr);
+ }
+
o->type = alloc_type_pointer(o->type);
}
@@ -4688,7 +4774,10 @@ gb_internal Entity *check_selector(CheckerContext *c, Operand *operand, Ast *nod
if (entity == nullptr && selector->kind == Ast_Ident) {
String field_name = selector->Ident.token.string;
- if (is_type_dynamic_array(type_deref(operand->type))) {
+ Type *t = type_deref(operand->type);
+ if (t == nullptr) {
+ error(operand->expr, "Cannot use a selector expression on 0-value expression");
+ } else if (is_type_dynamic_array(t)) {
init_mem_allocator(c->checker);
}
sel = lookup_field(operand->type, field_name, operand->mode == Addressing_Type);
@@ -5150,14 +5239,19 @@ enum UnpackFlag : u32 {
};
-gb_internal bool check_unpack_arguments(CheckerContext *ctx, Entity **lhs, isize lhs_count, Array<Operand> *operands, Slice<Ast *> const &rhs, UnpackFlags flags) {
+gb_internal bool check_unpack_arguments(CheckerContext *ctx, Entity **lhs, isize lhs_count, Array<Operand> *operands, Slice<Ast *> const &rhs_arguments, UnpackFlags flags) {
bool allow_ok = (flags & UnpackFlag_AllowOk) != 0;
bool is_variadic = (flags & UnpackFlag_IsVariadic) != 0;
bool allow_undef = (flags & UnpackFlag_AllowUndef) != 0;
bool optional_ok = false;
isize tuple_index = 0;
- for_array(i, rhs) {
+ for (Ast *rhs : rhs_arguments) {
+ if (rhs->kind == Ast_FieldValue) {
+ error(rhs, "Invalid use of 'field = value'");
+ rhs = rhs->FieldValue.value;
+ }
+
CheckerContext c_ = *ctx;
CheckerContext *c = &c_;
@@ -5165,12 +5259,11 @@ gb_internal bool check_unpack_arguments(CheckerContext *ctx, Entity **lhs, isize
Type *type_hint = nullptr;
+
if (lhs != nullptr && tuple_index < lhs_count) {
// NOTE(bill): override DeclInfo for dependency
Entity *e = lhs[tuple_index];
if (e != nullptr) {
- // DeclInfo *decl = decl_info_of_entity(e);
- // if (decl) c->decl = decl;
type_hint = e->type;
if (e->flags & EntityFlag_Ellipsis) {
GB_ASSERT(is_type_slice(e->type));
@@ -5182,8 +5275,6 @@ gb_internal bool check_unpack_arguments(CheckerContext *ctx, Entity **lhs, isize
// NOTE(bill): override DeclInfo for dependency
Entity *e = lhs[lhs_count-1];
if (e != nullptr) {
- // DeclInfo *decl = decl_info_of_entity(e);
- // if (decl) c->decl = decl;
type_hint = e->type;
if (e->flags & EntityFlag_Ellipsis) {
GB_ASSERT(is_type_slice(e->type));
@@ -5193,15 +5284,15 @@ gb_internal bool check_unpack_arguments(CheckerContext *ctx, Entity **lhs, isize
}
}
- Ast *rhs_expr = unparen_expr(rhs[i]);
+ Ast *rhs_expr = unparen_expr(rhs);
if (allow_undef && rhs_expr != nullptr && rhs_expr->kind == Ast_Uninit) {
// NOTE(bill): Just handle this very specific logic here
o.type = t_untyped_uninit;
o.mode = Addressing_Value;
- o.expr = rhs[i];
- add_type_and_value(c, rhs[i], o.mode, o.type, o.value);
+ o.expr = rhs;
+ add_type_and_value(c, rhs, o.mode, o.type, o.value);
} else {
- check_expr_base(c, &o, rhs[i], type_hint);
+ check_expr_base(c, &o, rhs, type_hint);
}
if (o.mode == Addressing_NoValue) {
error_operand_no_value(&o);
@@ -5209,7 +5300,7 @@ gb_internal bool check_unpack_arguments(CheckerContext *ctx, Entity **lhs, isize
}
if (o.type == nullptr || o.type->kind != Type_Tuple) {
- if (allow_ok && lhs_count == 2 && rhs.count == 1 &&
+ if (allow_ok && lhs_count == 2 && rhs_arguments.count == 1 &&
(o.mode == Addressing_MapIndex || o.mode == Addressing_OptionalOk || o.mode == Addressing_OptionalOkPtr)) {
Ast *expr = unparen_expr(o.expr);
@@ -5309,7 +5400,36 @@ gb_internal isize get_procedure_param_count_excluding_defaults(Type *pt, isize *
}
-gb_internal CALL_ARGUMENT_CHECKER(check_call_arguments_internal) {
+gb_internal isize lookup_procedure_parameter(TypeProc *pt, String const &parameter_name) {
+ isize param_count = pt->param_count;
+ for (isize i = 0; i < param_count; i++) {
+ Entity *e = pt->params->Tuple.variables[i];
+ String name = e->token.string;
+ if (is_blank_ident(name)) {
+ continue;
+ }
+ if (name == parameter_name) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+gb_internal isize lookup_procedure_parameter(Type *type, String const &parameter_name) {
+ type = base_type(type);
+ GB_ASSERT(type->kind == Type_Proc);
+ return lookup_procedure_parameter(&type->Proc, parameter_name);
+}
+
+gb_internal CallArgumentError check_call_arguments_internal(CheckerContext *c, Ast *call,
+ Entity *entity, Type *proc_type,
+                                                            Array<Operand> positional_operands, Array<Operand> const &named_operands,
+ CallArgumentErrorMode show_error_mode,
+ CallArgumentData *data) {
+ TEMPORARY_ALLOCATOR_GUARD();
+
+ CallArgumentError err = CallArgumentError_None;
+
ast_node(ce, CallExpr, call);
GB_ASSERT(is_type_proc(proc_type));
proc_type = base_type(proc_type);
@@ -5320,16 +5440,8 @@ gb_internal CALL_ARGUMENT_CHECKER(check_call_arguments_internal) {
bool variadic = pt->variadic;
bool vari_expand = (ce->ellipsis.pos.line != 0);
i64 score = 0;
- bool show_error = show_error_mode == CallArgumentMode_ShowErrors;
+ bool show_error = show_error_mode == CallArgumentErrorMode::ShowErrors;
-
- TypeTuple *param_tuple = nullptr;
- if (pt->params != nullptr) {
- param_tuple = &pt->params->Tuple;
- }
-
-
- CallArgumentError err = CallArgumentError_None;
Type *final_proc_type = proc_type;
Entity *gen_entity = nullptr;
@@ -5347,192 +5459,304 @@ gb_internal CALL_ARGUMENT_CHECKER(check_call_arguments_internal) {
LIT(ce->proc->Ident.token.string));
}
err = CallArgumentError_NonVariadicExpand;
- } else if (operands.count == 0 && param_count_excluding_defaults == 0) {
+ }
+
+ GB_ASSERT(ce->split_args);
+	auto visited = slice_make<bool>(temporary_allocator(), pt->param_count);
+	auto ordered_operands = array_make<Operand>(temporary_allocator(), pt->param_count);
+ defer ({
+ for (Operand const &o : ordered_operands) {
+ if (o.expr != nullptr) {
+ call->viral_state_flags |= o.expr->viral_state_flags;
+ }
+ }
+ });
+
+ isize positional_operand_count = positional_operands.count;
+ if (variadic) {
+ positional_operand_count = gb_min(positional_operands.count, pt->variadic_index);
+ } else if (positional_operand_count > pt->param_count) {
+ err = CallArgumentError_TooManyArguments;
+ char const *err_fmt = "Too many arguments for '%s', expected %td arguments, got %td";
+ if (show_error) {
+ gbString proc_str = expr_to_string(ce->proc);
+ defer (gb_string_free(proc_str));
+ error(call, err_fmt, proc_str, param_count_excluding_defaults, positional_operands.count);
+ }
+ return err;
+ }
+ positional_operand_count = gb_min(positional_operand_count, pt->param_count);
+
+ for (isize i = 0; i < positional_operand_count; i++) {
+ ordered_operands[i] = positional_operands[i];
+ visited[i] = true;
+ }
+
+ auto variadic_operands = slice(slice_from_array(positional_operands), positional_operand_count, positional_operands.count);
+
+ if (named_operands.count != 0) {
+ GB_ASSERT(ce->split_args->named.count == named_operands.count);
+ for_array(i, ce->split_args->named) {
+ Ast *arg = ce->split_args->named[i];
+ Operand operand = named_operands[i];
+
+ ast_node(fv, FieldValue, arg);
+ if (fv->field->kind != Ast_Ident) {
+ if (show_error) {
+ gbString expr_str = expr_to_string(fv->field);
+ error(arg, "Invalid parameter name '%s' in procedure call", expr_str);
+ gb_string_free(expr_str);
+ }
+ err = CallArgumentError_InvalidFieldValue;
+ continue;
+ }
+ String name = fv->field->Ident.token.string;
+ isize param_index = lookup_procedure_parameter(pt, name);
+ if (param_index < 0) {
+ if (show_error) {
+ error(arg, "No parameter named '%.*s' for this procedure type", LIT(name));
+ }
+ err = CallArgumentError_ParameterNotFound;
+ continue;
+ }
+ if (visited[param_index]) {
+ if (show_error) {
+ error(arg, "Duplicate parameter '%.*s' in procedure call", LIT(name));
+ }
+ err = CallArgumentError_DuplicateParameter;
+ continue;
+ }
+
+ visited[param_index] = true;
+ ordered_operands[param_index] = operand;
+ }
+ }
+
+ isize dummy_argument_count = 0;
+ bool actually_variadic = false;
+
+ if (variadic) {
+ if (visited[pt->variadic_index] &&
+ positional_operand_count < positional_operands.count) {
+ if (show_error) {
+ String name = pt->params->Tuple.variables[pt->variadic_index]->token.string;
+ error(call, "Variadic parameters already handled with a named argument '%.*s' in procedure call", LIT(name));
+ }
+ err = CallArgumentError_DuplicateParameter;
+ } else if (!visited[pt->variadic_index]) {
+ visited[pt->variadic_index] = true;
+
+ Operand *variadic_operand = &ordered_operands[pt->variadic_index];
+
+ if (vari_expand) {
+ GB_ASSERT(variadic_operands.count != 0);
+ *variadic_operand = variadic_operands[0];
+ variadic_operand->type = default_type(variadic_operand->type);
+ actually_variadic = true;
+ } else {
+ AstFile *f = call->file();
+
+ // HACK(bill): this is an awful hack
+ Operand o = {};
+ o.mode = Addressing_Value;
+ o.expr = ast_ident(f, make_token_ident("nil"));
+ o.expr->Ident.token.pos = ast_token(call).pos;
+ if (variadic_operands.count != 0) {
+ actually_variadic = true;
+ o.expr->Ident.token.pos = ast_token(variadic_operands[0].expr).pos;
+
+ Entity *vt = pt->params->Tuple.variables[pt->variadic_index];
+ if (is_type_polymorphic(vt->type)) {
+ o.type = alloc_type_slice(default_type(variadic_operands[0].type));
+ } else {
+ o.type = vt->type;
+ }
+ } else {
+ dummy_argument_count += 1;
+ o.type = t_untyped_nil;
+ }
+ *variadic_operand = o;
+ }
+ }
+
+ }
+
+ for (Operand const &o : ordered_operands) {
+ if (o.mode != Addressing_Invalid) {
+ check_no_copy_assignment(o, str_lit("procedure call expression"));
+ }
+ }
+
+ for (isize i = 0; i < pt->param_count; i++) {
+ if (!visited[i]) {
+ Entity *e = pt->params->Tuple.variables[i];
+ if (is_blank_ident(e->token)) {
+ continue;
+ }
+ if (e->kind == Entity_Variable) {
+ if (e->Variable.param_value.kind != ParameterValue_Invalid) {
+ ordered_operands[i].mode = Addressing_Value;
+ ordered_operands[i].type = e->type;
+ ordered_operands[i].expr = e->Variable.param_value.original_ast_expr;
+
+ dummy_argument_count += 1;
+ score += assign_score_function(1);
+ continue;
+ }
+ }
+
+ if (show_error) {
+ if (e->kind == Entity_TypeName) {
+ error(call, "Type parameter '%.*s' is missing in procedure call",
+ LIT(e->token.string));
+ } else if (e->kind == Entity_Constant && e->Constant.value.kind != ExactValue_Invalid) {
+ // Ignore
+ } else {
+ gbString str = type_to_string(e->type);
+ error(call, "Parameter '%.*s' of type '%s' is missing in procedure call",
+ LIT(e->token.string), str);
+ gb_string_free(str);
+ }
+ }
+ err = CallArgumentError_ParameterMissing;
+ }
+ }
+
+ auto eval_param_and_score = [](CheckerContext *c, Operand *o, Type *param_type, CallArgumentError &err, bool param_is_variadic, Entity *e, bool show_error) -> i64 {
+ i64 s = 0;
+ if (!check_is_assignable_to_with_score(c, o, param_type, &s, param_is_variadic)) {
+ bool ok = false;
+ if (e && e->flags & EntityFlag_AnyInt) {
+ if (is_type_integer(param_type)) {
+ ok = check_is_castable_to(c, o, param_type);
+ }
+ }
+ if (ok) {
+ s = assign_score_function(MAXIMUM_TYPE_DISTANCE);
+ } else {
+ if (show_error) {
+ check_assignment(c, o, param_type, str_lit("procedure argument"));
+ }
+ err = CallArgumentError_WrongTypes;
+ }
+
+ } else if (show_error) {
+ check_assignment(c, o, param_type, str_lit("procedure argument"));
+ }
+
+ if (e && e->flags & EntityFlag_ConstInput) {
+ if (o->mode != Addressing_Constant) {
+ if (show_error) {
+ error(o->expr, "Expected a constant value for the argument '%.*s'", LIT(e->token.string));
+ }
+ err = CallArgumentError_NoneConstantParameter;
+ }
+ }
+
+
+ if (!err && is_type_any(param_type)) {
+ add_type_info_type(c, o->type);
+ }
+ if (o->mode == Addressing_Type && is_type_typeid(param_type)) {
+ add_type_info_type(c, o->type);
+ add_type_and_value(c, o->expr, Addressing_Value, param_type, exact_value_typeid(o->type));
+ } else if (show_error && is_type_untyped(o->type)) {
+ update_untyped_expr_type(c, o->expr, param_type, true);
+ }
+
+ return s;
+ };
+
+
+ if (ordered_operands.count == 0 && param_count_excluding_defaults == 0) {
err = CallArgumentError_None;
if (variadic) {
- GB_ASSERT(param_tuple != nullptr && param_tuple->variables.count > 0);
- Type *t = param_tuple->variables[0]->type;
+ GB_ASSERT(pt->params != nullptr && pt->params->Tuple.variables.count > 0);
+ Type *t = pt->params->Tuple.variables[0]->type;
if (is_type_polymorphic(t)) {
- error(call, "Ambiguous call to a polymorphic variadic procedure with no variadic input");
+ if (show_error) {
+ error(call, "Ambiguous call to a polymorphic variadic procedure with no variadic input");
+ }
err = CallArgumentError_AmbiguousPolymorphicVariadic;
}
}
} else {
- i32 error_code = 0;
- if (operands.count < param_count_excluding_defaults) {
- error_code = -1;
- } else if (!variadic && operands.count > param_count) {
- error_code = +1;
+ if (pt->is_polymorphic && !pt->is_poly_specialized && err == CallArgumentError_None) {
+ PolyProcData poly_proc_data = {};
+ if (find_or_generate_polymorphic_procedure_from_parameters(c, entity, &ordered_operands, call, &poly_proc_data)) {
+ gen_entity = poly_proc_data.gen_entity;
+ Type *gept = base_type(gen_entity->type);
+ GB_ASSERT(is_type_proc(gept));
+ final_proc_type = gen_entity->type;
+ pt = &gept->Proc;
+
+ } else {
+ err = CallArgumentError_WrongTypes;
+ }
}
- if (error_code != 0) {
- err = CallArgumentError_TooManyArguments;
- char const *err_fmt = "Too many arguments for '%s', expected %td arguments, got %td";
- if (error_code < 0) {
- err = CallArgumentError_TooFewArguments;
- err_fmt = "Too few arguments for '%s', expected %td arguments, got %td";
+
+ for (isize i = 0; i < pt->param_count; i++) {
+ Operand *o = &ordered_operands[i];
+ if (o->mode == Addressing_Invalid) {
+ continue;
}
- if (show_error) {
- gbString proc_str = expr_to_string(ce->proc);
- defer (gb_string_free(proc_str));
- error(call, err_fmt, proc_str, param_count_excluding_defaults, operands.count);
+ Entity *e = pt->params->Tuple.variables[i];
+ bool param_is_variadic = pt->variadic && pt->variadic_index == i;
- #if 0
- error_line("\t");
- for_array(i, operands) {
- if (i > 0) {
- error_line(", ");
+ if (e->kind == Entity_TypeName) {
+ GB_ASSERT(pt->is_polymorphic);
+ if (o->mode != Addressing_Type) {
+ if (show_error) {
+ error(o->expr, "Expected a type for the argument '%.*s'", LIT(e->token.string));
}
- gbString s = expr_to_string(operands[i].expr);
- error_line("%s", s);
- gb_string_free(s);
- }
- error_line("\n");
- #endif
- }
- } else {
- // NOTE(bill): Generate the procedure type for this generic instance
- if (pt->is_polymorphic && !pt->is_poly_specialized) {
- PolyProcData poly_proc_data = {};
- if (find_or_generate_polymorphic_procedure_from_parameters(c, entity, &operands, call, &poly_proc_data)) {
- gen_entity = poly_proc_data.gen_entity;
- GB_ASSERT(is_type_proc(gen_entity->type));
- final_proc_type = gen_entity->type;
- } else {
err = CallArgumentError_WrongTypes;
}
+ if (are_types_identical(e->type, o->type)) {
+ score += assign_score_function(1);
+ } else {
+ score += assign_score_function(MAXIMUM_TYPE_DISTANCE);
+ }
+ continue;
}
- GB_ASSERT(is_type_proc(final_proc_type));
- TypeProc *pt = &final_proc_type->Proc;
-
- GB_ASSERT(pt->params != nullptr);
- auto sig_params = pt->params->Tuple.variables;
- isize operand_index = 0;
- isize max_operand_count = gb_min(param_count, operands.count);
- for (; operand_index < max_operand_count; operand_index++) {
- Entity *e = sig_params[operand_index];
- Type *t = e->type;
- Operand o = operands[operand_index];
- if (o.expr != nullptr) {
- call->viral_state_flags |= o.expr->viral_state_flags;
- }
-
- if (e->kind == Entity_TypeName) {
- // GB_ASSERT(!variadic);
- if (o.mode == Addressing_Invalid) {
- continue;
- } else if (o.mode != Addressing_Type) {
- if (show_error) {
- error(o.expr, "Expected a type for the argument '%.*s'", LIT(e->token.string));
- }
- err = CallArgumentError_WrongTypes;
- }
-
- if (are_types_identical(e->type, o.type)) {
- score += assign_score_function(1);
- } else {
- score += assign_score_function(MAXIMUM_TYPE_DISTANCE);
- }
-
- continue;
- }
-
- bool param_is_variadic = pt->variadic && pt->variadic_index == operand_index;
-
- i64 s = 0;
- if (!check_is_assignable_to_with_score(c, &o, t, &s, param_is_variadic)) {
- bool ok = false;
- if (e->flags & EntityFlag_AnyInt) {
- if (is_type_integer(t)) {
- ok = check_is_castable_to(c, &o, t);
- }
- }
- if (ok) {
- s = assign_score_function(MAXIMUM_TYPE_DISTANCE);
- } else {
- if (show_error) {
- check_assignment(c, &o, t, str_lit("argument"));
- }
- // TODO(bill, 2021-05-05): Is this incorrect logic to only fail if there is ambiguity for definite?
- if (o.mode == Addressing_Invalid) {
- err = CallArgumentError_WrongTypes;
- }
- }
- } else if (show_error) {
- check_assignment(c, &o, t, str_lit("argument"));
- }
- score += s;
-
- if (e->flags & EntityFlag_ConstInput) {
- if (o.mode != Addressing_Constant) {
- if (show_error) {
- error(o.expr, "Expected a constant value for the argument '%.*s'", LIT(e->token.string));
- }
- err = CallArgumentError_NoneConstantParameter;
- }
- }
-
- if (o.mode == Addressing_Type && is_type_typeid(e->type)) {
- add_type_info_type(c, o.type);
- add_type_and_value(c, o.expr, Addressing_Value, e->type, exact_value_typeid(o.type));
- } else if (show_error && is_type_untyped(o.type)) {
- update_untyped_expr_type(c, o.expr, t, true);
- }
-
+ if (param_is_variadic) {
+ continue;
}
- if (variadic) {
- bool variadic_expand = false;
- Type *slice = sig_params[param_count]->type;
- GB_ASSERT(is_type_slice(slice));
- Type *elem = base_type(slice)->Slice.elem;
- Type *t = elem;
+ score += eval_param_and_score(c, o, e->type, err, param_is_variadic, e, show_error);
+ }
+ }
- if (is_type_polymorphic(t)) {
- error(call, "Ambiguous call to a polymorphic variadic procedure with no variadic input");
- err = CallArgumentError_AmbiguousPolymorphicVariadic;
- }
+ if (variadic) {
+ Type *slice = pt->params->Tuple.variables[pt->variadic_index]->type;
+ GB_ASSERT(is_type_slice(slice));
+ Type *elem = base_type(slice)->Slice.elem;
+ Type *t = elem;
- for (; operand_index < operands.count; operand_index++) {
- Operand o = operands[operand_index];
- if (vari_expand) {
- variadic_expand = true;
- t = slice;
- if (operand_index != param_count) {
- if (show_error) {
- error(o.expr, "'..' in a variadic procedure can only have one variadic argument at the end");
- }
- if (data) {
- data->score = score;
- data->result_type = final_proc_type->Proc.results;
- data->gen_entity = gen_entity;
- }
- return CallArgumentError_MultipleVariadicExpand;
- }
+ if (is_type_polymorphic(t)) {
+ error(call, "Ambiguous call to a polymorphic variadic procedure with no variadic input %s", type_to_string(final_proc_type));
+ err = CallArgumentError_AmbiguousPolymorphicVariadic;
+ }
+
+ for_array(operand_index, variadic_operands) {
+ Operand *o = &variadic_operands[operand_index];
+ if (vari_expand) {
+ t = slice;
+ if (operand_index > 0) {
+ if (show_error) {
+ error(o->expr, "'..' in a variadic procedure can only have one variadic argument at the end");
}
- i64 s = 0;
- if (!check_is_assignable_to_with_score(c, &o, t, &s, true)) {
- if (show_error) {
- check_assignment(c, &o, t, str_lit("argument"));
- }
- err = CallArgumentError_WrongTypes;
- } else if (show_error) {
- check_assignment(c, &o, t, str_lit("argument"));
- }
- score += s;
- if (is_type_any(elem)) {
- add_type_info_type(c, o.type);
- }
- if (o.mode == Addressing_Type && is_type_typeid(t)) {
- add_type_info_type(c, o.type);
- add_type_and_value(c, o.expr, Addressing_Value, t, exact_value_typeid(o.type));
- } else if (show_error && is_type_untyped(o.type)) {
- update_untyped_expr_type(c, o.expr, t, true);
+ if (data) {
+ data->score = score;
+ data->result_type = final_proc_type->Proc.results;
+ data->gen_entity = gen_entity;
}
+ return CallArgumentError_MultipleVariadicExpand;
}
}
+ score += eval_param_and_score(c, o, t, err, true, nullptr, show_error);
}
}
@@ -5566,203 +5790,6 @@ gb_internal bool is_call_expr_field_value(AstCallExpr *ce) {
return ce->args[0]->kind == Ast_FieldValue;
}
-gb_internal isize lookup_procedure_parameter(TypeProc *pt, String parameter_name) {
- isize param_count = pt->param_count;
- for (isize i = 0; i < param_count; i++) {
- Entity *e = pt->params->Tuple.variables[i];
- String name = e->token.string;
- if (is_blank_ident(name)) {
- continue;
- }
- if (name == parameter_name) {
- return i;
- }
- }
- return -1;
-}
-
-gb_internal CALL_ARGUMENT_CHECKER(check_named_call_arguments) {
- ast_node(ce, CallExpr, call);
- GB_ASSERT(is_type_proc(proc_type));
- proc_type = base_type(proc_type);
- TypeProc *pt = &proc_type->Proc;
-
- i64 score = 0;
- bool show_error = show_error_mode == CallArgumentMode_ShowErrors;
- CallArgumentError err = CallArgumentError_None;
-
- TEMPORARY_ALLOCATOR_GUARD();
-
- isize param_count = pt->param_count;
- bool *visited = gb_alloc_array(temporary_allocator(), bool, param_count);
-	auto ordered_operands = array_make<Operand>(temporary_allocator(), param_count);
- defer ({
- for (Operand const &o : ordered_operands) {
- if (o.expr != nullptr) {
- call->viral_state_flags |= o.expr->viral_state_flags;
- }
- }
- });
-
- for_array(i, ce->args) {
- Ast *arg = ce->args[i];
- ast_node(fv, FieldValue, arg);
- if (fv->field->kind != Ast_Ident) {
- if (show_error) {
- gbString expr_str = expr_to_string(fv->field);
- error(arg, "Invalid parameter name '%s' in procedure call", expr_str);
- gb_string_free(expr_str);
- }
- err = CallArgumentError_InvalidFieldValue;
- continue;
- }
- String name = fv->field->Ident.token.string;
- isize index = lookup_procedure_parameter(pt, name);
- if (index < 0) {
- if (show_error) {
- error(arg, "No parameter named '%.*s' for this procedure type", LIT(name));
- }
- err = CallArgumentError_ParameterNotFound;
- continue;
- }
- if (visited[index]) {
- if (show_error) {
- error(arg, "Duplicate parameter '%.*s' in procedure call", LIT(name));
- }
- err = CallArgumentError_DuplicateParameter;
- continue;
- }
-
- visited[index] = true;
- ordered_operands[index] = operands[i];
- }
-
- // NOTE(bill): Check for default values and missing parameters
- isize param_count_to_check = param_count;
- if (pt->variadic) {
- param_count_to_check--;
- }
- for (isize i = 0; i < param_count_to_check; i++) {
- if (!visited[i]) {
- Entity *e = pt->params->Tuple.variables[i];
- if (is_blank_ident(e->token)) {
- continue;
- }
- if (e->kind == Entity_Variable) {
- if (e->Variable.param_value.kind != ParameterValue_Invalid) {
- score += assign_score_function(1);
- continue;
- }
- }
-
- if (show_error) {
- if (e->kind == Entity_TypeName) {
- error(call, "Type parameter '%.*s' is missing in procedure call",
- LIT(e->token.string));
- } else if (e->kind == Entity_Constant && e->Constant.value.kind != ExactValue_Invalid) {
- // Ignore
- } else {
- gbString str = type_to_string(e->type);
- error(call, "Parameter '%.*s' of type '%s' is missing in procedure call",
- LIT(e->token.string), str);
- gb_string_free(str);
- }
- }
- err = CallArgumentError_ParameterMissing;
- }
- }
-
- Entity *gen_entity = nullptr;
- if (pt->is_polymorphic && !pt->is_poly_specialized && err == CallArgumentError_None) {
- PolyProcData poly_proc_data = {};
- if (find_or_generate_polymorphic_procedure_from_parameters(c, entity, &ordered_operands, call, &poly_proc_data)) {
- gen_entity = poly_proc_data.gen_entity;
- Type *gept = base_type(gen_entity->type);
- GB_ASSERT(is_type_proc(gept));
- proc_type = gept;
- pt = &gept->Proc;
- } else {
- err = CallArgumentError_WrongTypes;
- }
- }
-
-
- for (isize i = 0; i < param_count; i++) {
- Entity *e = pt->params->Tuple.variables[i];
- Operand *o = &ordered_operands[i];
- bool param_is_variadic = pt->variadic && pt->variadic_index == i;
-
-
- if (o->mode == Addressing_Invalid) {
- if (param_is_variadic) {
- Type *slice = e->type;
- GB_ASSERT(is_type_slice(slice));
- Type *elem = base_type(slice)->Slice.elem;
- if (is_type_polymorphic(elem)) {
- error(call, "Ambiguous call to a polymorphic variadic procedure with no variadic input");
- err = CallArgumentError_AmbiguousPolymorphicVariadic;
- return err;
- }
- }
- continue;
- }
-
- if (e->kind == Entity_TypeName) {
- GB_ASSERT(pt->is_polymorphic);
- if (o->mode != Addressing_Type) {
- if (show_error) {
- error(o->expr, "Expected a type for the argument '%.*s'", LIT(e->token.string));
- }
- err = CallArgumentError_WrongTypes;
- }
- if (are_types_identical(e->type, o->type)) {
- score += assign_score_function(1);
- } else {
- score += assign_score_function(MAXIMUM_TYPE_DISTANCE);
- }
- } else {
- i64 s = 0;
- if (!check_is_assignable_to_with_score(c, o, e->type, &s, param_is_variadic)) {
- bool ok = false;
- if (ok) {
- s = assign_score_function(MAXIMUM_TYPE_DISTANCE);
- } else {
- if (show_error) {
- check_assignment(c, o, e->type, str_lit("procedure argument"));
- }
- err = CallArgumentError_WrongTypes;
- }
-
- if (e->flags & EntityFlag_ConstInput) {
- if (o->mode != Addressing_Constant) {
- if (show_error) {
- error(o->expr, "Expected a constant value for the argument '%.*s'", LIT(e->token.string));
- }
- err = CallArgumentError_NoneConstantParameter;
- }
- }
- } else if (show_error) {
- check_assignment(c, o, e->type, str_lit("procedure argument"));
- }
- score += s;
- }
-
- if (o->mode == Addressing_Type && is_type_typeid(e->type)) {
- add_type_info_type(c, o->type);
- add_type_and_value(c, o->expr, Addressing_Value, e->type, exact_value_typeid(o->type));
- }
- }
-
- if (data) {
- data->score = score;
- data->result_type = pt->results;
- data->gen_entity = gen_entity;
- add_type_and_value(c, ce->proc, Addressing_Value, proc_type, {});
- }
-
- return err;
-}
-
gb_internal Entity **populate_proc_parameter_list(CheckerContext *c, Type *proc_type, isize *lhs_count_, bool *is_variadic) {
Entity **lhs = nullptr;
isize lhs_count = -1;
@@ -5867,535 +5894,704 @@ gb_internal bool evaluate_where_clauses(CheckerContext *ctx, Ast *call_expr, Sco
return true;
}
+gb_internal bool check_named_arguments(CheckerContext *c, Type *type, Slice<Ast *> const &named_args, Array<Operand> *named_operands, bool show_error) {
+ bool success = true;
-gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *operand, Type *proc_type, Ast *call, Slice<Ast *> const &args) {
- ast_node(ce, CallExpr, call);
-
- CallArgumentCheckerType *call_checker = check_call_arguments_internal;
-	Array<Operand> operands = {};
- defer (array_free(&operands));
-
- Type *result_type = t_invalid;
-
- if (is_call_expr_field_value(ce)) {
- call_checker = check_named_call_arguments;
-
-		operands = array_make<Operand>(heap_allocator(), args.count);
-
- // NOTE(bill): This is give type hints for the named parameters
- // in order to improve the type inference system
-
-		StringMap<Type *> type_hint_map = {}; // Key: String
- string_map_init(&type_hint_map, 2*args.count);
- defer (string_map_destroy(&type_hint_map));
-
- Type *ptype = nullptr;
- bool single_case = true;
-
- if (operand->mode == Addressing_ProcGroup) {
- single_case = false;
-			Array<Entity *> procs = proc_group_entities(c, *operand);
- if (procs.count == 1) {
- ptype = procs[0]->type;
- single_case = true;
- }
- } else {
- ptype = proc_type;
+ type = base_type(type);
+ if (named_args.count > 0) {
+ TypeProc *pt = nullptr;
+ if (is_type_proc(type)) {
+ pt = &type->Proc;
}
- if (single_case) {
- Type *bptype = base_type(ptype);
- if (is_type_proc(bptype)) {
- TypeProc *pt = &bptype->Proc;
- TypeTuple *param_tuple = nullptr;
- if (pt->params != nullptr) {
- param_tuple = &pt->params->Tuple;
- }
- if (param_tuple != nullptr) {
- for (Entity *e : param_tuple->variables) {
- if (is_blank_ident(e->token)) {
- continue;
- }
- string_map_set(&type_hint_map, e->token.string, e->type);
- }
+ for_array(i, named_args) {
+ Ast *arg = named_args[i];
+ if (arg->kind != Ast_FieldValue) {
+ if (show_error) {
+ error(arg, "Expected a 'field = value'");
}
+ return false;
}
- } else {
-			Array<Entity *> procs = proc_group_entities(c, *operand);
- for (Entity *proc : procs) {
- Type *proc_type = base_type(proc->type);
- if (is_type_proc(proc_type)) {
- TypeProc *pt = &proc_type->Proc;
- TypeTuple *param_tuple = nullptr;
- if (pt->params != nullptr) {
- param_tuple = &pt->params->Tuple;
- }
- if (param_tuple == nullptr) {
- continue;
- }
- for (Entity *e : param_tuple->variables) {
- if (is_blank_ident(e->token)) {
- continue;
- }
- StringHashKey key = string_hash_string(e->token.string);
- Type **found = string_map_get(&type_hint_map, key);
- if (found) {
- Type *t = *found;
- if (t == nullptr) {
- // NOTE(bill): Ambiguous named parameter across all types
- continue;
- }
- if (are_types_identical(t, e->type)) {
- // NOTE(bill): No need to set again
- } else {
- // NOTE(bill): Ambiguous named parameter across all types so set it to a nullptr
- string_map_set(&type_hint_map, key, cast(Type *)nullptr);
- }
- } else {
- string_map_set(&type_hint_map, key, e->type);
- }
- }
- }
- }
-
- }
-
-
- for_array(i, args) {
- Ast *arg = args[i];
ast_node(fv, FieldValue, arg);
- Ast *field = fv->field;
+ if (fv->field->kind != Ast_Ident) {
+ if (show_error) {
+ gbString expr_str = expr_to_string(fv->field);
+ error(arg, "Invalid parameter name '%s' in procedure call", expr_str);
+ gb_string_free(expr_str);
+ }
+ success = false;
+ continue;
+ }
+ String key = fv->field->Ident.token.string;
+ Ast *value = fv->value;
Type *type_hint = nullptr;
-
- if (field != nullptr && field->kind == Ast_Ident) {
- String key = field->Ident.token.string;
- Type **found = string_map_get(&type_hint_map, key);
- if (found) {
- type_hint = *found;
+ if (pt) {
+ isize param_index = lookup_procedure_parameter(pt, key);
+ if (param_index < 0) {
+ if (show_error) {
+ error(value, "No parameter named '%.*s' for this procedure type", LIT(key));
+ }
+ success = false;
+ continue;
}
+
+ Entity *e = pt->params->Tuple.variables[param_index];
+ if (!is_type_polymorphic(e->type)) {
+ type_hint = e->type;
+ }
+
}
- check_expr_or_type(c, &operands[i], fv->value, type_hint);
+ Operand o = {};
+ check_expr_with_type_hint(c, &o, value, type_hint);
+ if (o.mode == Addressing_Invalid) {
+ success = false;
+ }
+ array_add(named_operands, o);
}
- } else {
-		operands = array_make<Operand>(heap_allocator(), 0, 2*args.count);
- Entity **lhs = nullptr;
- isize lhs_count = -1;
- bool is_variadic = false;
- if (proc_type != nullptr && is_type_proc(proc_type)) {
- lhs = populate_proc_parameter_list(c, proc_type, &lhs_count, &is_variadic);
- }
- if (operand->mode != Addressing_ProcGroup) {
- check_unpack_arguments(c, lhs, lhs_count, &operands, args, is_variadic ? UnpackFlag_IsVariadic : UnpackFlag_None);
+
+ }
+ return success;
+}
+
+gb_internal bool check_call_arguments_single(CheckerContext *c, Ast *call, Operand *operand,
+                                             Array<Operand> const &positional_operands, Array<Operand> const &named_operands,
+ Array const &positional_operands, Array const &named_operands,
+ CallArgumentErrorMode show_error_mode,
+ CallArgumentData *data) {
+
+ bool return_on_failure = show_error_mode == CallArgumentErrorMode::NoErrors;
+
+ Ast *ident = operand->expr;
+ while (ident->kind == Ast_SelectorExpr) {
+ Ast *s = ident->SelectorExpr.selector;
+ ident = s;
+ }
+
+ if (e == nullptr) {
+ e = entity_of_node(ident);
+ if (e != nullptr) {
+ proc_type = e->type;
}
}
- for (Operand const &o : operands) {
- check_no_copy_assignment(o, str_lit("call expression"));
+ GB_ASSERT(proc_type != nullptr);
+ proc_type = base_type(proc_type);
+ GB_ASSERT(proc_type->kind == Type_Proc);
+
+ CallArgumentError err = check_call_arguments_internal(c, call, e, proc_type, positional_operands, named_operands, show_error_mode, data);
+ if (return_on_failure && err != CallArgumentError_None) {
+ return false;
}
- if (operand->mode == Addressing_ProcGroup) {
- check_entity_decl(c, operand->proc_group, nullptr, nullptr);
+ Entity *entity_to_use = data->gen_entity != nullptr ? data->gen_entity : e;
+ if (!return_on_failure && entity_to_use != nullptr) {
+ add_entity_use(c, ident, entity_to_use);
+ update_untyped_expr_type(c, operand->expr, entity_to_use->type, true);
+ add_type_and_value(c, operand->expr, operand->mode, entity_to_use->type, operand->value);
+ }
- auto procs = proc_group_entities_cloned(c, *operand);
+ if (data->gen_entity != nullptr) {
+ Entity *e = data->gen_entity;
+ DeclInfo *decl = data->gen_entity->decl_info;
+ CheckerContext ctx = *c;
+ ctx.scope = decl->scope;
+ ctx.decl = decl;
+ ctx.proc_name = e->token.string;
+ ctx.curr_proc_decl = decl;
+ ctx.curr_proc_sig = e->type;
- if (procs.count > 1) {
- isize max_arg_count = args.count;
- for (Ast *arg : args) {
- // NOTE(bill): The only thing that may have multiple values
- // will be a call expression (assuming `or_return` and `()` will be stripped)
- arg = strip_or_return_expr(arg);
+ GB_ASSERT(decl->proc_lit->kind == Ast_ProcLit);
+ bool ok = evaluate_where_clauses(&ctx, call, decl->scope, &decl->proc_lit->ProcLit.where_clauses, !return_on_failure);
+ if (return_on_failure) {
+ if (!ok) {
+ return false;
+ }
+
+ } else {
+ decl->where_clauses_evaluated = true;
+ if (ok && (data->gen_entity->flags & EntityFlag_ProcBodyChecked) == 0) {
+ check_procedure_later(c->checker, e->file, e->token, decl, e->type, decl->proc_lit->ProcLit.body, decl->proc_lit->ProcLit.tags);
+ }
+ if (is_type_proc(data->gen_entity->type)) {
+ Type *t = base_type(entity_to_use->type);
+ data->result_type = t->Proc.results;
+ }
+ }
+ }
+
+ return true;
+}
+
+
+gb_internal CallArgumentData check_call_arguments_proc_group(CheckerContext *c, Operand *operand, Ast *call) {
+ ast_node(ce, CallExpr, call);
+ GB_ASSERT(ce->split_args != nullptr);
+
+	Slice<Ast *> const &positional_args = ce->split_args->positional;
+	Slice<Ast *> const &named_args = ce->split_args->named;
+
+ CallArgumentData data = {};
+ data.result_type = t_invalid;
+
+ GB_ASSERT(operand->mode == Addressing_ProcGroup);
+ auto procs = proc_group_entities_cloned(c, *operand);
+
+ if (procs.count > 1) {
+ isize max_arg_count = positional_args.count + named_args.count;
+ for (Ast *arg : positional_args) {
+ // NOTE(bill): The only thing that may have multiple values
+ // will be a call expression (assuming `or_return` and `()` will be stripped)
+ arg = strip_or_return_expr(arg);
+ if (arg && arg->kind == Ast_CallExpr) {
+ max_arg_count = ISIZE_MAX;
+ break;
+ }
+ }
+ if (max_arg_count != ISIZE_MAX) for (Ast *arg : named_args) {
+ // NOTE(bill): The only thing that may have multiple values
+ // will be a call expression (assuming `or_return` and `()` will be stripped)
+ if (arg->kind == Ast_FieldValue) {
+ arg = strip_or_return_expr(arg->FieldValue.value);
if (arg && arg->kind == Ast_CallExpr) {
max_arg_count = ISIZE_MAX;
break;
}
}
+ }
- for (isize proc_index = 0; proc_index < procs.count; /**/) {
- Entity *proc = procs[proc_index];
- Type *pt = base_type(proc->type);
- if (!(pt != nullptr && is_type_proc(pt))) {
- proc_index++;
- continue;
+ // ignore named arguments first
+ for (Ast *arg : named_args) {
+ if (arg->kind != Ast_FieldValue) {
+ continue;
+ }
+ ast_node(fv, FieldValue, arg);
+ if (fv->field->kind != Ast_Ident) {
+ continue;
+ }
+ String key = fv->field->Ident.token.string;
+ for (isize proc_index = procs.count-1; proc_index >= 0; proc_index--) {
+ Type *t = procs[proc_index]->type;
+ if (is_type_proc(t)) {
+ isize param_index = lookup_procedure_parameter(t, key);
+ if (param_index < 0) {
+ array_unordered_remove(&procs, proc_index);
+ }
}
+ }
+ }
- isize param_count = 0;
-				isize param_count_excluding_defaults = get_procedure_param_count_excluding_defaults(pt, &param_count);
+ if (procs.count == 0) {
+ // if any of the named arguments are wrong, the `procs` will be empty
+ // just start from scratch
+ array_free(&procs);
+ procs = proc_group_entities_cloned(c, *operand);
+ }
- if (param_count_excluding_defaults > max_arg_count) {
- array_unordered_remove(&procs, proc_index);
+ // filter by positional argument length
+ for (isize proc_index = 0; proc_index < procs.count; /**/) {
+ Entity *proc = procs[proc_index];
+ Type *pt = base_type(proc->type);
+ if (!(pt != nullptr && is_type_proc(pt))) {
+ proc_index++;
+ continue;
+ }
+
+ isize param_count = 0;
+		isize param_count_excluding_defaults = get_procedure_param_count_excluding_defaults(pt, &param_count);
+
+ if (param_count_excluding_defaults > max_arg_count) {
+ array_unordered_remove(&procs, proc_index);
+ continue;
+ }
+ proc_index++;
+ }
+ }
+
+ Entity **lhs = nullptr;
+ isize lhs_count = -1;
+ bool is_variadic = false;
+
+	auto positional_operands = array_make<Operand>(heap_allocator(), 0, 0);
+	auto named_operands = array_make<Operand>(heap_allocator(), 0, 0);
+ defer (array_free(&positional_operands));
+ defer (array_free(&named_operands));
+
+ if (procs.count == 1) {
+ Entity *e = procs[0];
+
+ lhs = populate_proc_parameter_list(c, e->type, &lhs_count, &is_variadic);
+ check_unpack_arguments(c, lhs, lhs_count, &positional_operands, positional_args, is_variadic ? UnpackFlag_IsVariadic : UnpackFlag_None);
+
+ if (check_named_arguments(c, e->type, named_args, &named_operands, true)) {
+ check_call_arguments_single(c, call, operand,
+ e, e->type,
+ positional_operands, named_operands,
+ CallArgumentErrorMode::ShowErrors,
+ &data);
+ }
+ return data;
+ }
+
+ {
+ // NOTE(bill, 2019-07-13): This code is used to improve the type inference for procedure groups
+ // where the same positional parameter has the same type value (and ellipsis)
+ isize proc_arg_count = -1;
+ for (Entity *p : procs) {
+ Type *pt = base_type(p->type);
+ if (pt != nullptr && is_type_proc(pt)) {
+ if (proc_arg_count < 0) {
+ proc_arg_count = pt->Proc.param_count;
} else {
- proc_index++;
+ proc_arg_count = gb_min(proc_arg_count, pt->Proc.param_count);
}
}
}
- if (procs.count == 1) {
- Ast *ident = operand->expr;
- while (ident->kind == Ast_SelectorExpr) {
- Ast *s = ident->SelectorExpr.selector;
- ident = s;
- }
+ if (proc_arg_count >= 0) {
+ lhs_count = proc_arg_count;
+ if (lhs_count > 0) {
+ lhs = gb_alloc_array(heap_allocator(), Entity *, lhs_count);
+ for (isize param_index = 0; param_index < lhs_count; param_index++) {
+ Entity *e = nullptr;
+ for (Entity *p : procs) {
+ Type *pt = base_type(p->type);
+ if (!(pt != nullptr && is_type_proc(pt))) {
+ continue;
+ }
- Entity *e = procs[0];
-
- Entity **lhs = nullptr;
- isize lhs_count = -1;
- bool is_variadic = false;
- lhs = populate_proc_parameter_list(c, e->type, &lhs_count, &is_variadic);
- check_unpack_arguments(c, lhs, lhs_count, &operands, args, is_variadic ? UnpackFlag_IsVariadic : UnpackFlag_None);
-
- CallArgumentData data = {};
- CallArgumentError err = call_checker(c, call, e->type, e, operands, CallArgumentMode_ShowErrors, &data);
- if (err != CallArgumentError_None) {
- // handle error
- }
- Entity *entity_to_use = data.gen_entity != nullptr ? data.gen_entity : e;
- add_entity_use(c, ident, entity_to_use);
- if (entity_to_use != nullptr) {
- update_untyped_expr_type(c, operand->expr, entity_to_use->type, true);
- }
- return data;
- }
-
-
- Entity **lhs = nullptr;
- isize lhs_count = -1;
-
- {
- // NOTE(bill, 2019-07-13): This code is used to improve the type inference for procedure groups
- // where the same positional parameter has the same type value (and ellipsis)
- bool proc_arg_count_all_equal = true;
- isize proc_arg_count = -1;
- for (Entity *p : procs) {
- Type *pt = base_type(p->type);
- if (pt != nullptr && is_type_proc(pt)) {
- if (proc_arg_count < 0) {
- proc_arg_count = pt->Proc.param_count;
- } else {
- if (proc_arg_count != pt->Proc.param_count) {
- proc_arg_count_all_equal = false;
+ if (e == nullptr) {
+ e = pt->Proc.params->Tuple.variables[param_index];
+ } else {
+ Entity *f = pt->Proc.params->Tuple.variables[param_index];
+ if (e == f) {
+ continue;
+ }
+ if (are_types_identical(e->type, f->type)) {
+ bool ee = (e->flags & EntityFlag_Ellipsis) != 0;
+ bool fe = (f->flags & EntityFlag_Ellipsis) != 0;
+ if (ee == fe) {
+ continue;
+ }
+ }
+ // NOTE(bill): Entities are not close enough to be used
+ e = nullptr;
break;
}
}
- }
- }
-
-
-
- if (proc_arg_count >= 0 && proc_arg_count_all_equal) {
- lhs_count = proc_arg_count;
- if (lhs_count > 0) {
- lhs = gb_alloc_array(heap_allocator(), Entity *, lhs_count);
- for (isize param_index = 0; param_index < lhs_count; param_index++) {
- Entity *e = nullptr;
- for (Entity *p : procs) {
- Type *pt = base_type(p->type);
- if (pt != nullptr && is_type_proc(pt)) {
- if (e == nullptr) {
- e = pt->Proc.params->Tuple.variables[param_index];
- } else {
- Entity *f = pt->Proc.params->Tuple.variables[param_index];
- if (e == f) {
- continue;
- }
- if (are_types_identical(e->type, f->type)) {
- bool ee = (e->flags & EntityFlag_Ellipsis) != 0;
- bool fe = (f->flags & EntityFlag_Ellipsis) != 0;
- if (ee == fe) {
- continue;
- }
- }
- // NOTE(bill): Entities are not close enough to be used
- e = nullptr;
- break;
- }
- }
- }
- lhs[param_index] = e;
- }
+ lhs[param_index] = e;
}
}
}
+ }
+ check_unpack_arguments(c, lhs, lhs_count, &positional_operands, positional_args, is_variadic ? UnpackFlag_IsVariadic : UnpackFlag_None);
- check_unpack_arguments(c, lhs, lhs_count, &operands, args, UnpackFlag_None);
-
- if (lhs != nullptr) {
- gb_free(heap_allocator(), lhs);
- }
-
-		auto valids = array_make<ValidIndexAndScore>(heap_allocator(), 0, procs.count);
- defer (array_free(&valids));
-
-		auto proc_entities = array_make<Entity *>(heap_allocator(), 0, procs.count*2 + 1);
- defer (array_free(&proc_entities));
- for (Entity *proc : procs) {
- array_add(&proc_entities, proc);
- }
-
-
- gbString expr_name = expr_to_string(operand->expr);
- defer (gb_string_free(expr_name));
-
- for_array(i, procs) {
- Entity *p = procs[i];
- Type *pt = base_type(p->type);
- if (pt != nullptr && is_type_proc(pt)) {
- CallArgumentError err = CallArgumentError_None;
- CallArgumentData data = {};
- CheckerContext ctx = *c;
-
- ctx.no_polymorphic_errors = true;
- ctx.allow_polymorphic_types = is_type_polymorphic(pt);
- ctx.hide_polymorphic_errors = true;
-
- err = call_checker(&ctx, call, pt, p, operands, CallArgumentMode_NoErrors, &data);
- if (err != CallArgumentError_None) {
- continue;
- }
- isize index = i;
-
- if (data.gen_entity != nullptr) {
- Entity *e = data.gen_entity;
- DeclInfo *decl = data.gen_entity->decl_info;
- ctx.scope = decl->scope;
- ctx.decl = decl;
- ctx.proc_name = e->token.string;
- ctx.curr_proc_decl = decl;
- ctx.curr_proc_sig = e->type;
-
- GB_ASSERT(decl->proc_lit->kind == Ast_ProcLit);
- if (!evaluate_where_clauses(&ctx, call, decl->scope, &decl->proc_lit->ProcLit.where_clauses, false)) {
- continue;
- }
-
- array_add(&proc_entities, data.gen_entity);
- index = proc_entities.count-1;
- }
-
- ValidIndexAndScore item = {};
- item.index = index;
- item.score = data.score;
- array_add(&valids, item);
- }
- }
-
- if (valids.count > 1) {
- gb_sort_array(valids.data, valids.count, valid_index_and_score_cmp);
- i64 best_score = valids[0].score;
- Entity *best_entity = proc_entities[valids[0].index];
- GB_ASSERT(best_entity != nullptr);
- for (isize i = 1; i < valids.count; i++) {
- if (best_score > valids[i].score) {
- valids.count = i;
- break;
- }
- if (best_entity == proc_entities[valids[i].index]) {
- valids.count = i;
- break;
- }
- }
- }
-
-
- if (valids.count == 0) {
- begin_error_block();
- defer (end_error_block());
-
- error(operand->expr, "No procedures or ambiguous call for procedure group '%s' that match with the given arguments", expr_name);
- if (operands.count == 0) {
- error_line("\tNo given arguments\n");
- } else {
- error_line("\tGiven argument types: (");
- for_array(i, operands) {
- Operand o = operands[i];
- if (i > 0) error_line(", ");
- gbString type = type_to_string(o.type);
- defer (gb_string_free(type));
- error_line("%s", type);
- }
- error_line(")\n");
- }
-
- if (procs.count > 0) {
- error_line("Did you mean to use one of the following:\n");
- }
- for (Entity *proc : procs) {
- TokenPos pos = proc->token.pos;
- Type *t = base_type(proc->type);
- if (t == t_invalid) continue;
- GB_ASSERT(t->kind == Type_Proc);
- gbString pt;
- defer (gb_string_free(pt));
- if (t->Proc.node != nullptr) {
- pt = expr_to_string(t->Proc.node);
- } else {
- pt = type_to_string(t);
- }
- String prefix = {};
- String prefix_sep = {};
- if (proc->pkg) {
- prefix = proc->pkg->name;
- prefix_sep = str_lit(".");
- }
- String name = proc->token.string;
-
- char const *sep = "::";
- if (proc->kind == Entity_Variable) {
- sep = ":=";
- }
- error_line("\t%.*s%.*s%.*s %s %s at %s\n", LIT(prefix), LIT(prefix_sep), LIT(name), sep, pt, token_pos_to_string(pos));
- }
- if (procs.count > 0) {
- error_line("\n");
- }
-
- result_type = t_invalid;
- } else if (valids.count > 1) {
- begin_error_block();
- defer (end_error_block());
-
- error(operand->expr, "Ambiguous procedure group call '%s' that match with the given arguments", expr_name);
- error_line("\tGiven argument types: (");
- for_array(i, operands) {
- Operand o = operands[i];
- if (i > 0) error_line(", ");
- gbString type = type_to_string(o.type);
- defer (gb_string_free(type));
- error_line("%s", type);
- }
- error_line(")\n");
-
- for (isize i = 0; i < valids.count; i++) {
- Entity *proc = proc_entities[valids[i].index];
- GB_ASSERT(proc != nullptr);
- TokenPos pos = proc->token.pos;
- Type *t = base_type(proc->type); GB_ASSERT(t->kind == Type_Proc);
- gbString pt = nullptr;
- defer (gb_string_free(pt));
- if (t->Proc.node != nullptr) {
- pt = expr_to_string(t->Proc.node);
- } else {
- pt = type_to_string(t);
- }
- String name = proc->token.string;
- char const *sep = "::";
- if (proc->kind == Entity_Variable) {
- sep = ":=";
- }
- error_line("\t%.*s %s %s ", LIT(name), sep, pt);
- if (proc->decl_info->proc_lit != nullptr) {
- GB_ASSERT(proc->decl_info->proc_lit->kind == Ast_ProcLit);
- auto *pl = &proc->decl_info->proc_lit->ProcLit;
- if (pl->where_token.kind != Token_Invalid) {
- error_line("\n\t\twhere ");
- for_array(j, pl->where_clauses) {
- Ast *clause = pl->where_clauses[j];
- if (j != 0) {
- error_line("\t\t ");
- }
- gbString str = expr_to_string(clause);
- error_line("%s", str);
- gb_string_free(str);
-
- if (j != pl->where_clauses.count-1) {
- error_line(",");
- }
- }
- error_line("\n\t");
- }
- }
- error_line("at %s\n", token_pos_to_string(pos));
- }
- result_type = t_invalid;
- } else {
- GB_ASSERT(valids.count == 1);
- Ast *ident = operand->expr;
- while (ident->kind == Ast_SelectorExpr) {
- Ast *s = ident->SelectorExpr.selector;
- ident = s;
- }
-
- Entity *e = proc_entities[valids[0].index];
- GB_ASSERT(e != nullptr);
-
- proc_type = e->type;
- CallArgumentData data = {};
- CallArgumentError err = call_checker(c, call, proc_type, e, operands, CallArgumentMode_ShowErrors, &data);
- gb_unused(err);
- Entity *entity_to_use = data.gen_entity != nullptr ? data.gen_entity : e;
- add_entity_use(c, ident, entity_to_use);
- if (entity_to_use != nullptr) {
- update_untyped_expr_type(c, operand->expr, entity_to_use->type, true);
- }
-
- if (data.gen_entity != nullptr) {
- Entity *e = data.gen_entity;
- DeclInfo *decl = data.gen_entity->decl_info;
- CheckerContext ctx = *c;
- ctx.scope = decl->scope;
- ctx.decl = decl;
- ctx.proc_name = e->token.string;
- ctx.curr_proc_decl = decl;
- ctx.curr_proc_sig = e->type;
-
- GB_ASSERT(decl->proc_lit->kind == Ast_ProcLit);
- bool ok = evaluate_where_clauses(&ctx, call, decl->scope, &decl->proc_lit->ProcLit.where_clauses, true);
- decl->where_clauses_evaluated = true;
-
- if (ok && (data.gen_entity->flags & EntityFlag_ProcBodyChecked) == 0) {
- check_procedure_later(c->checker, e->file, e->token, decl, e->type, decl->proc_lit->ProcLit.body, decl->proc_lit->ProcLit.tags);
- }
- }
+ for_array(i, named_args) {
+ Ast *arg = named_args[i];
+ if (arg->kind != Ast_FieldValue) {
+ error(arg, "Expected a 'field = value'");
return data;
}
+ ast_node(fv, FieldValue, arg);
+ if (fv->field->kind != Ast_Ident) {
+ gbString expr_str = expr_to_string(fv->field);
+ error(arg, "Invalid parameter name '%s' in procedure call", expr_str);
+ gb_string_free(expr_str);
+ return data;
+ }
+ String key = fv->field->Ident.token.string;
+ Ast *value = fv->value;
+
+ Type *type_hint = nullptr;
+
+ for (isize lhs_idx = 0; lhs_idx < lhs_count; lhs_idx++) {
+ Entity *e = lhs[lhs_idx];
+ if (e != nullptr && e->token.string == key &&
+ !is_type_polymorphic(e->type)) {
+ type_hint = e->type;
+ break;
+ }
+ }
+ Operand o = {};
+ check_expr_with_type_hint(c, &o, value, type_hint);
+ array_add(&named_operands, o);
+ }
+
+ gb_free(heap_allocator(), lhs);
+
+	auto valids = array_make<ValidIndexAndScore>(heap_allocator(), 0, procs.count);
+ defer (array_free(&valids));
+
+	auto proc_entities = array_make<Entity *>(heap_allocator(), 0, procs.count*2 + 1);
+ defer (array_free(&proc_entities));
+ for (Entity *proc : procs) {
+ array_add(&proc_entities, proc);
+ }
+
+
+ gbString expr_name = expr_to_string(operand->expr);
+ defer (gb_string_free(expr_name));
+
+ for_array(i, procs) {
+ Entity *p = procs[i];
+ Type *pt = base_type(p->type);
+ if (pt != nullptr && is_type_proc(pt)) {
+ CallArgumentData data = {};
+ CheckerContext ctx = *c;
+
+ ctx.no_polymorphic_errors = true;
+ ctx.allow_polymorphic_types = is_type_polymorphic(pt);
+ ctx.hide_polymorphic_errors = true;
+
+ bool is_a_candidate = check_call_arguments_single(&ctx, call, operand,
+ p, pt,
+ positional_operands, named_operands,
+ CallArgumentErrorMode::NoErrors,
+ &data);
+ if (!is_a_candidate) {
+ continue;
+ }
+ isize index = i;
+
+ ValidIndexAndScore item = {};
+ item.score = data.score;
+
+ if (data.gen_entity != nullptr) {
+ array_add(&proc_entities, data.gen_entity);
+ index = proc_entities.count-1;
+
+ // prefer non-polymorphic procedures over polymorphic
+ item.score += assign_score_function(1);
+ }
+
+ item.index = index;
+ array_add(&valids, item);
+ }
+ }
+
+ if (valids.count > 1) {
+ gb_sort_array(valids.data, valids.count, valid_index_and_score_cmp);
+ i64 best_score = valids[0].score;
+ Entity *best_entity = proc_entities[valids[0].index];
+ GB_ASSERT(best_entity != nullptr);
+ for (isize i = 1; i < valids.count; i++) {
+ if (best_score > valids[i].score) {
+ valids.count = i;
+ break;
+ }
+ if (best_entity == proc_entities[valids[i].index]) {
+ valids.count = i;
+ break;
+ }
+ }
+ }
+
+ auto print_argument_types = [&]() {
+ error_line("\tGiven argument types: (");
+ isize i = 0;
+ for (Operand const &o : positional_operands) {
+ if (i++ > 0) error_line(", ");
+ gbString type = type_to_string(o.type);
+ defer (gb_string_free(type));
+ error_line("%s", type);
+ }
+ for (Operand const &o : named_operands) {
+ if (i++ > 0) error_line(", ");
+
+ gbString type = type_to_string(o.type);
+ defer (gb_string_free(type));
+
+ if (i < ce->split_args->named.count) {
+ Ast *named_field = ce->split_args->named[i];
+ ast_node(fv, FieldValue, named_field);
+
+ gbString field = expr_to_string(fv->field);
+ defer (gb_string_free(field));
+
+ error_line("%s = %s", field, type);
+ } else {
+ error_line("%s", type);
+ }
+ }
+ error_line(")\n");
+ };
+
+ if (valids.count == 0) {
+ begin_error_block();
+ defer (end_error_block());
+
+ error(operand->expr, "No procedures or ambiguous call for procedure group '%s' that match with the given arguments", expr_name);
+ if (positional_operands.count == 0 && named_operands.count == 0) {
+ error_line("\tNo given arguments\n");
+ } else {
+ print_argument_types();
+ }
+
+ if (procs.count == 0) {
+ procs = proc_group_entities_cloned(c, *operand);
+ }
+ if (procs.count > 0) {
+ error_line("Did you mean to use one of the following:\n");
+ }
+ isize max_name_length = 0;
+ isize max_type_length = 0;
+ for (Entity *proc : procs) {
+ Type *t = base_type(proc->type);
+ if (t == t_invalid) continue;
+ String prefix = {};
+ String prefix_sep = {};
+ if (proc->pkg) {
+ prefix = proc->pkg->name;
+ prefix_sep = str_lit(".");
+ }
+ String name = proc->token.string;
+ max_name_length = gb_max(max_name_length, prefix.len + prefix_sep.len + name.len);
+
+ gbString pt;
+ if (t->Proc.node != nullptr) {
+ pt = expr_to_string(t->Proc.node);
+ } else {
+ pt = type_to_string(t);
+ }
+
+ max_type_length = gb_max(max_type_length, gb_string_length(pt));
+ gb_string_free(pt);
+ }
+
+ isize max_spaces = gb_max(max_name_length, max_type_length);
+ char *spaces = gb_alloc_array(temporary_allocator(), char, max_spaces+1);
+ for (isize i = 0; i < max_spaces; i++) {
+ spaces[i] = ' ';
+ }
+ spaces[max_spaces] = 0;
+
+ for (Entity *proc : procs) {
+ TokenPos pos = proc->token.pos;
+ Type *t = base_type(proc->type);
+ if (t == t_invalid) continue;
+ GB_ASSERT(t->kind == Type_Proc);
+ gbString pt;
+ defer (gb_string_free(pt));
+ if (t->Proc.node != nullptr) {
+ pt = expr_to_string(t->Proc.node);
+ } else {
+ pt = type_to_string(t);
+ }
+ String prefix = {};
+ String prefix_sep = {};
+ if (proc->pkg) {
+ prefix = proc->pkg->name;
+ prefix_sep = str_lit(".");
+ }
+ String name = proc->token.string;
+ isize len = prefix.len + prefix_sep.len + name.len;
+
+ int name_padding = cast(int)gb_max(max_name_length - len, 0);
+ int type_padding = cast(int)gb_max(max_type_length - gb_string_length(pt), 0);
+
+ char const *sep = "::";
+ if (proc->kind == Entity_Variable) {
+ sep = ":=";
+ }
+ error_line("\t%.*s%.*s%.*s %.*s%s %s %.*sat %s\n",
+ LIT(prefix), LIT(prefix_sep), LIT(name),
+ name_padding, spaces,
+ sep,
+ pt,
+ type_padding, spaces,
+ token_pos_to_string(pos)
+ );
+ }
+ if (procs.count > 0) {
+ error_line("\n");
+ }
+
+ data.result_type = t_invalid;
+ } else if (valids.count > 1) {
+ begin_error_block();
+ defer (end_error_block());
+
+ error(operand->expr, "Ambiguous procedure group call '%s' that match with the given arguments", expr_name);
+ print_argument_types();
+
+ for (auto const &valid : valids) {
+ Entity *proc = proc_entities[valid.index];
+ GB_ASSERT(proc != nullptr);
+ TokenPos pos = proc->token.pos;
+ Type *t = base_type(proc->type); GB_ASSERT(t->kind == Type_Proc);
+ gbString pt = nullptr;
+ defer (gb_string_free(pt));
+ if (t->Proc.node != nullptr) {
+ pt = expr_to_string(t->Proc.node);
+ } else {
+ pt = type_to_string(t);
+ }
+ String name = proc->token.string;
+ char const *sep = "::";
+ if (proc->kind == Entity_Variable) {
+ sep = ":=";
+ }
+ error_line("\t%.*s %s %s ", LIT(name), sep, pt);
+ if (proc->decl_info->proc_lit != nullptr) {
+ GB_ASSERT(proc->decl_info->proc_lit->kind == Ast_ProcLit);
+ auto *pl = &proc->decl_info->proc_lit->ProcLit;
+ if (pl->where_token.kind != Token_Invalid) {
+ error_line("\n\t\twhere ");
+ for_array(j, pl->where_clauses) {
+ Ast *clause = pl->where_clauses[j];
+ if (j != 0) {
+ error_line("\t\t ");
+ }
+ gbString str = expr_to_string(clause);
+ error_line("%s", str);
+ gb_string_free(str);
+
+ if (j != pl->where_clauses.count-1) {
+ error_line(",");
+ }
+ }
+ error_line("\n\t");
+ }
+ }
+ error_line("at %s\n", token_pos_to_string(pos));
+ }
+ data.result_type = t_invalid;
} else {
+ GB_ASSERT(valids.count == 1);
Ast *ident = operand->expr;
while (ident->kind == Ast_SelectorExpr) {
Ast *s = ident->SelectorExpr.selector;
ident = s;
}
- Entity *e = entity_of_node(ident);
+ Entity *e = proc_entities[valids[0].index];
+ GB_ASSERT(e != nullptr);
+		Array<Operand> named_operands = {};
- CallArgumentData data = {};
- CallArgumentError err = call_checker(c, call, proc_type, e, operands, CallArgumentMode_ShowErrors, &data);
- gb_unused(err);
- Entity *entity_to_use = data.gen_entity != nullptr ? data.gen_entity : e;
- add_entity_use(c, ident, entity_to_use);
- if (entity_to_use != nullptr) {
- update_untyped_expr_type(c, operand->expr, entity_to_use->type, true);
- }
- if (data.gen_entity != nullptr) {
- Entity *e = data.gen_entity;
- DeclInfo *decl = data.gen_entity->decl_info;
- CheckerContext ctx = *c;
- ctx.scope = decl->scope;
- ctx.decl = decl;
- ctx.proc_name = e->token.string;
- ctx.curr_proc_decl = decl;
- ctx.curr_proc_sig = e->type;
-
- GB_ASSERT(decl->proc_lit->kind == Ast_ProcLit);
- bool ok = evaluate_where_clauses(&ctx, call, decl->scope, &decl->proc_lit->ProcLit.where_clauses, true);
- decl->where_clauses_evaluated = true;
-
- if (ok && (data.gen_entity->flags & EntityFlag_ProcBodyChecked) == 0) {
- check_procedure_later(c->checker, e->file, e->token, decl, e->type, decl->proc_lit->ProcLit.body, decl->proc_lit->ProcLit.tags);
- }
- }
+ check_call_arguments_single(c, call, operand,
+ e, e->type,
+ positional_operands, named_operands,
+ CallArgumentErrorMode::ShowErrors,
+ &data);
return data;
}
-
- CallArgumentData data = {};
- data.result_type = t_invalid;
return data;
}
+gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *operand, Ast *call) {
+ Type *proc_type = nullptr;
+
+ CallArgumentData data = {};
+ data.result_type = t_invalid;
+
+ proc_type = base_type(operand->type);
+
+ TypeProc *pt = nullptr;
+ if (proc_type) {
+ pt = &proc_type->Proc;
+ }
+
+ TEMPORARY_ALLOCATOR_GUARD();
+ ast_node(ce, CallExpr, call);
+
+ bool any_failure = false;
+
+ // Split positional and named args into separate arrays/slices
+	Slice<Ast *> positional_args = {};
+	Slice<Ast *> named_args = {};
+
+ if (ce->split_args == nullptr) {
+ positional_args = ce->args;
+ for (isize i = 0; i < ce->args.count; i++) {
+ Ast *arg = ce->args.data[i];
+ if (arg->kind == Ast_FieldValue) {
+ positional_args.count = i;
+ break;
+ }
+ }
+ named_args = slice(ce->args, positional_args.count, ce->args.count);
+
+ auto split_args = gb_alloc_item(permanent_allocator(), AstSplitArgs);
+ split_args->positional = positional_args;
+ split_args->named = named_args;
+ ce->split_args = split_args;
+ } else {
+ positional_args = ce->split_args->positional;
+ named_args = ce->split_args->named;
+ }
+
+ if (operand->mode == Addressing_ProcGroup) {
+ return check_call_arguments_proc_group(c, operand, call);
+ }
+
+	auto positional_operands = array_make<Operand>(heap_allocator(), 0, positional_args.count);
+	auto named_operands = array_make<Operand>(heap_allocator(), 0, 0);
+
+ defer (array_free(&positional_operands));
+ defer (array_free(&named_operands));
+
+ if (positional_args.count > 0) {
+ isize lhs_count = -1;
+ bool is_variadic = false;
+ Entity **lhs = nullptr;
+ if (pt != nullptr) {
+ lhs = populate_proc_parameter_list(c, proc_type, &lhs_count, &is_variadic);
+ }
+ check_unpack_arguments(c, lhs, lhs_count, &positional_operands, positional_args, is_variadic ? UnpackFlag_IsVariadic : UnpackFlag_None);
+ }
+
+ if (named_args.count > 0) {
+ for_array(i, named_args) {
+ Ast *arg = named_args[i];
+ if (arg->kind != Ast_FieldValue) {
+ error(arg, "Expected a 'field = value'");
+ return data;
+ }
+ ast_node(fv, FieldValue, arg);
+ if (fv->field->kind != Ast_Ident) {
+ gbString expr_str = expr_to_string(fv->field);
+ error(arg, "Invalid parameter name '%s' in procedure call", expr_str);
+ any_failure = true;
+ gb_string_free(expr_str);
+ continue;
+ }
+ String key = fv->field->Ident.token.string;
+ Ast *value = fv->value;
+
+ isize param_index = lookup_procedure_parameter(pt, key);
+ Type *type_hint = nullptr;
+ if (param_index >= 0) {
+ Entity *e = pt->params->Tuple.variables[param_index];
+ type_hint = e->type;
+ }
+
+ Operand o = {};
+ check_expr_with_type_hint(c, &o, value, type_hint);
+ if (o.mode == Addressing_Invalid) {
+ any_failure = true;
+ }
+ array_add(&named_operands, o);
+ }
+
+ }
+
+ if (!any_failure) {
+ check_call_arguments_single(c, call, operand,
+ nullptr, proc_type,
+ positional_operands, named_operands,
+ CallArgumentErrorMode::ShowErrors,
+ &data);
+ } else if (pt) {
+ data.result_type = pt->results;
+ }
+
+ return data;
+}
+
gb_internal isize lookup_polymorphic_record_parameter(Type *t, String parameter_name) {
if (!is_type_polymorphic_record(t)) {
return -1;
@@ -6699,15 +6895,15 @@ gb_internal CallArgumentError check_polymorphic_record_type(CheckerContext *c, O
return err;
}
- String generated_name = make_string_c(expr_to_string(call));
-
CheckerContext ctx = *c;
// NOTE(bill): We need to make sure the lookup scope for the record is the same as where it was created
ctx.scope = polymorphic_record_parent_scope(original_type);
GB_ASSERT(ctx.scope != nullptr);
- Type *named_type = alloc_type_named(generated_name, nullptr, nullptr);
Type *bt = base_type(original_type);
+ String generated_name = make_string_c(expr_to_string(call));
+
+ Type *named_type = alloc_type_named(generated_name, nullptr, nullptr);
if (bt->kind == Type_Struct) {
Ast *node = clone_ast(bt->Struct.node);
Type *struct_type = alloc_type_struct();
@@ -6732,6 +6928,49 @@ gb_internal CallArgumentError check_polymorphic_record_type(CheckerContext *c, O
GB_PANIC("Unsupported parametric polymorphic record type");
}
+
+ bt = base_type(named_type);
+ if (bt->kind == Type_Struct || bt->kind == Type_Union) {
+ GB_ASSERT(original_type->kind == Type_Named);
+ Entity *e = original_type->Named.type_name;
+ GB_ASSERT(e->kind == Entity_TypeName);
+
+ gbString s = gb_string_make_reserve(heap_allocator(), e->token.string.len+3);
+ s = gb_string_append_fmt(s, "%.*s(", LIT(e->token.string));
+
+ Type *params = nullptr;
+ switch (bt->kind) {
+ case Type_Struct: params = bt->Struct.polymorphic_params; break;
+ case Type_Union: params = bt->Union.polymorphic_params; break;
+ }
+
+ if (params != nullptr) for_array(i, params->Tuple.variables) {
+ Entity *v = params->Tuple.variables[i];
+ String name = v->token.string;
+ if (i > 0) {
+ s = gb_string_append_fmt(s, ", ");
+ }
+ s = gb_string_append_fmt(s, "$%.*s", LIT(name));
+
+ if (v->kind == Entity_TypeName) {
+ if (v->type->kind != Type_Generic) {
+ s = gb_string_append_fmt(s, "=");
+ s = write_type_to_string(s, v->type, false);
+ }
+ } else if (v->kind == Entity_Constant) {
+ s = gb_string_append_fmt(s, "=");
+ s = write_exact_value_to_string(s, v->Constant.value);
+ }
+ }
+ s = gb_string_append_fmt(s, ")");
+
+ String new_name = make_string_c(s);
+ named_type->Named.name = new_name;
+ if (named_type->Named.type_name) {
+ named_type->Named.type_name->token.string = new_name;
+ }
+ }
+
operand->mode = Addressing_Type;
operand->type = named_type;
}
@@ -6740,6 +6979,46 @@ gb_internal CallArgumentError check_polymorphic_record_type(CheckerContext *c, O
+// returns true on success
+gb_internal bool check_call_parameter_mixture(Slice<Ast *> const &args, char const *context, bool allow_mixed=false) {
+ bool success = true;
+ if (args.count > 0) {
+ if (allow_mixed) {
+ bool was_named = false;
+ for (Ast *arg : args) {
+ if (was_named && arg->kind != Ast_FieldValue) {
+					error(arg, "A non-named parameter is not allowed to follow a named parameter, i.e. 'field = value', in a %s", context);
+ success = false;
+ break;
+ }
+ was_named = was_named || arg->kind == Ast_FieldValue;
+ }
+ } else {
+ bool first_is_field_value = (args[0]->kind == Ast_FieldValue);
+ for (Ast *arg : args) {
+ bool mix = false;
+ if (first_is_field_value) {
+ mix = arg->kind != Ast_FieldValue;
+ } else {
+ mix = arg->kind == Ast_FieldValue;
+ }
+ if (mix) {
+ error(arg, "Mixture of 'field = value' and value elements in a %s is not allowed", context);
+ success = false;
+ }
+ }
+ }
+
+ }
+ return success;
+}
+
+#define CHECK_CALL_PARAMETER_MIXTURE_OR_RETURN(context_, ...) if (!check_call_parameter_mixture(args, context_, ##__VA_ARGS__)) { \
+ operand->mode = Addressing_Invalid; \
+ operand->expr = call; \
+ return Expr_Stmt; \
+}
+
gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *call, Ast *proc, Slice<Ast *> const &args, ProcInlining inlining, Type *type_hint) {
if (proc != nullptr &&
@@ -6779,30 +7058,8 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c
}
}
- if (args.count > 0) {
- bool fail = false;
- bool first_is_field_value = (args[0]->kind == Ast_FieldValue);
- for (Ast *arg : args) {
- bool mix = false;
- if (first_is_field_value) {
- mix = arg->kind != Ast_FieldValue;
- } else {
- mix = arg->kind == Ast_FieldValue;
- }
- if (mix) {
- error(arg, "Mixture of 'field = value' and value elements in a procedure call is not allowed");
- fail = true;
- }
- }
-
- if (fail) {
- operand->mode = Addressing_Invalid;
- operand->expr = call;
- return Expr_Stmt;
- }
- }
-
if (operand->mode == Addressing_Invalid) {
+ CHECK_CALL_PARAMETER_MIXTURE_OR_RETURN("procedure call");
for (Ast *arg : args) {
if (arg->kind == Ast_FieldValue) {
arg = arg->FieldValue.value;
@@ -6817,6 +7074,8 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c
if (operand->mode == Addressing_Type) {
Type *t = operand->type;
if (is_type_polymorphic_record(t)) {
+ CHECK_CALL_PARAMETER_MIXTURE_OR_RETURN("polymorphic type construction");
+
if (!is_type_named(t)) {
gbString s = expr_to_string(operand->expr);
error(call, "Illegal use of an unnamed polymorphic record, %s", s);
@@ -6842,6 +7101,8 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c
operand->type = t_invalid;
}
} else {
+ CHECK_CALL_PARAMETER_MIXTURE_OR_RETURN("type conversion");
+
operand->mode = Addressing_Invalid;
isize arg_count = args.count;
switch (arg_count) {
@@ -6887,6 +7148,8 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c
}
if (operand->mode == Addressing_Builtin) {
+ CHECK_CALL_PARAMETER_MIXTURE_OR_RETURN("builtin call");
+
i32 id = operand->builtin_id;
Entity *e = entity_of_node(operand->expr);
if (e != nullptr && e->token.string == "expand_to_tuple") {
@@ -6900,6 +7163,8 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c
return builtin_procs[id].kind;
}
+ CHECK_CALL_PARAMETER_MIXTURE_OR_RETURN(operand->mode == Addressing_ProcGroup ? "procedure group call": "procedure call", true);
+
Entity *initial_entity = entity_of_node(operand->expr);
if (initial_entity != nullptr && initial_entity->kind == Entity_Procedure) {
@@ -6911,8 +7176,8 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c
}
}
- Type *proc_type = base_type(operand->type);
if (operand->mode != Addressing_ProcGroup) {
+ Type *proc_type = base_type(operand->type);
bool valid_type = (proc_type != nullptr) && is_type_proc(proc_type);
bool valid_mode = is_operand_value(*operand);
if (!valid_type || !valid_mode) {
@@ -6930,7 +7195,7 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c
}
}
- CallArgumentData data = check_call_arguments(c, operand, proc_type, call, args);
+ CallArgumentData data = check_call_arguments(c, operand, call);
Type *result_type = data.result_type;
gb_zero_item(operand);
operand->expr = call;
@@ -6941,7 +7206,10 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c
return Expr_Stmt;
}
- Type *pt = base_type(proc_type);
+ Type *pt = base_type(operand->type);
+ if (pt == nullptr) {
+ pt = t_invalid;
+ }
if (pt == t_invalid) {
if (operand->expr != nullptr && operand->expr->kind == Ast_CallExpr) {
pt = type_of_expr(operand->expr->CallExpr.proc);
@@ -6986,7 +7254,7 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c
if (decl->proc_lit) {
ast_node(pl, ProcLit, decl->proc_lit);
if (pl->inlining == ProcInlining_no_inline) {
- error(call, "'inline' cannot be applied to a procedure that has be marked as 'no_inline'");
+				error(call, "'#force_inline' cannot be applied to a procedure that has been marked as '#force_no_inline'");
}
}
}
@@ -6999,9 +7267,6 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c
operand->expr = call;
{
- if (proc_type == t_invalid) {
- // gb_printf_err("%s\n", expr_to_string(operand->expr));
- }
Type *type = nullptr;
if (operand->expr != nullptr && operand->expr->kind == Ast_CallExpr) {
type = type_of_expr(operand->expr->CallExpr.proc);
@@ -7019,8 +7284,6 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c
}
}
- // add_type_and_value(c, operand->expr, operand->mode, operand->type, operand->value);
-
return Expr_Expr;
}
@@ -9098,13 +9361,13 @@ gb_internal ExprKind check_selector_call_expr(CheckerContext *c, Operand *o, Ast
ExprKind kind = check_expr_base(c, &x, se->expr, nullptr);
c->allow_arrow_right_selector_expr = allow_arrow_right_selector_expr;
- if (x.mode == Addressing_Invalid || x.type == t_invalid) {
+ if (x.mode == Addressing_Invalid || (x.type == t_invalid && x.mode != Addressing_ProcGroup)) {
o->mode = Addressing_Invalid;
o->type = t_invalid;
o->expr = node;
return kind;
}
- if (!is_type_proc(x.type)) {
+ if (!is_type_proc(x.type) && x.mode != Addressing_ProcGroup) {
gbString type_str = type_to_string(x.type);
error(se->call, "Selector call expressions expect a procedure type for the call, got '%s'", type_str);
gb_string_free(type_str);
@@ -9127,76 +9390,76 @@ gb_internal ExprKind check_selector_call_expr(CheckerContext *c, Operand *o, Ast
first_arg->state_flags |= StateFlag_SelectorCallExpr;
}
- Type *pt = base_type(x.type);
- GB_ASSERT(pt->kind == Type_Proc);
- Type *first_type = nullptr;
- String first_arg_name = {};
- if (pt->Proc.param_count > 0) {
- Entity *f = pt->Proc.params->Tuple.variables[0];
- first_type = f->type;
- first_arg_name = f->token.string;
- }
- if (first_arg_name.len == 0) {
- first_arg_name = str_lit("_");
- }
+ if (e->kind != Entity_ProcGroup) {
+ Type *pt = base_type(x.type);
+ GB_ASSERT_MSG(pt->kind == Type_Proc, "%.*s %.*s %s", LIT(e->token.string), LIT(entity_strings[e->kind]), type_to_string(x.type));
+ Type *first_type = nullptr;
+ String first_arg_name = {};
+ if (pt->Proc.param_count > 0) {
+ Entity *f = pt->Proc.params->Tuple.variables[0];
+ first_type = f->type;
+ first_arg_name = f->token.string;
+ }
+ if (first_arg_name.len == 0) {
+ first_arg_name = str_lit("_");
+ }
- if (first_type == nullptr) {
- error(se->call, "Selector call expressions expect a procedure type for the call with at least 1 parameter");
- o->mode = Addressing_Invalid;
- o->type = t_invalid;
- o->expr = node;
- return Expr_Stmt;
- }
+ if (first_type == nullptr) {
+ error(se->call, "Selector call expressions expect a procedure type for the call with at least 1 parameter");
+ o->mode = Addressing_Invalid;
+ o->type = t_invalid;
+ o->expr = node;
+ return Expr_Stmt;
+ }
- Operand y = {};
- y.mode = first_arg->tav.mode;
- y.type = first_arg->tav.type;
- y.value = first_arg->tav.value;
+ Operand y = {};
+ y.mode = first_arg->tav.mode;
+ y.type = first_arg->tav.type;
+ y.value = first_arg->tav.value;
- if (check_is_assignable_to(c, &y, first_type)) {
- // Do nothing, it's valid
- } else {
- Operand z = y;
- z.type = type_deref(y.type);
- if (check_is_assignable_to(c, &z, first_type)) {
- // NOTE(bill): AST GENERATION HACK!
- Token op = {Token_Pointer};
- first_arg = ast_deref_expr(first_arg->file(), first_arg, op);
- } else if (y.mode == Addressing_Variable) {
- Operand w = y;
- w.type = alloc_type_pointer(y.type);
- if (check_is_assignable_to(c, &w, first_type)) {
+ if (check_is_assignable_to(c, &y, first_type)) {
+ // Do nothing, it's valid
+ } else {
+ Operand z = y;
+ z.type = type_deref(y.type);
+ if (check_is_assignable_to(c, &z, first_type)) {
// NOTE(bill): AST GENERATION HACK!
- Token op = {Token_And};
- first_arg = ast_unary_expr(first_arg->file(), op, first_arg);
+ Token op = {Token_Pointer};
+ first_arg = ast_deref_expr(first_arg->file(), first_arg, op);
+ } else if (y.mode == Addressing_Variable) {
+ Operand w = y;
+ w.type = alloc_type_pointer(y.type);
+ if (check_is_assignable_to(c, &w, first_type)) {
+ // NOTE(bill): AST GENERATION HACK!
+ Token op = {Token_And};
+ first_arg = ast_unary_expr(first_arg->file(), op, first_arg);
+ }
+ }
+ }
+
+ if (ce->args.count > 0) {
+ bool fail = false;
+ bool first_is_field_value = (ce->args[0]->kind == Ast_FieldValue);
+ for (Ast *arg : ce->args) {
+ bool mix = false;
+ if (first_is_field_value) {
+ mix = arg->kind != Ast_FieldValue;
+ } else {
+ mix = arg->kind == Ast_FieldValue;
+ }
+ if (mix) {
+ fail = true;
+ break;
+ }
+ }
+ if (!fail && first_is_field_value) {
+ Token op = {Token_Eq};
+ AstFile *f = first_arg->file();
+ first_arg = ast_field_value(f, ast_ident(f, make_token_ident(first_arg_name)), first_arg, op);
}
}
}
- if (ce->args.count > 0) {
- bool fail = false;
- bool first_is_field_value = (ce->args[0]->kind == Ast_FieldValue);
- for (Ast *arg : ce->args) {
- bool mix = false;
- if (first_is_field_value) {
- mix = arg->kind != Ast_FieldValue;
- } else {
- mix = arg->kind == Ast_FieldValue;
- }
- if (mix) {
- fail = true;
- break;
- }
- }
- if (!fail && first_is_field_value) {
- Token op = {Token_Eq};
- AstFile *f = first_arg->file();
- first_arg = ast_field_value(f, ast_ident(f, make_token_ident(first_arg_name)), first_arg, op);
- }
- }
-
-
-
	auto modified_args = slice_make<Ast *>(heap_allocator(), ce->args.count+1);
modified_args[0] = first_arg;
slice_copy(&modified_args, ce->args, 1);
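
The new check_call_arguments path above first partitions a call's argument list into a positional prefix and a named 'field = value' suffix, then caches that split on the AST node (ce->split_args) so later passes can reuse it. A standalone sketch of the same partitioning, using a hypothetical Arg record in place of the compiler's Ast nodes, could look like this:

#include <cstdio>
#include <string>
#include <vector>

// Hypothetical stand-in for the compiler's call-argument AST node.
struct Arg {
    bool        is_field_value; // true for 'name = expr' arguments
    std::string text;
};

struct SplitArgs {
    std::vector<Arg> positional;
    std::vector<Arg> named;
};

// Split at the first 'field = value' argument; everything before it is
// positional, everything from it onwards is treated as named.
static SplitArgs split_call_args(const std::vector<Arg> &args) {
    SplitArgs out;
    size_t split = args.size();
    for (size_t i = 0; i < args.size(); i++) {
        if (args[i].is_field_value) {
            split = i;
            break;
        }
    }
    out.positional.assign(args.begin(), args.begin() + split);
    out.named.assign(args.begin() + split, args.end());
    return out;
}

int main() {
    std::vector<Arg> args = {
        {false, "123"}, {false, "x"}, {true, "allocator = context.allocator"},
    };
    SplitArgs s = split_call_args(args);
    std::printf("%zu positional, %zu named\n", s.positional.size(), s.named.size());
    return 0;
}
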
diff --git a/src/check_stmt.cpp b/src/check_stmt.cpp
index 09af496ab..a15977b7d 100644
--- a/src/check_stmt.cpp
+++ b/src/check_stmt.cpp
@@ -417,6 +417,7 @@ gb_internal Type *check_assignment_variable(CheckerContext *ctx, Operand *lhs, O
return nullptr;
case Addressing_Variable:
+ check_old_for_or_switch_value_usage(lhs->expr);
break;
case Addressing_MapIndex: {
@@ -1141,8 +1142,14 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_
syntax_error(as_token, "Expected 1 expression after 'in'");
return;
}
+ bool is_addressed = false;
+
Ast *lhs = as->lhs[0];
Ast *rhs = as->rhs[0];
+ if (lhs->kind == Ast_UnaryExpr && lhs->UnaryExpr.op.kind == Token_And) {
+ is_addressed = true;
+ lhs = lhs->UnaryExpr.expr;
+ }
check_expr(ctx, &x, rhs);
check_assignment(ctx, &x, nullptr, str_lit("type switch expression"));
@@ -1281,12 +1288,15 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_
}
}
- bool is_reference = false;
+ bool is_reference = is_addressed;
+ bool old_style = false;
- if (is_ptr &&
+ if (!is_reference &&
+ is_ptr &&
cc->list.count == 1 &&
case_type != nullptr) {
is_reference = true;
+ old_style = true;
}
if (cc->list.count > 1 || saw_nil) {
@@ -1305,9 +1315,12 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_
{
Entity *tag_var = alloc_entity_variable(ctx->scope, lhs->Ident.token, case_type, EntityState_Resolved);
tag_var->flags |= EntityFlag_Used;
+ tag_var->flags |= EntityFlag_SwitchValue;
if (!is_reference) {
tag_var->flags |= EntityFlag_Value;
- tag_var->flags |= EntityFlag_SwitchValue;
+ }
+ if (old_style) {
+ tag_var->flags |= EntityFlag_OldForOrSwitchValue;
}
add_entity(ctx, ctx->scope, lhs, tag_var);
add_entity_use(ctx, lhs, tag_var);
@@ -1469,12 +1482,15 @@ gb_internal void check_range_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags)
Ast *expr = unparen_expr(rs->expr);
+ bool is_possibly_addressable = true;
isize max_val_count = 2;
if (is_ast_range(expr)) {
ast_node(ie, BinaryExpr, expr);
Operand x = {};
Operand y = {};
+ is_possibly_addressable = false;
+
bool ok = check_range(ctx, expr, true, &x, &y, nullptr);
if (!ok) {
goto skip_expr_range_stmt;
@@ -1497,6 +1513,8 @@ gb_internal void check_range_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags)
gb_string_free(t);
goto skip_expr_range_stmt;
} else {
+ is_possibly_addressable = false;
+
if (is_reverse) {
error(node, "#reverse for is not supported for enum types");
}
@@ -1510,7 +1528,8 @@ gb_internal void check_range_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags)
Type *t = base_type(type_deref(operand.type));
switch (t->kind) {
case Type_Basic:
- if (is_type_string(t) && t->Basic.kind != Basic_cstring) {
+ if (t->Basic.kind == Basic_string || t->Basic.kind == Basic_UntypedString) {
+ is_possibly_addressable = false;
array_add(&vals, t_rune);
array_add(&vals, t_int);
if (is_reverse) {
@@ -1529,6 +1548,7 @@ gb_internal void check_range_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags)
case Type_Array:
if (is_ptr) use_by_reference_for_value = true;
+ if (!is_ptr) is_possibly_addressable = operand.mode == Addressing_Variable;
array_add(&vals, t->Array.elem);
array_add(&vals, t_int);
break;
@@ -1575,6 +1595,8 @@ gb_internal void check_range_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags)
array_add(&vals, e->type);
}
+ is_possibly_addressable = false;
+
if (rs->vals.count > 1 && rs->vals[1] != nullptr && count < 3) {
gbString s = type_to_string(t);
error(operand.expr, "Expected a 3-valued expression on the rhs, got (%s)", s);
@@ -1644,8 +1666,13 @@ gb_internal void check_range_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags)
}
Ast * name = lhs[i];
Type *type = rhs[i];
-
Entity *entity = nullptr;
+
+ bool is_addressed = false;
+ if (name->kind == Ast_UnaryExpr && name->UnaryExpr.op.kind == Token_And) {
+ is_addressed = true;
+ name = name->UnaryExpr.expr;
+ }
if (name->kind == Ast_Ident) {
Token token = name->Ident.token;
String str = token.string;
@@ -1659,7 +1686,17 @@ gb_internal void check_range_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags)
entity->flags |= EntityFlag_ForValue;
entity->flags |= EntityFlag_Value;
entity->identifier = name;
- if (i == addressable_index && use_by_reference_for_value) {
+ entity->Variable.for_loop_parent_type = type_of_expr(expr);
+
+ if (is_addressed) {
+ if (is_possibly_addressable && i == addressable_index) {
+ entity->flags &= ~EntityFlag_Value;
+ } else {
+ char const *idx_name = is_map ? "key" : "index";
+ error(token, "The %s variable '%.*s' cannot be made addressable", idx_name, LIT(str));
+ }
+ } else if (i == addressable_index && use_by_reference_for_value) {
+ entity->flags |= EntityFlag_OldForOrSwitchValue;
entity->flags &= ~EntityFlag_Value;
}
if (is_soa) {
@@ -1678,7 +1715,9 @@ gb_internal void check_range_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags)
entity = found;
}
} else {
- error(name, "A variable declaration must be an identifier");
+ gbString s = expr_to_string(lhs[i]);
+ error(name, "A variable declaration must be an identifier, got %s", s);
+ gb_string_free(s);
}
if (entity == nullptr) {
@@ -2207,7 +2246,13 @@ gb_internal void check_return_stmt(CheckerContext *ctx, Ast *node) {
} else if (operands.count != result_count) {
// Ignore error message as it has most likely already been reported
if (all_operands_valid(operands)) {
- error(node, "Expected %td return values, got %td", result_count, operands.count);
+ if (operands.count == 1) {
+ gbString t = type_to_string(operands[0].type);
+ error(node, "Expected %td return values, got %td (%s)", result_count, operands.count, t);
+ gb_string_free(t);
+ } else {
+ error(node, "Expected %td return values, got %td", result_count, operands.count);
+ }
}
} else {
for (isize i = 0; i < result_count; i++) {
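
The range-statement and type-switch changes above accept an optional leading '&' on the bound variable, recording that it was requested by reference and stripping the UnaryExpr node before the usual identifier handling. A rough sketch of that unwrapping step, with a toy Node type standing in for the compiler's Ast, might be:

#include <cstdio>

enum class NodeKind { Ident, UnaryExpr };
enum class TokenKind { And, Other };

// Toy stand-in for the compiler's Ast node.
struct Node {
    NodeKind    kind;
    TokenKind   op;      // only meaningful for UnaryExpr
    Node       *operand; // only meaningful for UnaryExpr
    const char *name;    // only meaningful for Ident
};

// If the declared loop variable is written as '&name', strip the address-of
// node and report that the variable was requested by reference.
static Node *unwrap_addressed(Node *n, bool *is_addressed) {
    *is_addressed = false;
    if (n->kind == NodeKind::UnaryExpr && n->op == TokenKind::And) {
        *is_addressed = true;
        return n->operand;
    }
    return n;
}

int main() {
    Node ident = {NodeKind::Ident, TokenKind::Other, nullptr, "v"};
    Node addr  = {NodeKind::UnaryExpr, TokenKind::And, &ident, nullptr};
    bool by_ref = false;
    Node *name = unwrap_addressed(&addr, &by_ref);
    std::printf("variable '%s', by reference: %s\n", name->name, by_ref ? "yes" : "no");
    return 0;
}
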
diff --git a/src/check_type.cpp b/src/check_type.cpp
index bbfc25a12..a68f83ba9 100644
--- a/src/check_type.cpp
+++ b/src/check_type.cpp
@@ -729,6 +729,12 @@ gb_internal void check_union_type(CheckerContext *ctx, Type *union_type, Ast *no
union_type->Union.kind = ut->kind;
switch (ut->kind) {
case UnionType_no_nil:
+ if (union_type->Union.is_polymorphic && poly_operands == nullptr) {
+ GB_ASSERT(variants.count == 0);
+ if (ut->variants.count != 1) {
+ break;
+ }
+ }
if (variants.count < 2) {
error(ut->align, "A union with #no_nil must have at least 2 variants");
}
@@ -1410,7 +1416,7 @@ gb_internal ParameterValue handle_parameter_value(CheckerContext *ctx, Type *in_
}
-gb_internal Type *check_get_params(CheckerContext *ctx, Scope *scope, Ast *_params, bool *is_variadic_, isize *variadic_index_, bool *success_, isize *specialization_count_, Array<Operand> *operands) {
+gb_internal Type *check_get_params(CheckerContext *ctx, Scope *scope, Ast *_params, bool *is_variadic_, isize *variadic_index_, bool *success_, isize *specialization_count_, Array<Operand> const *operands) {
if (_params == nullptr) {
return nullptr;
}
@@ -1658,7 +1664,6 @@ gb_internal Type *check_get_params(CheckerContext *ctx, Scope *scope, Ast *_para
ExactValue poly_const = {};
if (operands != nullptr && variables.count < operands->count) {
-
Operand op = (*operands)[variables.count];
if (op.expr == nullptr) {
// NOTE(bill): 2019-03-30
@@ -1961,7 +1966,7 @@ gb_internal Type *check_get_results(CheckerContext *ctx, Scope *scope, Ast *_res
// NOTE(bill): 'operands' is for generating non generic procedure type
-gb_internal bool check_procedure_type(CheckerContext *ctx, Type *type, Ast *proc_type_node, Array<Operand> *operands) {
+gb_internal bool check_procedure_type(CheckerContext *ctx, Type *type, Ast *proc_type_node, Array<Operand> const *operands) {
ast_node(pt, ProcType, proc_type_node);
if (ctx->polymorphic_scope == nullptr && ctx->allow_polymorphic_types) {
diff --git a/src/checker.cpp b/src/checker.cpp
index a8227fc2e..2a2cb5c42 100644
--- a/src/checker.cpp
+++ b/src/checker.cpp
@@ -32,7 +32,7 @@ gb_internal bool is_operand_uninit(Operand o) {
}
gb_internal bool check_rtti_type_disallowed(Token const &token, Type *type, char const *format) {
- if (build_context.disallow_rtti && type) {
+ if (build_context.no_rtti && type) {
if (is_type_any(type)) {
gbString t = type_to_string(type);
error(token, format, t);
@@ -285,17 +285,6 @@ gb_internal Scope *create_scope_from_package(CheckerContext *c, AstPackage *pkg)
}
gb_internal void destroy_scope(Scope *scope) {
- for (auto const &entry : scope->elements) {
- Entity *e = entry.value;
- if (e->kind == Entity_Variable) {
- if (!(e->flags & EntityFlag_Used)) {
-#if 0
- warning(e->token, "Unused variable '%.*s'", LIT(e->token.string));
-#endif
- }
- }
- }
-
for (Scope *child = scope->head_child; child != nullptr; child = child->next) {
destroy_scope(child);
}
@@ -1054,7 +1043,7 @@ gb_internal void init_universal(void) {
add_global_bool_constant("ODIN_TEST", bc->command_kind == Command_test);
add_global_bool_constant("ODIN_NO_ENTRY_POINT", bc->no_entry_point);
add_global_bool_constant("ODIN_FOREIGN_ERROR_PROCEDURES", bc->ODIN_FOREIGN_ERROR_PROCEDURES);
- add_global_bool_constant("ODIN_DISALLOW_RTTI", bc->disallow_rtti);
+ add_global_bool_constant("ODIN_NO_RTTI", bc->no_rtti);
add_global_bool_constant("ODIN_VALGRIND_SUPPORT", bc->ODIN_VALGRIND_SUPPORT);
@@ -1742,7 +1731,7 @@ gb_internal void add_implicit_entity(CheckerContext *c, Ast *clause, Entity *e)
gb_internal void add_type_info_type_internal(CheckerContext *c, Type *t);
gb_internal void add_type_info_type(CheckerContext *c, Type *t) {
- if (build_context.disallow_rtti) {
+ if (build_context.no_rtti) {
return;
}
if (t == nullptr) {
@@ -2343,7 +2332,7 @@ gb_internal void generate_minimum_dependency_set(Checker *c, Entity *start) {
str_lit("__multi3"),
);
- FORCE_ADD_RUNTIME_ENTITIES(!build_context.disallow_rtti,
+ FORCE_ADD_RUNTIME_ENTITIES(!build_context.no_rtti,
// Odin types
str_lit("Type_Info"),
@@ -2946,6 +2935,54 @@ gb_internal DECL_ATTRIBUTE_PROC(foreign_block_decl_attribute) {
return false;
}
+gb_internal DECL_ATTRIBUTE_PROC(proc_group_attribute) {
+ if (name == ATTRIBUTE_USER_TAG_NAME) {
+ ExactValue ev = check_decl_attribute_value(c, value);
+ if (ev.kind != ExactValue_String) {
+ error(elem, "Expected a string value for '%.*s'", LIT(name));
+ }
+ return true;
+ } else if (name == "objc_name") {
+ ExactValue ev = check_decl_attribute_value(c, value);
+ if (ev.kind == ExactValue_String) {
+ if (string_is_valid_identifier(ev.value_string)) {
+ ac->objc_name = ev.value_string;
+ } else {
+ error(elem, "Invalid identifier for '%.*s', got '%.*s'", LIT(name), LIT(ev.value_string));
+ }
+ } else {
+ error(elem, "Expected a string value for '%.*s'", LIT(name));
+ }
+ return true;
+ } else if (name == "objc_is_class_method") {
+ ExactValue ev = check_decl_attribute_value(c, value);
+ if (ev.kind == ExactValue_Bool) {
+ ac->objc_is_class_method = ev.value_bool;
+ } else {
+ error(elem, "Expected a boolean value for '%.*s'", LIT(name));
+ }
+ return true;
+ } else if (name == "objc_type") {
+ if (value == nullptr) {
+ error(elem, "Expected a type for '%.*s'", LIT(name));
+ } else {
+ Type *objc_type = check_type(c, value);
+ if (objc_type != nullptr) {
+ if (!has_type_got_objc_class_attribute(objc_type)) {
+ gbString t = type_to_string(objc_type);
+					error(value, "'%.*s' expected a named type with the attribute @(objc_class=), got type %s", LIT(name), t);
+ gb_string_free(t);
+ } else {
+ ac->objc_type = objc_type;
+ }
+ }
+ }
+ return true;
+ }
+ return false;
+}
+
+
gb_internal DECL_ATTRIBUTE_PROC(proc_decl_attribute) {
if (name == ATTRIBUTE_USER_TAG_NAME) {
ExactValue ev = check_decl_attribute_value(c, value);
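
The new proc_group_attribute handler follows the same shape as the other DECL_ATTRIBUTE_PROC handlers: match the attribute name, validate the value's kind, store it on the attribute context, and return true once the name has been recognised. A reduced sketch of that shape, using a simplified value representation rather than the checker's ExactValue, could be:

#include <cstdio>
#include <string>

// Simplified stand-in for the checker's attribute value.
struct Value {
    enum Kind { None, String, Bool } kind = None;
    std::string str;
    bool        b = false;
};

struct AttributeContext {
    std::string objc_name;
    bool        objc_is_class_method = false;
};

// Returns true if 'name' is an attribute this handler understands,
// reporting (here: printing) an error when the value has the wrong kind.
static bool handle_proc_group_attribute(const std::string &name, const Value &v,
                                        AttributeContext *ac) {
    if (name == "objc_name") {
        if (v.kind == Value::String) {
            ac->objc_name = v.str;
        } else {
            std::printf("error: expected a string value for '%s'\n", name.c_str());
        }
        return true;
    }
    if (name == "objc_is_class_method") {
        if (v.kind == Value::Bool) {
            ac->objc_is_class_method = v.b;
        } else {
            std::printf("error: expected a boolean value for '%s'\n", name.c_str());
        }
        return true;
    }
    return false; // unknown attribute: let the caller report it
}

int main() {
    AttributeContext ac;
    Value v; v.kind = Value::String; v.str = "NSWindowDelegate";
    handle_proc_group_attribute("objc_name", v, &ac);
    std::printf("objc_name = %s\n", ac.objc_name.c_str());
    return 0;
}
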
diff --git a/src/checker.hpp b/src/checker.hpp
index 1a95e2772..b06d0a8f9 100644
--- a/src/checker.hpp
+++ b/src/checker.hpp
@@ -199,6 +199,9 @@ struct DeclInfo {
BlockingMutex type_and_value_mutex;
	Array<BlockLabel> labels;
+
+ // NOTE(bill): this is to prevent a race condition since these procedure literals can be created anywhere at any time
+ struct lbModule *code_gen_module;
};
// ProcInfo stores the information needed for checking a procedure
diff --git a/src/entity.cpp b/src/entity.cpp
index d6f4edece..649dd900d 100644
--- a/src/entity.cpp
+++ b/src/entity.cpp
@@ -84,7 +84,9 @@ enum EntityFlag : u64 {
EntityFlag_CustomLinkage_LinkOnce = 1ull<<44,
EntityFlag_Require = 1ull<<50,
- EntityFlag_ByPtr = 1ull<<51, // enforce parameter is passed by pointer
+ EntityFlag_ByPtr = 1ull<<51, // enforce parameter is passed by pointer
+
+ EntityFlag_OldForOrSwitchValue = 1ull<<52,
EntityFlag_Overridden = 1ull<<63,
};
@@ -209,6 +211,8 @@ struct Entity {
ParameterValue param_value;
+ Type *for_loop_parent_type;
+
String thread_local_model;
Entity * foreign_library;
Ast * foreign_library_ident;
diff --git a/src/error.cpp b/src/error.cpp
index defc2593f..eb010eb36 100644
--- a/src/error.cpp
+++ b/src/error.cpp
@@ -265,7 +265,8 @@ gb_internal bool show_error_on_line(TokenPos const &pos, TokenPos end) {
defer (gb_string_free(the_line));
if (the_line != nullptr) {
- String line = make_string(cast(u8 const *)the_line, gb_string_length(the_line));
+ char const *line_text = the_line;
+ isize line_len = gb_string_length(the_line);
// TODO(bill): This assumes ASCII
@@ -285,21 +286,27 @@ gb_internal bool show_error_on_line(TokenPos const &pos, TokenPos end) {
isize squiggle_extra = 0;
- if (line.len > MAX_LINE_LENGTH_PADDED) {
+ if (line_len > MAX_LINE_LENGTH_PADDED) {
i32 left = MAX_TAB_WIDTH;
- line.text += offset-left;
- line.len -= offset-left;
- offset = left+MAX_TAB_WIDTH/2;
- if (line.len > MAX_LINE_LENGTH_PADDED) {
- line.len = MAX_LINE_LENGTH_PADDED;
- if (error_length > line.len-left) {
- error_length = cast(i32)line.len - left;
+ if (offset > 0) {
+ line_text += offset-left;
+ line_len -= offset-left;
+ offset = left+MAX_TAB_WIDTH/2;
+ }
+ if (line_len > MAX_LINE_LENGTH_PADDED) {
+ line_len = MAX_LINE_LENGTH_PADDED;
+ if (error_length > line_len-left) {
+ error_length = cast(i32)line_len - left;
squiggle_extra = 1;
}
}
- error_out("... %.*s ...", LIT(line));
+ if (offset > 0) {
+ error_out("... %.*s ...", cast(i32)line_len, line_text);
+ } else {
+ error_out("%.*s ...", cast(i32)line_len, line_text);
+ }
} else {
- error_out("%.*s", LIT(line));
+ error_out("%.*s", cast(i32)line_len, line_text);
}
error_out("\n\t");
@@ -312,7 +319,7 @@ gb_internal bool show_error_on_line(TokenPos const &pos, TokenPos end) {
error_out("^");
if (end.file_id == pos.file_id) {
if (end.line > pos.line) {
- for (i32 i = offset; i < line.len; i++) {
+ for (i32 i = offset; i < line_len; i++) {
error_out("~");
}
} else if (end.line == pos.line && end.column > pos.column) {
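
The rewritten show_error_on_line above drops the in-place String mutation in favour of a raw pointer/length pair, trims very long source lines to a window around the error column, and only prints a leading '...' when the window was actually shifted. A standalone sketch of that windowing decision, with hypothetical width constants in place of MAX_LINE_LENGTH_PADDED and MAX_TAB_WIDTH, is:

#include <algorithm>
#include <cstdio>
#include <string>

// Hypothetical limits; the compiler uses its own MAX_LINE_LENGTH_PADDED / MAX_TAB_WIDTH.
static const size_t kMaxShown = 40;
static const size_t kLeftPad  = 4;

// Print at most kMaxShown characters of 'line', windowed around 'offset'
// (the error column), with "..." markers when text was cut.
static void print_error_line(const std::string &line, size_t offset) {
    const char *text = line.c_str();
    size_t len = line.size();
    bool shifted = false;

    if (len > kMaxShown && offset > kLeftPad) {
        size_t start = offset - kLeftPad;
        text += start;
        len  -= start;
        shifted = true;
    }
    len = std::min(len, kMaxShown);

    if (shifted) {
        std::printf("... %.*s ...\n", (int)len, text);
    } else if (line.size() > len) {
        std::printf("%.*s ...\n", (int)len, text);
    } else {
        std::printf("%.*s\n", (int)len, text);
    }
}

int main() {
    std::string line(100, 'x');
    line[60] = 'E'; // pretend the error is at column 60
    print_error_line(line, 60);
    print_error_line("short line", 3);
    return 0;
}
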
diff --git a/src/gb/gb.h b/src/gb/gb.h
index bc4c1f27d..3d4bff9b4 100644
--- a/src/gb/gb.h
+++ b/src/gb/gb.h
@@ -3299,12 +3299,39 @@ void const *gb_memrchr(void const *data, u8 c, isize n) {
-gb_inline void *gb_alloc_align (gbAllocator a, isize size, isize alignment) { return a.proc(a.data, gbAllocation_Alloc, size, alignment, NULL, 0, GB_DEFAULT_ALLOCATOR_FLAGS); }
-gb_inline void *gb_alloc (gbAllocator a, isize size) { return gb_alloc_align(a, size, GB_DEFAULT_MEMORY_ALIGNMENT); }
-gb_inline void gb_free (gbAllocator a, void *ptr) { if (ptr != NULL) a.proc(a.data, gbAllocation_Free, 0, 0, ptr, 0, GB_DEFAULT_ALLOCATOR_FLAGS); }
-gb_inline void gb_free_all (gbAllocator a) { a.proc(a.data, gbAllocation_FreeAll, 0, 0, NULL, 0, GB_DEFAULT_ALLOCATOR_FLAGS); }
-gb_inline void *gb_resize (gbAllocator a, void *ptr, isize old_size, isize new_size) { return gb_resize_align(a, ptr, old_size, new_size, GB_DEFAULT_MEMORY_ALIGNMENT); }
-gb_inline void *gb_resize_align(gbAllocator a, void *ptr, isize old_size, isize new_size, isize alignment) { return a.proc(a.data, gbAllocation_Resize, new_size, alignment, ptr, old_size, GB_DEFAULT_ALLOCATOR_FLAGS); }
+gb_inline void *gb_alloc_align (gbAllocator a, isize size, isize alignment) {
+ if (size == 0) {
+ return NULL;
+ }
+ return a.proc(a.data, gbAllocation_Alloc, size, alignment, NULL, 0, GB_DEFAULT_ALLOCATOR_FLAGS);
+}
+gb_inline void *gb_alloc(gbAllocator a, isize size) {
+ return gb_alloc_align(a, size, GB_DEFAULT_MEMORY_ALIGNMENT);
+}
+gb_inline void gb_free(gbAllocator a, void *ptr) {
+ if (ptr != NULL) {
+ a.proc(a.data, gbAllocation_Free, 0, 0, ptr, 0, GB_DEFAULT_ALLOCATOR_FLAGS);
+ }
+}
+gb_inline void gb_free_all(gbAllocator a) {
+ a.proc(a.data, gbAllocation_FreeAll, 0, 0, NULL, 0, GB_DEFAULT_ALLOCATOR_FLAGS);
+}
+gb_inline void *gb_resize(gbAllocator a, void *ptr, isize old_size, isize new_size) {
+ return gb_resize_align(a, ptr, old_size, new_size, GB_DEFAULT_MEMORY_ALIGNMENT);
+}
+gb_inline void *gb_resize_align(gbAllocator a, void *ptr, isize old_size, isize new_size, isize alignment) {
+ if (new_size == 0) {
+ if (ptr != NULL) {
+ return a.proc(a.data, gbAllocation_Free, 0, 0, ptr, old_size, GB_DEFAULT_ALLOCATOR_FLAGS);
+ }
+ return NULL;
+ } else if (ptr == NULL) {
+ return a.proc(a.data, gbAllocation_Alloc, new_size, alignment, NULL, 0, GB_DEFAULT_ALLOCATOR_FLAGS);
+ } else if (old_size == new_size && ((uintptr)ptr % (uintptr)alignment) == 0) {
+ return ptr;
+ }
+ return a.proc(a.data, gbAllocation_Resize, new_size, alignment, ptr, old_size, GB_DEFAULT_ALLOCATOR_FLAGS);
+}
gb_inline void *gb_alloc_copy (gbAllocator a, void const *src, isize size) {
return gb_memcopy(gb_alloc(a, size), src, size);
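
gb_resize_align now normalises the degenerate cases before reaching the allocator proc: a zero new size behaves like a free, a null pointer behaves like a fresh allocation, and an unchanged, already-aligned size is a no-op. Roughly the same decision table, expressed over the C standard allocator (so the alignment argument only feeds the unchanged-size fast path), would be:

#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Rough analogue of the resize rules using malloc/realloc/free.
// Alignment handling is simplified; it is only used for the "same size" fast path.
static void *resize_like(void *ptr, size_t old_size, size_t new_size, size_t alignment) {
    if (new_size == 0) {
        std::free(ptr);                 // resize-to-zero frees (freeing NULL is a no-op)
        return nullptr;
    }
    if (ptr == nullptr) {
        return std::malloc(new_size);   // resizing NULL is a plain allocation
    }
    if (old_size == new_size && ((uintptr_t)ptr % alignment) == 0) {
        return ptr;                     // nothing to do
    }
    return std::realloc(ptr, new_size); // genuine resize
}

int main() {
    void *p = resize_like(nullptr, 0, 64, 16);  // alloc
    p = resize_like(p, 64, 64, 16);             // no-op
    p = resize_like(p, 64, 128, 16);            // grow
    p = resize_like(p, 128, 0, 16);             // free
    std::printf("final pointer: %p\n", p);
    return 0;
}
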
diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp
index 34a401c33..938c9b2ac 100644
--- a/src/llvm_backend.cpp
+++ b/src/llvm_backend.cpp
@@ -157,10 +157,10 @@ gb_internal lbValue lb_equal_proc_for_type(lbModule *m, Type *type) {
return {compare_proc->value, compare_proc->type};
}
- static u32 proc_index = 0;
+	static std::atomic<u32> proc_index;
char buf[32] = {};
- isize n = gb_snprintf(buf, 32, "__$equal%u", ++proc_index);
+ isize n = gb_snprintf(buf, 32, "__$equal%u", 1+proc_index.fetch_add(1));
char *str = gb_alloc_str_len(permanent_allocator(), buf, n-1);
String proc_name = make_string_c(str);
@@ -218,7 +218,9 @@ gb_internal lbValue lb_equal_proc_for_type(lbModule *m, Type *type) {
LLVMBuildRet(p->builder, LLVMConstInt(lb_type(m, t_bool), 0, false));
} else if (type->kind == Type_Union) {
- if (is_type_union_maybe_pointer(type)) {
+ if (type_size_of(type) == 0) {
+ LLVMBuildRet(p->builder, LLVMConstInt(lb_type(m, t_bool), 1, false));
+ } else if (is_type_union_maybe_pointer(type)) {
Type *v = type->Union.variants[0];
Type *pv = alloc_type_pointer(v);
@@ -656,10 +658,10 @@ gb_internal lbValue lb_map_set_proc_for_type(lbModule *m, Type *type) {
GB_ASSERT(*found != nullptr);
return {(*found)->value, (*found)->type};
}
- static u32 proc_index = 0;
+	static std::atomic<u32> proc_index;
char buf[32] = {};
- isize n = gb_snprintf(buf, 32, "__$map_set-%u", ++proc_index);
+ isize n = gb_snprintf(buf, 32, "__$map_set-%u", 1+proc_index.fetch_add(1));
char *str = gb_alloc_str_len(permanent_allocator(), buf, n-1);
String proc_name = make_string_c(str);
@@ -772,56 +774,6 @@ gb_internal lbValue lb_map_set_proc_for_type(lbModule *m, Type *type) {
return {p->value, p->type};
}
-
-gb_internal lbValue lb_generate_anonymous_proc_lit(lbModule *m, String const &prefix_name, Ast *expr, lbProcedure *parent) {
- MUTEX_GUARD(&m->gen->anonymous_proc_lits_mutex);
-
- lbProcedure **found = map_get(&m->gen->anonymous_proc_lits, expr);
- if (found) {
- return lb_find_procedure_value_from_entity(m, (*found)->entity);
- }
-
- ast_node(pl, ProcLit, expr);
-
- // NOTE(bill): Generate a new name
- // parent$count
- isize name_len = prefix_name.len + 1 + 8 + 1;
- char *name_text = gb_alloc_array(permanent_allocator(), char, name_len);
- i32 name_id = cast(i32)m->gen->anonymous_proc_lits.count;
-
- name_len = gb_snprintf(name_text, name_len, "%.*s$anon-%d", LIT(prefix_name), name_id);
- String name = make_string((u8 *)name_text, name_len-1);
-
- Type *type = type_of_expr(expr);
-
- Token token = {};
- token.pos = ast_token(expr).pos;
- token.kind = Token_Ident;
- token.string = name;
- Entity *e = alloc_entity_procedure(nullptr, token, type, pl->tags);
- e->file = expr->file();
- e->decl_info = pl->decl;
- e->code_gen_module = m;
- e->flags |= EntityFlag_ProcBodyChecked;
- lbProcedure *p = lb_create_procedure(m, e);
-
- lbValue value = {};
- value.value = p->value;
- value.type = p->type;
-
- array_add(&m->procedures_to_generate, p);
- if (parent != nullptr) {
- array_add(&parent->children, p);
- } else {
- string_map_set(&m->members, name, value);
- }
-
- map_set(&m->gen->anonymous_proc_lits, expr, p);
-
- return value;
-}
-
-
gb_internal lbValue lb_gen_map_cell_info_ptr(lbModule *m, Type *type) {
lbAddr *found = map_get(&m->map_cell_info_map, type);
if (found) {
@@ -1048,7 +1000,7 @@ struct lbGlobalVariable {
};
gb_internal lbProcedure *lb_create_startup_type_info(lbModule *m) {
- if (build_context.disallow_rtti) {
+ if (build_context.no_rtti) {
return nullptr;
}
Type *proc_type = alloc_type_proc(nullptr, nullptr, 0, nullptr, 0, false, ProcCC_CDecl);
@@ -1513,7 +1465,7 @@ gb_internal WORKER_TASK_PROC(lb_generate_missing_procedures_to_check_worker_proc
lbModule *m = cast(lbModule *)data;
for (isize i = 0; i < m->missing_procedures_to_check.count; i++) {
lbProcedure *p = m->missing_procedures_to_check[i];
- debugf("Generate missing procedure: %.*s\n", LIT(p->name));
+ debugf("Generate missing procedure: %.*s module %p\n", LIT(p->name), m);
lb_generate_procedure(m, p);
}
return 0;
@@ -1577,7 +1529,6 @@ gb_internal void lb_llvm_module_passes(lbGenerator *gen, bool do_threading) {
thread_pool_wait();
}
-
gb_internal String lb_filepath_ll_for_module(lbModule *m) {
String path = concatenate3_strings(permanent_allocator(),
build_context.build_paths[BuildPath_Output].basename,
@@ -1874,25 +1825,28 @@ gb_internal lbProcedure *lb_create_main_procedure(lbModule *m, lbProcedure *star
TEMPORARY_ALLOCATOR_GUARD();
		auto args = array_make<lbValue>(temporary_allocator(), 1);
args[0] = lb_addr_load(p, all_tests_slice);
- lb_emit_call(p, runner, args);
+ lbValue result = lb_emit_call(p, runner, args);
+
+ lbValue exit_runner = lb_find_package_value(m, str_lit("os"), str_lit("exit"));
+		auto exit_args = array_make<lbValue>(temporary_allocator(), 1);
+ exit_args[0] = lb_emit_select(p, result, lb_const_int(m, t_int, 0), lb_const_int(m, t_int, 1));
+ lb_emit_call(p, exit_runner, exit_args, ProcInlining_none);
} else {
if (m->info->entry_point != nullptr) {
lbValue entry_point = lb_find_procedure_value_from_entity(m, m->info->entry_point);
lb_emit_call(p, entry_point, {}, ProcInlining_no_inline);
}
- }
+ if (call_cleanup) {
+ lbValue cleanup_runtime_value = {cleanup_runtime->value, cleanup_runtime->type};
+ lb_emit_call(p, cleanup_runtime_value, {}, ProcInlining_none);
+ }
- if (call_cleanup) {
- lbValue cleanup_runtime_value = {cleanup_runtime->value, cleanup_runtime->type};
- lb_emit_call(p, cleanup_runtime_value, {}, ProcInlining_none);
- }
-
-
- if (is_dll_main) {
- LLVMBuildRet(p->builder, LLVMConstInt(lb_type(m, t_i32), 1, false));
- } else {
- LLVMBuildRet(p->builder, LLVMConstInt(lb_type(m, t_i32), 0, false));
+ if (is_dll_main) {
+ LLVMBuildRet(p->builder, LLVMConstInt(lb_type(m, t_i32), 1, false));
+ } else {
+ LLVMBuildRet(p->builder, LLVMConstInt(lb_type(m, t_i32), 0, false));
+ }
}
lb_end_procedure_body(p);
@@ -2170,7 +2124,7 @@ gb_internal bool lb_generate_code(lbGenerator *gen) {
TIME_SECTION("LLVM Global Variables");
- if (!build_context.disallow_rtti) {
+ if (!build_context.no_rtti) {
lbModule *m = default_module;
{ // Add type info data
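
The name counters in this file (lb_equal_proc_for_type, lb_map_set_proc_for_type) move from a plain static u32 to std::atomic<u32> so that concurrent code-generation threads cannot hand out the same '__$equal%u'-style suffix twice. A minimal sketch of that pattern, assuming any thread may request a fresh symbol name:

#include <atomic>
#include <cstdio>
#include <string>
#include <thread>
#include <vector>

// Each call returns a distinct name, even when called from multiple threads,
// because fetch_add hands every caller a unique counter value.
static std::string next_symbol_name(const char *prefix) {
    static std::atomic<unsigned> counter{0};
    char buf[64];
    std::snprintf(buf, sizeof(buf), "%s%u", prefix, 1 + counter.fetch_add(1));
    return std::string(buf);
}

int main() {
    std::vector<std::thread> threads;
    for (int t = 0; t < 4; t++) {
        threads.emplace_back([] {
            for (int i = 0; i < 3; i++) {
                std::printf("%s\n", next_symbol_name("__$equal").c_str());
            }
        });
    }
    for (auto &th : threads) th.join();
    return 0;
}
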
diff --git a/src/llvm_backend.hpp b/src/llvm_backend.hpp
index 4c4d9703d..ce01485ff 100644
--- a/src/llvm_backend.hpp
+++ b/src/llvm_backend.hpp
@@ -164,7 +164,7 @@ struct lbModule {
	PtrMap<Type *, lbProcedure *> map_get_procs;
	PtrMap<Type *, lbProcedure *> map_set_procs;
- u32 nested_type_name_guid;
+	std::atomic<u32> nested_type_name_guid;
	Array<lbProcedure *> procedures_to_generate;
Array global_procedures_and_types_to_create;
@@ -201,7 +201,7 @@ struct lbGenerator {
PtrMap modules_through_ctx;
lbModule default_module;
- BlockingMutex anonymous_proc_lits_mutex;
+ RecursiveMutex anonymous_proc_lits_mutex;
	PtrMap<Ast *, lbProcedure *> anonymous_proc_lits;
BlockingMutex foreign_mutex;
@@ -346,6 +346,8 @@ struct lbProcedure {
};
+#define ABI_PKG_NAME_SEPARATOR "."
+
#if !ODIN_LLVM_MINIMUM_VERSION_14
#define LLVMConstGEP2(Ty__, ConstantVal__, ConstantIndices__, NumIndices__) LLVMConstGEP(ConstantVal__, ConstantIndices__, NumIndices__)
@@ -545,6 +547,8 @@ gb_internal gb_inline i64 lb_max_zero_init_size(void) {
gb_internal LLVMTypeRef OdinLLVMGetArrayElementType(LLVMTypeRef type);
gb_internal LLVMTypeRef OdinLLVMGetVectorElementType(LLVMTypeRef type);
+gb_internal String lb_filepath_ll_for_module(lbModule *m);
+
#define LB_STARTUP_RUNTIME_PROC_NAME "__$startup_runtime"
#define LB_CLEANUP_RUNTIME_PROC_NAME "__$cleanup_runtime"
#define LB_STARTUP_TYPE_INFO_PROC_NAME "__$startup_type_info"
diff --git a/src/llvm_backend_const.cpp b/src/llvm_backend_const.cpp
index c9d2f5b26..2a121ff5d 100644
--- a/src/llvm_backend_const.cpp
+++ b/src/llvm_backend_const.cpp
@@ -473,6 +473,7 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo
if (value.kind == ExactValue_Procedure) {
lbValue res = {};
Ast *expr = unparen_expr(value.value_procedure);
+ GB_ASSERT(expr != nullptr);
if (expr->kind == Ast_ProcLit) {
res = lb_generate_anonymous_proc_lit(m, str_lit("_proclit"), expr);
} else {
@@ -482,7 +483,10 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo
GB_ASSERT(res.value != nullptr);
GB_ASSERT(LLVMGetValueKind(res.value) == LLVMFunctionValueKind);
- res.value = LLVMConstPointerCast(res.value, lb_type(m, res.type));
+ if (LLVMGetIntrinsicID(res.value) == 0) {
+		// NOTE(bill): do not cast intrinsics as they are not really procedures that can be cast
+ res.value = LLVMConstPointerCast(res.value, lb_type(m, res.type));
+ }
return res;
}
diff --git a/src/llvm_backend_expr.cpp b/src/llvm_backend_expr.cpp
index f95e351ce..5e6831fc2 100644
--- a/src/llvm_backend_expr.cpp
+++ b/src/llvm_backend_expr.cpp
@@ -1325,6 +1325,15 @@ handle_op:;
return {};
}
+gb_internal bool lb_is_empty_string_constant(Ast *expr) {
+ if (expr->tav.value.kind == ExactValue_String &&
+ is_type_string(expr->tav.type)) {
+ String s = expr->tav.value.value_string;
+ return s.len == 0;
+ }
+ return false;
+}
+
gb_internal lbValue lb_build_binary_expr(lbProcedure *p, Ast *expr) {
ast_node(be, BinaryExpr, expr);
@@ -1373,15 +1382,33 @@ gb_internal lbValue lb_build_binary_expr(lbProcedure *p, Ast *expr) {
case Token_CmpEq:
case Token_NotEq:
if (is_type_untyped_nil(be->right->tav.type)) {
+ // `x == nil` or `x != nil`
lbValue left = lb_build_expr(p, be->left);
lbValue cmp = lb_emit_comp_against_nil(p, be->op.kind, left);
Type *type = default_type(tv.type);
return lb_emit_conv(p, cmp, type);
} else if (is_type_untyped_nil(be->left->tav.type)) {
+ // `nil == x` or `nil != x`
lbValue right = lb_build_expr(p, be->right);
lbValue cmp = lb_emit_comp_against_nil(p, be->op.kind, right);
Type *type = default_type(tv.type);
return lb_emit_conv(p, cmp, type);
+ } else if (lb_is_empty_string_constant(be->right)) {
+ // `x == ""` or `x != ""`
+ lbValue s = lb_build_expr(p, be->left);
+ s = lb_emit_conv(p, s, t_string);
+ lbValue len = lb_string_len(p, s);
+ lbValue cmp = lb_emit_comp(p, be->op.kind, len, lb_const_int(p->module, t_int, 0));
+ Type *type = default_type(tv.type);
+ return lb_emit_conv(p, cmp, type);
+ } else if (lb_is_empty_string_constant(be->left)) {
+ // `"" == x` or `"" != x`
+ lbValue s = lb_build_expr(p, be->right);
+ s = lb_emit_conv(p, s, t_string);
+ lbValue len = lb_string_len(p, s);
+ lbValue cmp = lb_emit_comp(p, be->op.kind, len, lb_const_int(p->module, t_int, 0));
+ Type *type = default_type(tv.type);
+ return lb_emit_conv(p, cmp, type);
}
/*fallthrough*/
case Token_Lt:
@@ -2246,7 +2273,6 @@ gb_internal lbValue lb_compare_records(lbProcedure *p, TokenKind op_kind, lbValu
-
gb_internal lbValue lb_emit_comp(lbProcedure *p, TokenKind op_kind, lbValue left, lbValue right) {
Type *a = core_type(left.type);
Type *b = core_type(right.type);
@@ -2254,7 +2280,10 @@ gb_internal lbValue lb_emit_comp(lbProcedure *p, TokenKind op_kind, lbValue left
GB_ASSERT(gb_is_between(op_kind, Token__ComparisonBegin+1, Token__ComparisonEnd-1));
lbValue nil_check = {};
- if (is_type_untyped_nil(left.type)) {
+
+ if (is_type_array_like(left.type) || is_type_array_like(right.type)) {
+ // don't do `nil` check if it is array-like
+ } else if (is_type_untyped_nil(left.type)) {
nil_check = lb_emit_comp_against_nil(p, op_kind, right);
} else if (is_type_untyped_nil(right.type)) {
nil_check = lb_emit_comp_against_nil(p, op_kind, left);
@@ -2310,7 +2339,7 @@ gb_internal lbValue lb_emit_comp(lbProcedure *p, TokenKind op_kind, lbValue left
lbValue res = lb_emit_comp(p, op_kind, val, lb_const_nil(p->module, val.type));
return lb_emit_conv(p, res, t_bool);
}
- if (is_type_array(a) || is_type_enumerated_array(a)) {
+ if (is_type_array_like(a)) {
Type *tl = base_type(a);
lbValue lhs = lb_address_from_load_or_generate_local(p, left);
lbValue rhs = lb_address_from_load_or_generate_local(p, right);
@@ -2984,7 +3013,7 @@ gb_internal lbValue lb_build_unary_and(lbProcedure *p, Ast *expr) {
isize arg_count = 6;
- if (build_context.disallow_rtti) {
+ if (build_context.no_rtti) {
arg_count = 4;
}
@@ -2996,7 +3025,7 @@ gb_internal lbValue lb_build_unary_and(lbProcedure *p, Ast *expr) {
args[2] = lb_const_int(p->module, t_i32, pos.line);
args[3] = lb_const_int(p->module, t_i32, pos.column);
- if (!build_context.disallow_rtti) {
+ if (!build_context.no_rtti) {
args[4] = lb_typeid(p->module, src_type);
args[5] = lb_typeid(p->module, dst_type);
}
@@ -3012,7 +3041,7 @@ gb_internal lbValue lb_build_unary_and(lbProcedure *p, Ast *expr) {
}
lbValue data_ptr = lb_emit_struct_ev(p, v, 0);
if ((p->state_flags & StateFlag_no_type_assert) == 0) {
- GB_ASSERT(!build_context.disallow_rtti);
+ GB_ASSERT(!build_context.no_rtti);
lbValue any_id = lb_emit_struct_ev(p, v, 1);
@@ -4142,7 +4171,7 @@ gb_internal lbAddr lb_build_addr_compound_lit(lbProcedure *p, Ast *expr) {
// HACK TODO(bill): THIS IS A MASSIVE HACK!!!!
if (is_type_union(ft) && !are_types_identical(fet, ft) && !is_type_untyped(fet)) {
- GB_ASSERT_MSG(union_variant_index(ft, fet) > 0, "%s", type_to_string(fet));
+ GB_ASSERT_MSG(union_variant_index(ft, fet) >= 0, "%s", type_to_string(fet));
lb_emit_store_union_variant(p, gep, field_expr, fet);
} else {
@@ -4490,8 +4519,9 @@ gb_internal lbAddr lb_build_addr_internal(lbProcedure *p, Ast *expr) {
Selection sel = lookup_field(type, selector, false);
GB_ASSERT(sel.entity != nullptr);
if (sel.pseudo_field) {
- GB_ASSERT(sel.entity->kind == Entity_Procedure);
+ GB_ASSERT(sel.entity->kind == Entity_Procedure || sel.entity->kind == Entity_ProcGroup);
Entity *e = entity_of_node(sel_node);
+ GB_ASSERT(e->kind == Entity_Procedure);
return lb_addr(lb_find_value_from_entity(p->module, e));
}
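
lb_build_binary_expr above special-cases comparisons against a constant empty string: rather than materialising "" and running the generic string comparison, it converts the other operand to a string and compares its length against zero, i.e. the usual 's == ""  <=>  len(s) == 0' peephole. A sketch of the same idea over a plain (data, len) string, with str_eq standing in for the generic comparison:

#include <cstddef>
#include <cstdio>
#include <cstring>

// Slice-style string, analogous to Odin's (data, len) representation.
struct Str {
    const char *data;
    size_t      len;
};

// Generic comparison: length check, then memcmp.
static bool str_eq(Str a, Str b) {
    return a.len == b.len && (a.len == 0 || std::memcmp(a.data, b.data, a.len) == 0);
}

// Peephole: comparing against a known-empty string only needs the length.
static bool str_is_empty(Str s) {
    return s.len == 0;
}

int main() {
    Str empty = {"", 0};
    Str hello = {"hello", 5};
    // The two formulations agree, but the second never touches the bytes.
    std::printf("%d %d\n", str_eq(hello, empty), str_is_empty(hello));
    std::printf("%d %d\n", str_eq(empty, empty), str_is_empty(empty));
    return 0;
}
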
diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp
index e5f3e3081..ad8a1816a 100644
--- a/src/llvm_backend_general.cpp
+++ b/src/llvm_backend_general.cpp
@@ -334,10 +334,35 @@ gb_internal bool lb_is_instr_terminating(LLVMValueRef instr) {
return false;
}
+gb_internal lbModule *lb_module_of_expr(lbGenerator *gen, Ast *expr) {
+ GB_ASSERT(expr != nullptr);
+ lbModule **found = nullptr;
+ AstFile *file = expr->file();
+ if (file) {
+ found = map_get(&gen->modules, cast(void *)file);
+ if (found) {
+ return *found;
+ }
+
+ if (file->pkg) {
+ found = map_get(&gen->modules, cast(void *)file->pkg);
+ if (found) {
+ return *found;
+ }
+ }
+ }
+
+ return &gen->default_module;
+}
gb_internal lbModule *lb_module_of_entity(lbGenerator *gen, Entity *e) {
GB_ASSERT(e != nullptr);
lbModule **found = nullptr;
+ if (e->kind == Entity_Procedure &&
+ e->decl_info &&
+ e->decl_info->code_gen_module) {
+ return e->decl_info->code_gen_module;
+ }
if (e->file) {
found = map_get(&gen->modules, cast(void *)e->file);
if (found) {
@@ -1298,6 +1323,7 @@ gb_internal lbValue lb_emit_union_tag_value(lbProcedure *p, lbValue u) {
gb_internal void lb_emit_store_union_variant_tag(lbProcedure *p, lbValue parent, Type *variant_type) {
Type *t = type_deref(parent.type);
+ GB_ASSERT(is_type_union(t));
if (is_type_union_maybe_pointer(t) || type_size_of(t) == 0) {
// No tag needed!
@@ -1377,7 +1403,7 @@ gb_internal String lb_mangle_name(lbModule *m, Entity *e) {
char *new_name = gb_alloc_array(permanent_allocator(), char, max_len);
isize new_name_len = gb_snprintf(
new_name, max_len,
- "%.*s.%.*s", LIT(pkgn), LIT(name)
+ "%.*s" ABI_PKG_NAME_SEPARATOR "%.*s", LIT(pkgn), LIT(name)
);
if (require_suffix_id) {
char *str = new_name + new_name_len-1;
@@ -1426,8 +1452,8 @@ gb_internal String lb_set_nested_type_name_ir_mangled_name(Entity *e, lbProcedur
if (p != nullptr) {
isize name_len = p->name.len + 1 + ts_name.len + 1 + 10 + 1;
char *name_text = gb_alloc_array(permanent_allocator(), char, name_len);
- u32 guid = ++p->module->nested_type_name_guid;
- name_len = gb_snprintf(name_text, name_len, "%.*s.%.*s-%u", LIT(p->name), LIT(ts_name), guid);
+ u32 guid = 1+p->module->nested_type_name_guid.fetch_add(1);
+ name_len = gb_snprintf(name_text, name_len, "%.*s" ABI_PKG_NAME_SEPARATOR "%.*s-%u", LIT(p->name), LIT(ts_name), guid);
String name = make_string(cast(u8 *)name_text, name_len-1);
e->TypeName.ir_mangled_name = name;
@@ -1436,9 +1462,8 @@ gb_internal String lb_set_nested_type_name_ir_mangled_name(Entity *e, lbProcedur
		// NOTE(bill): a nested type may be required before its parameter procedure exists. Just give it a temp name for now
isize name_len = 9 + 1 + ts_name.len + 1 + 10 + 1;
char *name_text = gb_alloc_array(permanent_allocator(), char, name_len);
- static u32 guid = 0;
- guid += 1;
- name_len = gb_snprintf(name_text, name_len, "_internal.%.*s-%u", LIT(ts_name), guid);
+		static std::atomic<u32> guid;
+ name_len = gb_snprintf(name_text, name_len, "_internal" ABI_PKG_NAME_SEPARATOR "%.*s-%u", LIT(ts_name), 1+guid.fetch_add(1));
String name = make_string(cast(u8 *)name_text, name_len-1);
e->TypeName.ir_mangled_name = name;
@@ -2662,9 +2687,12 @@ gb_internal lbValue lb_find_ident(lbProcedure *p, lbModule *m, Entity *e, Ast *e
gb_internal lbValue lb_find_procedure_value_from_entity(lbModule *m, Entity *e) {
+ lbGenerator *gen = m->gen;
+
GB_ASSERT(is_type_proc(e->type));
e = strip_entity_wrapping(e);
GB_ASSERT(e != nullptr);
+ GB_ASSERT(e->kind == Entity_Procedure);
lbValue *found = nullptr;
rw_mutex_shared_lock(&m->values_mutex);
@@ -2678,27 +2706,34 @@ gb_internal lbValue lb_find_procedure_value_from_entity(lbModule *m, Entity *e)
lbModule *other_module = m;
if (USE_SEPARATE_MODULES) {
- other_module = lb_module_of_entity(m->gen, e);
+ other_module = lb_module_of_entity(gen, e);
}
if (other_module == m) {
- debugf("Missing Procedure (lb_find_procedure_value_from_entity): %.*s\n", LIT(e->token.string));
+ debugf("Missing Procedure (lb_find_procedure_value_from_entity): %.*s module %p\n", LIT(e->token.string), m);
}
ignore_body = other_module != m;
lbProcedure *missing_proc = lb_create_procedure(m, e, ignore_body);
if (ignore_body) {
+ mutex_lock(&gen->anonymous_proc_lits_mutex);
+ defer (mutex_unlock(&gen->anonymous_proc_lits_mutex));
+
GB_ASSERT(other_module != nullptr);
rw_mutex_shared_lock(&other_module->values_mutex);
auto *found = map_get(&other_module->values, e);
rw_mutex_shared_unlock(&other_module->values_mutex);
if (found == nullptr) {
+			// NOTE: this is the race condition guarded by anonymous_proc_lits_mutex: without the lock, two threads could both miss here and create the procedure twice
lbProcedure *missing_proc_in_other_module = lb_create_procedure(other_module, e, false);
array_add(&other_module->missing_procedures_to_check, missing_proc_in_other_module);
}
} else {
array_add(&m->missing_procedures_to_check, missing_proc);
}
+
+ rw_mutex_shared_lock(&m->values_mutex);
found = map_get(&m->values, e);
+ rw_mutex_shared_unlock(&m->values_mutex);
if (found) {
return *found;
}
@@ -2708,6 +2743,63 @@ gb_internal lbValue lb_find_procedure_value_from_entity(lbModule *m, Entity *e)
}
+
+gb_internal lbValue lb_generate_anonymous_proc_lit(lbModule *m, String const &prefix_name, Ast *expr, lbProcedure *parent) {
+ lbGenerator *gen = m->gen;
+
+ mutex_lock(&gen->anonymous_proc_lits_mutex);
+ defer (mutex_unlock(&gen->anonymous_proc_lits_mutex));
+
+ TokenPos pos = ast_token(expr).pos;
+ lbProcedure **found = map_get(&gen->anonymous_proc_lits, expr);
+ if (found) {
+ return lb_find_procedure_value_from_entity(m, (*found)->entity);
+ }
+
+ ast_node(pl, ProcLit, expr);
+
+ // NOTE(bill): Generate a new name
+ // parent$count
+ isize name_len = prefix_name.len + 6 + 11;
+ char *name_text = gb_alloc_array(permanent_allocator(), char, name_len);
+	static std::atomic<i32> name_id;
+ name_len = gb_snprintf(name_text, name_len, "%.*s$anon-%d", LIT(prefix_name), 1+name_id.fetch_add(1));
+ String name = make_string((u8 *)name_text, name_len-1);
+
+ Type *type = type_of_expr(expr);
+
+ GB_ASSERT(pl->decl->entity == nullptr);
+ Token token = {};
+ token.pos = ast_token(expr).pos;
+ token.kind = Token_Ident;
+ token.string = name;
+ Entity *e = alloc_entity_procedure(nullptr, token, type, pl->tags);
+ e->file = expr->file();
+
+ // NOTE(bill): this is to prevent a race condition since these procedure literals can be created anywhere at any time
+ pl->decl->code_gen_module = m;
+ e->decl_info = pl->decl;
+ pl->decl->entity = e;
+ e->flags |= EntityFlag_ProcBodyChecked;
+
+ lbProcedure *p = lb_create_procedure(m, e);
+ GB_ASSERT(e->code_gen_module == m);
+
+ lbValue value = {};
+ value.value = p->value;
+ value.type = p->type;
+
+ map_set(&gen->anonymous_proc_lits, expr, p);
+ array_add(&m->procedures_to_generate, p);
+ if (parent != nullptr) {
+ array_add(&parent->children, p);
+ } else {
+ string_map_set(&m->members, name, value);
+ }
+ return value;
+}
+
+
gb_internal lbAddr lb_add_global_generated(lbModule *m, Type *type, lbValue value, Entity **entity_) {
GB_ASSERT(type != nullptr);
type = default_type(type);
@@ -2915,8 +3007,9 @@ gb_internal lbAddr lb_add_local(lbProcedure *p, Type *type, Entity *e, bool zero
LLVMPositionBuilderAtEnd(p->builder, p->decl_block->block);
char const *name = "";
- if (e != nullptr) {
- // name = alloc_cstring(permanent_allocator(), e->token.string);
+ if (e != nullptr && e->token.string.len > 0 && e->token.string != "_") {
+ // NOTE(bill): for debugging purposes only
+ name = alloc_cstring(permanent_allocator(), e->token.string);
}
LLVMTypeRef llvm_type = lb_type(p->module, type);
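
lb_generate_anonymous_proc_lit, relocated here, now holds gen->anonymous_proc_lits_mutex for the whole lookup-or-create sequence (and lb_find_procedure_value_from_entity takes the same, now recursive, mutex on its cross-module path), so two threads can no longer both miss the cache and create the procedure twice. The underlying get-or-create-under-a-lock shape, sketched with a std::recursive_mutex and an ordinary map in place of the backend's PtrMap:

#include <cstdio>
#include <map>
#include <mutex>
#include <string>

struct Procedure {
    std::string name;
};

static std::recursive_mutex       g_cache_mutex;
static std::map<int, Procedure *> g_cache; // keyed by a stand-in for the Ast pointer

// Look up an existing procedure for 'key', or create one while still holding
// the lock, so concurrent callers cannot both observe a miss.
static Procedure *get_or_create(int key) {
    std::lock_guard<std::recursive_mutex> lock(g_cache_mutex);
    auto it = g_cache.find(key);
    if (it != g_cache.end()) {
        return it->second;
    }
    Procedure *p = new Procedure{"anon-" + std::to_string(key)};
    g_cache[key] = p;
    return p;
}

int main() {
    Procedure *a = get_or_create(42);
    Procedure *b = get_or_create(42);
    std::printf("same object: %s (%s)\n", a == b ? "yes" : "no", a->name.c_str());
    return 0;
}
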
diff --git a/src/llvm_backend_opt.cpp b/src/llvm_backend_opt.cpp
index 141ee88c7..54e667a0b 100644
--- a/src/llvm_backend_opt.cpp
+++ b/src/llvm_backend_opt.cpp
@@ -375,16 +375,6 @@ gb_internal void lb_run_function_pass_manager(LLVMPassManagerRef fpm, lbProcedur
return;
}
LLVMRunFunctionPassManager(fpm, p->value);
- switch (pass_manager_kind) {
- case lbFunctionPassManager_none:
- return;
- case lbFunctionPassManager_default:
- case lbFunctionPassManager_default_without_memcpy:
- if (build_context.optimization_level < 0) {
- return;
- }
- break;
- }
// NOTE(bill): LLVMAddDCEPass doesn't seem to be exported in the official DLL's for LLVM
// which means we cannot rely upon it
// This is also useful for read the .ll for debug purposes because a lot of instructions
diff --git a/src/llvm_backend_proc.cpp b/src/llvm_backend_proc.cpp
index 8b9f8b249..c27c55337 100644
--- a/src/llvm_backend_proc.cpp
+++ b/src/llvm_backend_proc.cpp
@@ -757,7 +757,7 @@ gb_internal void lb_build_nested_proc(lbProcedure *p, AstProcLit *pd, Entity *e)
char *name_text = gb_alloc_array(permanent_allocator(), char, name_len);
i32 guid = cast(i32)p->children.count;
- name_len = gb_snprintf(name_text, name_len, "%.*s.%.*s-%d", LIT(p->name), LIT(pd_name), guid);
+ name_len = gb_snprintf(name_text, name_len, "%.*s" ABI_PKG_NAME_SEPARATOR "%.*s-%d", LIT(p->name), LIT(pd_name), guid);
String name = make_string(cast(u8 *)name_text, name_len-1);
e->Procedure.link_name = name;
@@ -1160,7 +1160,13 @@ gb_internal lbValue lb_emit_call(lbProcedure *p, lbValue value, Array c
}
- Entity **found = map_get(&p->module->procedure_values, value.value);
+ LLVMValueRef the_proc_value = value.value;
+
+ if (LLVMIsAConstantExpr(the_proc_value)) {
+ // NOTE(bill): it's a bit cast
+ the_proc_value = LLVMGetOperand(the_proc_value, 0);
+ }
+ Entity **found = map_get(&p->module->procedure_values, the_proc_value);
if (found != nullptr) {
Entity *e = *found;
if (e != nullptr && entity_has_deferred_procedure(e)) {
@@ -3145,6 +3151,18 @@ gb_internal lbValue lb_build_call_expr(lbProcedure *p, Ast *expr) {
}
return res;
}
+
+gb_internal void lb_add_values_to_array(lbProcedure *p, Array<lbValue> *args, lbValue value) {
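+ // Tuple-typed values are flattened into one argument per tuple element; everything else is appended as-is.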
+ if (is_type_tuple(value.type)) {
+ for_array(i, value.type->Tuple.variables) {
+ lbValue sub_value = lb_emit_struct_ev(p, value, cast(i32)i);
+ array_add(args, sub_value);
+ }
+ } else {
+ array_add(args, value);
+ }
+}
+
gb_internal lbValue lb_build_call_expr_internal(lbProcedure *p, Ast *expr) {
lbModule *m = p->module;
@@ -3219,245 +3237,147 @@ gb_internal lbValue lb_build_call_expr_internal(lbProcedure *p, Ast *expr) {
GB_ASSERT(proc_type_->kind == Type_Proc);
TypeProc *pt = &proc_type_->Proc;
- if (is_call_expr_field_value(ce)) {
- auto args = array_make<lbValue>(permanent_allocator(), pt->param_count);
+ GB_ASSERT(ce->split_args != nullptr);
- for_array(arg_index, ce->args) {
- Ast *arg = ce->args[arg_index];
- ast_node(fv, FieldValue, arg);
- GB_ASSERT(fv->field->kind == Ast_Ident);
- String name = fv->field->Ident.token.string;
- isize index = lookup_procedure_parameter(pt, name);
- GB_ASSERT(index >= 0);
- TypeAndValue tav = type_and_value_of_expr(fv->value);
- if (tav.mode == Addressing_Type) {
- args[index] = lb_const_nil(m, tav.type);
- } else {
- args[index] = lb_build_expr(p, fv->value);
- }
- }
- TypeTuple *params = &pt->params->Tuple;
- for (isize i = 0; i < args.count; i++) {
- Entity *e = params->variables[i];
- if (e->kind == Entity_TypeName) {
- args[i] = lb_const_nil(m, e->type);
- } else if (e->kind == Entity_Constant) {
- continue;
- } else {
- GB_ASSERT(e->kind == Entity_Variable);
- if (args[i].value == nullptr) {
- args[i] = lb_handle_param_value(p, e->type, e->Variable.param_value, ast_token(expr).pos);
- } else if (is_type_typeid(e->type) && !is_type_typeid(args[i].type)) {
- args[i] = lb_typeid(p->module, args[i].type);
- } else {
- args[i] = lb_emit_conv(p, args[i], e->type);
- }
- }
- }
+ auto args = array_make<lbValue>(permanent_allocator(), 0, pt->param_count);
- for (isize i = 0; i < args.count; i++) {
- Entity *e = params->variables[i];
- if (args[i].type == nullptr) {
- continue;
- } else if (is_type_untyped_uninit(args[i].type)) {
- args[i] = lb_const_undef(m, e->type);
- } else if (is_type_untyped_nil(args[i].type)) {
- args[i] = lb_const_nil(m, e->type);
- }
- }
-
- return lb_emit_call(p, value, args, ce->inlining);
- }
-
- isize arg_index = 0;
-
- isize arg_count = 0;
- for_array(i, ce->args) {
- Ast *arg = ce->args[i];
- TypeAndValue tav = type_and_value_of_expr(arg);
- GB_ASSERT_MSG(tav.mode != Addressing_Invalid, "%s %s %d", expr_to_string(arg), expr_to_string(expr), tav.mode);
- GB_ASSERT_MSG(tav.mode != Addressing_ProcGroup, "%s", expr_to_string(arg));
- Type *at = tav.type;
- if (is_type_tuple(at)) {
- arg_count += at->Tuple.variables.count;
- } else {
- arg_count++;
- }
- }
-
- isize param_count = 0;
- if (pt->params) {
- GB_ASSERT(pt->params->kind == Type_Tuple);
- param_count = pt->params->Tuple.variables.count;
- }
-
- auto args = array_make<lbValue>(permanent_allocator(), cast(isize)gb_max(param_count, arg_count));
- isize variadic_index = pt->variadic_index;
- bool variadic = pt->variadic && variadic_index >= 0;
- bool vari_expand = ce->ellipsis.pos.line != 0;
+ bool vari_expand = (ce->ellipsis.pos.line != 0);
bool is_c_vararg = pt->c_vararg;
- String proc_name = {};
- if (p->entity != nullptr) {
- proc_name = p->entity->token.string;
+ for_array(i, ce->split_args->positional) {
+ Entity *e = pt->params->Tuple.variables[i];
+ if (e->kind == Entity_TypeName) {
+ array_add(&args, lb_const_nil(p->module, e->type));
+ continue;
+ } else if (e->kind == Entity_Constant) {
+ array_add(&args, lb_const_value(p->module, e->type, e->Constant.value));
+ continue;
+ }
+
+ GB_ASSERT(e->kind == Entity_Variable);
+
+ if (pt->variadic && pt->variadic_index == i) {
+ lbValue variadic_args = lb_const_nil(p->module, e->type);
+ auto variadic = slice(ce->split_args->positional, pt->variadic_index, ce->split_args->positional.count);
+ if (variadic.count != 0) {
+ // variadic call argument generation
+ Type *slice_type = e->type;
+ GB_ASSERT(slice_type->kind == Type_Slice);
+
+ if (is_c_vararg) {
+ GB_ASSERT(!vari_expand);
+
+ Type *elem_type = slice_type->Slice.elem;
+
+ for (Ast *var_arg : variadic) {
+ lbValue arg = lb_build_expr(p, var_arg);
+ if (is_type_any(elem_type)) {
+ array_add(&args, lb_emit_conv(p, arg, default_type(arg.type)));
+ } else {
+ array_add(&args, lb_emit_conv(p, arg, elem_type));
+ }
+ }
+ break;
+ } else if (vari_expand) {
+ GB_ASSERT(variadic.count == 1);
+ variadic_args = lb_build_expr(p, variadic[0]);
+ variadic_args = lb_emit_conv(p, variadic_args, slice_type);
+ } else {
+ Type *elem_type = slice_type->Slice.elem;
+
+ auto var_args = array_make<lbValue>(heap_allocator(), 0, variadic.count);
+ defer (array_free(&var_args));
+ for (Ast *var_arg : variadic) {
+ lbValue v = lb_build_expr(p, var_arg);
+ lb_add_values_to_array(p, &var_args, v);
+ }
+ isize slice_len = var_args.count;
+ if (slice_len > 0) {
+ lbAddr slice = lb_add_local_generated(p, slice_type, true);
+ lbAddr base_array = lb_add_local_generated(p, alloc_type_array(elem_type, slice_len), true);
+
+ for (isize i = 0; i < var_args.count; i++) {
+ lbValue addr = lb_emit_array_epi(p, base_array.addr, cast(i32)i);
+ lbValue var_arg = var_args[i];
+ var_arg = lb_emit_conv(p, var_arg, elem_type);
+ lb_emit_store(p, addr, var_arg);
+ }
+
+ lbValue base_elem = lb_emit_array_epi(p, base_array.addr, 0);
+ lbValue len = lb_const_int(p->module, t_int, slice_len);
+ lb_fill_slice(p, slice, base_elem, len);
+
+ variadic_args = lb_addr_load(p, slice);
+ }
+ }
+ }
+ array_add(&args, variadic_args);
+
+ break;
+ } else {
+ lbValue value = lb_build_expr(p, ce->split_args->positional[i]);
+ lb_add_values_to_array(p, &args, value);
+ }
}
+
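+ // Pad the argument array to the full parameter count so named arguments and default parameter values can be filled in by index; C varargs keep their actual length.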
+ if (!is_c_vararg) {
+ array_resize(&args, pt->param_count);
+ }
+
+ for (Ast *arg : ce->split_args->named) {
+ ast_node(fv, FieldValue, arg);
+ GB_ASSERT(fv->field->kind == Ast_Ident);
+ String name = fv->field->Ident.token.string;
+ gb_unused(name);
+ isize param_index = lookup_procedure_parameter(pt, name);
+ GB_ASSERT(param_index >= 0);
+
+ lbValue value = lb_build_expr(p, fv->value);
+ GB_ASSERT(!is_type_tuple(value.type));
+ args[param_index] = value;
+ }
+
TokenPos pos = ast_token(ce->proc).pos;
- TypeTuple *param_tuple = nullptr;
- if (pt->params) {
- GB_ASSERT(pt->params->kind == Type_Tuple);
- param_tuple = &pt->params->Tuple;
- }
-
- for_array(i, ce->args) {
- Ast *arg = ce->args[i];
- TypeAndValue arg_tv = type_and_value_of_expr(arg);
- if (arg_tv.mode == Addressing_Type) {
- args[arg_index++] = lb_const_nil(m, arg_tv.type);
- } else {
- lbValue a = lb_build_expr(p, arg);
- Type *at = a.type;
- if (at->kind == Type_Tuple) {
- lbTupleFix *tf = map_get(&p->tuple_fix_map, a.value);
- if (tf) {
- for_array(j, tf->values) {
- args[arg_index++] = tf->values[j];
- }
- } else {
- for_array(j, at->Tuple.variables) {
- lbValue v = lb_emit_struct_ev(p, a, cast(i32)j);
- args[arg_index++] = v;
- }
- }
- } else {
- args[arg_index++] = a;
- }
- }
- }
-
-
- if (param_count > 0) {
- GB_ASSERT_MSG(pt->params != nullptr, "%s %td", expr_to_string(expr), pt->param_count);
- GB_ASSERT(param_count < 1000000);
-
- if (arg_count < param_count) {
- isize end = cast(isize)param_count;
- if (variadic) {
- end = variadic_index;
- }
- while (arg_index < end) {
- Entity *e = param_tuple->variables[arg_index];
- GB_ASSERT(e->kind == Entity_Variable);
- args[arg_index++] = lb_handle_param_value(p, e->type, e->Variable.param_value, ast_token(expr).pos);
- }
- }
+ if (pt->params != nullptr) {
+ isize min_count = pt->params->Tuple.variables.count;
if (is_c_vararg) {
- GB_ASSERT(variadic);
- GB_ASSERT(!vari_expand);
- isize i = 0;
- for (; i < variadic_index; i++) {
- Entity *e = param_tuple->variables[i];
- if (e->kind == Entity_Variable) {
- args[i] = lb_emit_conv(p, args[i], e->type);
+ min_count -= 1;
+ }
+ GB_ASSERT(args.count >= min_count);
+ for_array(arg_index, pt->params->Tuple.variables) {
+ Entity *e = pt->params->Tuple.variables[arg_index];
+ if (pt->variadic && arg_index == pt->variadic_index) {
+ if (!is_c_vararg && args[arg_index].value == 0) {
+ args[arg_index] = lb_const_nil(p->module, e->type);
}
+ continue;
}
- Type *variadic_type = param_tuple->variables[i]->type;
- GB_ASSERT(is_type_slice(variadic_type));
- variadic_type = base_type(variadic_type)->Slice.elem;
- if (!is_type_any(variadic_type)) {
- for (; i < arg_count; i++) {
- args[i] = lb_emit_conv(p, args[i], variadic_type);
+
+ lbValue arg = args[arg_index];
+ if (arg.value == nullptr) {
+ switch (e->kind) {
+ case Entity_TypeName:
+ args[arg_index] = lb_const_nil(p->module, e->type);
+ break;
+ case Entity_Variable:
+ args[arg_index] = lb_handle_param_value(p, e->type, e->Variable.param_value, pos);
+ break;
+
+ case Entity_Constant:
+ args[arg_index] = lb_const_value(p->module, e->type, e->Constant.value);
+ break;
+ default:
+ GB_PANIC("Unknown entity kind %.*s\n", LIT(entity_strings[e->kind]));
}
} else {
- for (; i < arg_count; i++) {
- args[i] = lb_emit_conv(p, args[i], default_type(args[i].type));
- }
- }
- } else if (variadic) {
- isize i = 0;
- for (; i < variadic_index; i++) {
- Entity *e = param_tuple->variables[i];
- if (e->kind == Entity_Variable) {
- args[i] = lb_emit_conv(p, args[i], e->type);
- }
- }
- if (!vari_expand) {
- Type *variadic_type = param_tuple->variables[i]->type;
- GB_ASSERT(is_type_slice(variadic_type));
- variadic_type = base_type(variadic_type)->Slice.elem;
- for (; i < arg_count; i++) {
- args[i] = lb_emit_conv(p, args[i], variadic_type);
- }
- }
- } else {
- for (isize i = 0; i < param_count; i++) {
- Entity *e = param_tuple->variables[i];
- if (e->kind == Entity_Variable) {
- if (args[i].value == nullptr) {
- continue;
- }
- GB_ASSERT_MSG(args[i].value != nullptr, "%.*s", LIT(e->token.string));
- if (is_type_typeid(e->type) && !is_type_typeid(args[i].type)) {
- GB_ASSERT(LLVMIsNull(args[i].value));
- args[i] = lb_typeid(p->module, args[i].type);
- } else {
- args[i] = lb_emit_conv(p, args[i], e->type);
- }
- }
- }
- }
-
- if (variadic && !vari_expand && !is_c_vararg) {
- // variadic call argument generation
- Type *slice_type = param_tuple->variables[variadic_index]->type;
- Type *elem_type = base_type(slice_type)->Slice.elem;
- lbAddr slice = lb_add_local_generated(p, slice_type, true);
- isize slice_len = arg_count+1 - (variadic_index+1);
-
- if (slice_len > 0) {
- lbAddr base_array = lb_add_local_generated(p, alloc_type_array(elem_type, slice_len), true);
-
- for (isize i = variadic_index, j = 0; i < arg_count; i++, j++) {
- lbValue addr = lb_emit_array_epi(p, base_array.addr, cast(i32)j);
- lb_emit_store(p, addr, args[i]);
- }
-
- lbValue base_elem = lb_emit_array_epi(p, base_array.addr, 0);
- lbValue len = lb_const_int(m, t_int, slice_len);
- lb_fill_slice(p, slice, base_elem, len);
- }
-
- arg_count = param_count;
- args[variadic_index] = lb_addr_load(p, slice);
- }
- }
-
- if (variadic && variadic_index+1 < param_count) {
- for (isize i = variadic_index+1; i < param_count; i++) {
- Entity *e = param_tuple->variables[i];
- args[i] = lb_handle_param_value(p, e->type, e->Variable.param_value, ast_token(expr).pos);
- }
- }
-
- isize final_count = param_count;
- if (is_c_vararg) {
- final_count = arg_count;
- }
-
- if (param_tuple != nullptr) {
- for (isize i = 0; i < gb_min(args.count, param_tuple->variables.count); i++) {
- Entity *e = param_tuple->variables[i];
- if (args[i].type == nullptr) {
- continue;
- } else if (is_type_untyped_uninit(args[i].type)) {
- args[i] = lb_const_undef(m, e->type);
- } else if (is_type_untyped_nil(args[i].type)) {
- args[i] = lb_const_nil(m, e->type);
+ args[arg_index] = lb_emit_conv(p, arg, e->type);
}
}
}
+ isize final_count = is_c_vararg ? args.count : pt->param_count;
auto call_args = array_slice(args, 0, final_count);
return lb_emit_call(p, value, call_args, ce->inlining);
}
diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp
index 275d1f728..60420402a 100644
--- a/src/llvm_backend_stmt.cpp
+++ b/src/llvm_backend_stmt.cpp
@@ -619,6 +619,18 @@ gb_internal void lb_build_range_string(lbProcedure *p, lbValue expr, Type *val_t
}
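+// Range-statement values may be written as '&ident' for by-reference iteration; strip the leading '&' so the underlying identifier is used.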
+gb_internal Ast *lb_strip_and_prefix(Ast *ident) {
+ if (ident != nullptr) {
+ if (ident->kind == Ast_UnaryExpr && ident->UnaryExpr.op.kind == Token_And) {
+ ident = ident->UnaryExpr.expr;
+ }
+ GB_ASSERT(ident->kind == Ast_Ident);
+ }
+ return ident;
+}
+
+
+
gb_internal void lb_build_range_interval(lbProcedure *p, AstBinaryExpr *node,
AstRangeStmt *rs, Scope *scope) {
bool ADD_EXTRA_WRAPPING_CHECK = true;
@@ -627,13 +639,15 @@ gb_internal void lb_build_range_interval(lbProcedure *p, AstBinaryExpr *node,
lb_open_scope(p, scope);
+ Ast *val0 = rs->vals.count > 0 ? lb_strip_and_prefix(rs->vals[0]) : nullptr;
+ Ast *val1 = rs->vals.count > 1 ? lb_strip_and_prefix(rs->vals[1]) : nullptr;
Type *val0_type = nullptr;
Type *val1_type = nullptr;
- if (rs->vals.count > 0 && rs->vals[0] != nullptr && !is_blank_ident(rs->vals[0])) {
- val0_type = type_of_expr(rs->vals[0]);
+ if (val0 != nullptr && !is_blank_ident(val0)) {
+ val0_type = type_of_expr(val0);
}
- if (rs->vals.count > 1 && rs->vals[1] != nullptr && !is_blank_ident(rs->vals[1])) {
- val1_type = type_of_expr(rs->vals[1]);
+ if (val1 != nullptr && !is_blank_ident(val1)) {
+ val1_type = type_of_expr(val1);
}
TokenKind op = Token_Lt;
@@ -649,7 +663,7 @@ gb_internal void lb_build_range_interval(lbProcedure *p, AstBinaryExpr *node,
lbAddr value;
if (val0_type != nullptr) {
- Entity *e = entity_of_node(rs->vals[0]);
+ Entity *e = entity_of_node(val0);
value = lb_add_local(p, val0_type, e, false);
} else {
value = lb_add_local_generated(p, lower.type, false);
@@ -658,7 +672,7 @@ gb_internal void lb_build_range_interval(lbProcedure *p, AstBinaryExpr *node,
lbAddr index;
if (val1_type != nullptr) {
- Entity *e = entity_of_node(rs->vals[1]);
+ Entity *e = entity_of_node(val1);
index = lb_add_local(p, val1_type, e, false);
} else {
index = lb_add_local_generated(p, t_int, false);
@@ -680,8 +694,8 @@ gb_internal void lb_build_range_interval(lbProcedure *p, AstBinaryExpr *node,
lbValue val = lb_addr_load(p, value);
lbValue idx = lb_addr_load(p, index);
- if (val0_type) lb_store_range_stmt_val(p, rs->vals[0], val);
- if (val1_type) lb_store_range_stmt_val(p, rs->vals[1], idx);
+ if (val0_type) lb_store_range_stmt_val(p, val0, val);
+ if (val1_type) lb_store_range_stmt_val(p, val1, idx);
{
// NOTE: this check block will most likely be optimized out, and is here
@@ -815,12 +829,14 @@ gb_internal void lb_build_range_stmt_struct_soa(lbProcedure *p, AstRangeStmt *rs
lb_open_scope(p, scope);
+ Ast *val0 = rs->vals.count > 0 ? lb_strip_and_prefix(rs->vals[0]) : nullptr;
+ Ast *val1 = rs->vals.count > 1 ? lb_strip_and_prefix(rs->vals[1]) : nullptr;
Type *val_types[2] = {};
- if (rs->vals.count > 0 && rs->vals[0] != nullptr && !is_blank_ident(rs->vals[0])) {
- val_types[0] = type_of_expr(rs->vals[0]);
+ if (val0 != nullptr && !is_blank_ident(val0)) {
+ val_types[0] = type_of_expr(val0);
}
- if (rs->vals.count > 1 && rs->vals[1] != nullptr && !is_blank_ident(rs->vals[1])) {
- val_types[1] = type_of_expr(rs->vals[1]);
+ if (val1 != nullptr && !is_blank_ident(val1)) {
+ val_types[1] = type_of_expr(val1);
}
@@ -901,14 +917,14 @@ gb_internal void lb_build_range_stmt_struct_soa(lbProcedure *p, AstRangeStmt *rs
if (val_types[0]) {
- Entity *e = entity_of_node(rs->vals[0]);
+ Entity *e = entity_of_node(val0);
if (e != nullptr) {
lbAddr soa_val = lb_addr_soa_variable(array.addr, lb_addr_load(p, index), nullptr);
map_set(&p->module->soa_values, e, soa_val);
}
}
if (val_types[1]) {
- lb_store_range_stmt_val(p, rs->vals[1], lb_addr_load(p, index));
+ lb_store_range_stmt_val(p, val1, lb_addr_load(p, index));
}
@@ -942,13 +958,15 @@ gb_internal void lb_build_range_stmt(lbProcedure *p, AstRangeStmt *rs, Scope *sc
lb_open_scope(p, scope);
+ Ast *val0 = rs->vals.count > 0 ? lb_strip_and_prefix(rs->vals[0]) : nullptr;
+ Ast *val1 = rs->vals.count > 1 ? lb_strip_and_prefix(rs->vals[1]) : nullptr;
Type *val0_type = nullptr;
Type *val1_type = nullptr;
- if (rs->vals.count > 0 && rs->vals[0] != nullptr && !is_blank_ident(rs->vals[0])) {
- val0_type = type_of_expr(rs->vals[0]);
+ if (val0 != nullptr && !is_blank_ident(val0)) {
+ val0_type = type_of_expr(val0);
}
- if (rs->vals.count > 1 && rs->vals[1] != nullptr && !is_blank_ident(rs->vals[1])) {
- val1_type = type_of_expr(rs->vals[1]);
+ if (val1 != nullptr && !is_blank_ident(val1)) {
+ val1_type = type_of_expr(val1);
}
lbValue val = {};
@@ -1042,11 +1060,11 @@ gb_internal void lb_build_range_stmt(lbProcedure *p, AstRangeStmt *rs, Scope *sc
if (is_map) {
- if (val0_type) lb_store_range_stmt_val(p, rs->vals[0], key);
- if (val1_type) lb_store_range_stmt_val(p, rs->vals[1], val);
+ if (val0_type) lb_store_range_stmt_val(p, val0, key);
+ if (val1_type) lb_store_range_stmt_val(p, val1, val);
} else {
- if (val0_type) lb_store_range_stmt_val(p, rs->vals[0], val);
- if (val1_type) lb_store_range_stmt_val(p, rs->vals[1], key);
+ if (val0_type) lb_store_range_stmt_val(p, val0, val);
+ if (val1_type) lb_store_range_stmt_val(p, val1, key);
}
lb_push_target_list(p, rs->label, done, loop, nullptr);
@@ -1064,21 +1082,23 @@ gb_internal void lb_build_unroll_range_stmt(lbProcedure *p, AstUnrollRangeStmt *
lb_open_scope(p, scope); // Open scope here
+ Ast *val0 = lb_strip_and_prefix(rs->val0);
+ Ast *val1 = lb_strip_and_prefix(rs->val1);
Type *val0_type = nullptr;
Type *val1_type = nullptr;
- if (rs->val0 != nullptr && !is_blank_ident(rs->val0)) {
- val0_type = type_of_expr(rs->val0);
+ if (val0 != nullptr && !is_blank_ident(val0)) {
+ val0_type = type_of_expr(val0);
}
- if (rs->val1 != nullptr && !is_blank_ident(rs->val1)) {
- val1_type = type_of_expr(rs->val1);
+ if (val1 != nullptr && !is_blank_ident(val1)) {
+ val1_type = type_of_expr(val1);
}
if (val0_type != nullptr) {
- Entity *e = entity_of_node(rs->val0);
+ Entity *e = entity_of_node(val0);
lb_add_local(p, e->type, e, true);
}
if (val1_type != nullptr) {
- Entity *e = entity_of_node(rs->val1);
+ Entity *e = entity_of_node(val1);
lb_add_local(p, e->type, e, true);
}
@@ -1092,8 +1112,8 @@ gb_internal void lb_build_unroll_range_stmt(lbProcedure *p, AstUnrollRangeStmt *
lbAddr val0_addr = {};
lbAddr val1_addr = {};
- if (val0_type) val0_addr = lb_build_addr(p, rs->val0);
- if (val1_type) val1_addr = lb_build_addr(p, rs->val1);
+ if (val0_type) val0_addr = lb_build_addr(p, val0);
+ if (val1_type) val1_addr = lb_build_addr(p, val1);
TokenKind op = expr->BinaryExpr.op.kind;
Ast *start_expr = expr->BinaryExpr.left;
@@ -1135,8 +1155,8 @@ gb_internal void lb_build_unroll_range_stmt(lbProcedure *p, AstUnrollRangeStmt *
lbAddr val0_addr = {};
lbAddr val1_addr = {};
- if (val0_type) val0_addr = lb_build_addr(p, rs->val0);
- if (val1_type) val1_addr = lb_build_addr(p, rs->val1);
+ if (val0_type) val0_addr = lb_build_addr(p, val0);
+ if (val1_type) val1_addr = lb_build_addr(p, val1);
for_array(i, bet->Enum.fields) {
Entity *field = bet->Enum.fields[i];
@@ -1149,8 +1169,8 @@ gb_internal void lb_build_unroll_range_stmt(lbProcedure *p, AstUnrollRangeStmt *
} else {
lbAddr val0_addr = {};
lbAddr val1_addr = {};
- if (val0_type) val0_addr = lb_build_addr(p, rs->val0);
- if (val1_type) val1_addr = lb_build_addr(p, rs->val1);
+ if (val0_type) val0_addr = lb_build_addr(p, val0);
+ if (val1_type) val1_addr = lb_build_addr(p, val1);
GB_ASSERT(expr->tav.mode == Addressing_Constant);
@@ -1858,7 +1878,9 @@ gb_internal void lb_build_return_stmt(lbProcedure *p, Slice const &return
} else if (return_count == 1) {
Entity *e = tuple->variables[0];
if (res_count == 0) {
+ rw_mutex_shared_lock(&p->module->values_mutex);
lbValue found = map_must_get(&p->module->values, e);
+ rw_mutex_shared_unlock(&p->module->values_mutex);
res = lb_emit_load(p, found);
} else {
res = lb_build_expr(p, return_results[0]);
@@ -1867,7 +1889,9 @@ gb_internal void lb_build_return_stmt(lbProcedure *p, Slice const &return
if (p->type->Proc.has_named_results) {
// NOTE(bill): store the named values before returning
if (e->token.string != "") {
+ rw_mutex_shared_lock(&p->module->values_mutex);
lbValue found = map_must_get(&p->module->values, e);
+ rw_mutex_shared_unlock(&p->module->values_mutex);
lb_emit_store(p, found, lb_emit_conv(p, res, e->type));
}
}
@@ -1883,7 +1907,9 @@ gb_internal void lb_build_return_stmt(lbProcedure *p, Slice const &return
} else {
for (isize res_index = 0; res_index < return_count; res_index++) {
Entity *e = tuple->variables[res_index];
+ rw_mutex_shared_lock(&p->module->values_mutex);
lbValue found = map_must_get(&p->module->values, e);
+ rw_mutex_shared_unlock(&p->module->values_mutex);
lbValue res = lb_emit_load(p, found);
array_add(&results, res);
}
@@ -1905,7 +1931,9 @@ gb_internal void lb_build_return_stmt(lbProcedure *p, Slice const &return
if (e->token.string == "") {
continue;
}
+ rw_mutex_shared_lock(&p->module->values_mutex);
named_results[i] = map_must_get(&p->module->values, e);
+ rw_mutex_shared_unlock(&p->module->values_mutex);
values[i] = lb_emit_conv(p, results[i], e->type);
}
@@ -2463,6 +2491,7 @@ gb_internal void lb_build_stmt(lbProcedure *p, Ast *node) {
lb_add_entity(p->module, e, val);
lb_add_debug_local_variable(p, val.value, e->type, e->token);
lvals_preused[lval_index] = true;
+ lvals[lval_index] = *comp_lit_addr;
}
}
}
diff --git a/src/llvm_backend_type.cpp b/src/llvm_backend_type.cpp
index 1e26fd6bd..4716733cc 100644
--- a/src/llvm_backend_type.cpp
+++ b/src/llvm_backend_type.cpp
@@ -15,7 +15,7 @@ gb_internal isize lb_type_info_index(CheckerInfo *info, Type *type, bool err_on_
}
gb_internal lbValue lb_typeid(lbModule *m, Type *type) {
- GB_ASSERT(!build_context.disallow_rtti);
+ GB_ASSERT(!build_context.no_rtti);
type = default_type(type);
@@ -92,7 +92,7 @@ gb_internal lbValue lb_typeid(lbModule *m, Type *type) {
}
gb_internal lbValue lb_type_info(lbModule *m, Type *type) {
- GB_ASSERT(!build_context.disallow_rtti);
+ GB_ASSERT(!build_context.no_rtti);
type = default_type(type);
@@ -141,7 +141,7 @@ gb_internal lbValue lb_type_info_member_tags_offset(lbProcedure *p, isize count)
gb_internal void lb_setup_type_info_data(lbProcedure *p) { // NOTE(bill): Setup type_info data
- if (build_context.disallow_rtti) {
+ if (build_context.no_rtti) {
return;
}
@@ -654,10 +654,9 @@ gb_internal void lb_setup_type_info_data(lbProcedure *p) { // NOTE(bill): Setup
lbValue count = lb_const_int(m, t_int, variant_count);
vals[0] = llvm_const_slice(m, memory_types, count);
- i64 tag_size = union_tag_size(t);
- i64 tag_offset = align_formula(t->Union.variant_block_size, tag_size);
-
+ i64 tag_size = union_tag_size(t);
if (tag_size > 0) {
+ i64 tag_offset = align_formula(t->Union.variant_block_size, tag_size);
vals[1] = lb_const_int(m, t_uintptr, tag_offset).value;
vals[2] = lb_type_info(m, union_tag_type(t)).value;
} else {
diff --git a/src/llvm_backend_utility.cpp b/src/llvm_backend_utility.cpp
index 0c26382ed..2ecad1703 100644
--- a/src/llvm_backend_utility.cpp
+++ b/src/llvm_backend_utility.cpp
@@ -721,7 +721,7 @@ gb_internal lbValue lb_emit_union_cast(lbProcedure *p, lbValue value, Type *type
Type *dst_type = tuple->Tuple.variables[0]->type;
isize arg_count = 7;
- if (build_context.disallow_rtti) {
+ if (build_context.no_rtti) {
arg_count = 4;
}
@@ -733,7 +733,7 @@ gb_internal lbValue lb_emit_union_cast(lbProcedure *p, lbValue value, Type *type
args[2] = lb_const_int(m, t_i32, pos.line);
args[3] = lb_const_int(m, t_i32, pos.column);
- if (!build_context.disallow_rtti) {
+ if (!build_context.no_rtti) {
args[4] = lb_typeid(m, src_type);
args[5] = lb_typeid(m, dst_type);
args[6] = lb_emit_conv(p, value_, t_rawptr);
@@ -797,7 +797,7 @@ gb_internal lbAddr lb_emit_any_cast_addr(lbProcedure *p, lbValue value, Type *ty
lbValue ok = lb_emit_load(p, lb_emit_struct_ep(p, v.addr, 1));
isize arg_count = 7;
- if (build_context.disallow_rtti) {
+ if (build_context.no_rtti) {
arg_count = 4;
}
auto args = array_make<lbValue>(permanent_allocator(), arg_count);
@@ -807,7 +807,7 @@ gb_internal lbAddr lb_emit_any_cast_addr(lbProcedure *p, lbValue value, Type *ty
args[2] = lb_const_int(m, t_i32, pos.line);
args[3] = lb_const_int(m, t_i32, pos.column);
- if (!build_context.disallow_rtti) {
+ if (!build_context.no_rtti) {
args[4] = any_typeid;
args[5] = dst_typeid;
args[6] = lb_emit_struct_ev(p, value, 0);
diff --git a/src/main.cpp b/src/main.cpp
index 162cd309e..db2702b19 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -655,7 +655,6 @@ enum BuildFlagKind {
BuildFlag_ShowDebugMessages,
BuildFlag_Vet,
BuildFlag_VetExtra,
- BuildFlag_UseLLVMApi,
BuildFlag_IgnoreUnknownAttributes,
BuildFlag_ExtraLinkerFlags,
BuildFlag_ExtraAssemblerFlags,
@@ -671,11 +670,10 @@ enum BuildFlagKind {
BuildFlag_DisallowDo,
BuildFlag_DefaultToNilAllocator,
- BuildFlag_InsertSemicolon,
BuildFlag_StrictStyle,
BuildFlag_StrictStyleInitOnly,
BuildFlag_ForeignErrorProcedures,
- BuildFlag_DisallowRTTI,
+ BuildFlag_NoRTTI,
BuildFlag_DynamicMapCalls,
BuildFlag_Compact,
@@ -834,7 +832,6 @@ gb_internal bool parse_build_flags(Array args) {
add_flag(&build_flags, BuildFlag_ShowDebugMessages, str_lit("show-debug-messages"), BuildFlagParam_None, Command_all);
add_flag(&build_flags, BuildFlag_Vet, str_lit("vet"), BuildFlagParam_None, Command__does_check);
add_flag(&build_flags, BuildFlag_VetExtra, str_lit("vet-extra"), BuildFlagParam_None, Command__does_check);
- add_flag(&build_flags, BuildFlag_UseLLVMApi, str_lit("llvm-api"), BuildFlagParam_None, Command__does_build);
add_flag(&build_flags, BuildFlag_IgnoreUnknownAttributes, str_lit("ignore-unknown-attributes"), BuildFlagParam_None, Command__does_check);
add_flag(&build_flags, BuildFlag_ExtraLinkerFlags, str_lit("extra-linker-flags"), BuildFlagParam_String, Command__does_build);
add_flag(&build_flags, BuildFlag_ExtraAssemblerFlags, str_lit("extra-assembler-flags"), BuildFlagParam_String, Command__does_build);
@@ -849,12 +846,12 @@ gb_internal bool parse_build_flags(Array args) {
add_flag(&build_flags, BuildFlag_DisallowDo, str_lit("disallow-do"), BuildFlagParam_None, Command__does_check);
add_flag(&build_flags, BuildFlag_DefaultToNilAllocator, str_lit("default-to-nil-allocator"), BuildFlagParam_None, Command__does_check);
- add_flag(&build_flags, BuildFlag_InsertSemicolon, str_lit("insert-semicolon"), BuildFlagParam_None, Command__does_check);
add_flag(&build_flags, BuildFlag_StrictStyle, str_lit("strict-style"), BuildFlagParam_None, Command__does_check);
add_flag(&build_flags, BuildFlag_StrictStyleInitOnly, str_lit("strict-style-init-only"), BuildFlagParam_None, Command__does_check);
add_flag(&build_flags, BuildFlag_ForeignErrorProcedures, str_lit("foreign-error-procedures"), BuildFlagParam_None, Command__does_check);
- add_flag(&build_flags, BuildFlag_DisallowRTTI, str_lit("disallow-rtti"), BuildFlagParam_None, Command__does_check);
+ add_flag(&build_flags, BuildFlag_NoRTTI, str_lit("no-rtti"), BuildFlagParam_None, Command__does_check);
+ add_flag(&build_flags, BuildFlag_NoRTTI, str_lit("disallow-rtti"), BuildFlagParam_None, Command__does_check);
add_flag(&build_flags, BuildFlag_DynamicMapCalls, str_lit("dynamic-map-calls"), BuildFlagParam_None, Command__does_check);
@@ -1372,11 +1369,6 @@ gb_internal bool parse_build_flags(Array args) {
build_context.vet_extra = true;
break;
}
- case BuildFlag_UseLLVMApi: {
- gb_printf_err("-llvm-api flag is not required any more\n");
- bad_flags = true;
- break;
- }
case BuildFlag_IgnoreUnknownAttributes:
build_context.ignore_unknown_attributes = true;
break;
@@ -1448,8 +1440,12 @@ gb_internal bool parse_build_flags(Array args) {
case BuildFlag_DisallowDo:
build_context.disallow_do = true;
break;
- case BuildFlag_DisallowRTTI:
- build_context.disallow_rtti = true;
+ case BuildFlag_NoRTTI:
+ if (name == "disallow-rtti") {
+ gb_printf_err("'-disallow-rtti' has been replaced with '-no-rtti'\n");
+ bad_flags = true;
+ }
+ build_context.no_rtti = true;
break;
case BuildFlag_DynamicMapCalls:
build_context.dynamic_map_calls = true;
@@ -1460,11 +1456,6 @@ gb_internal bool parse_build_flags(Array args) {
case BuildFlag_ForeignErrorProcedures:
build_context.ODIN_FOREIGN_ERROR_PROCEDURES = true;
break;
- case BuildFlag_InsertSemicolon: {
- gb_printf_err("-insert-semicolon flag is not required any more\n");
- bad_flags = true;
- break;
- }
case BuildFlag_StrictStyle: {
if (build_context.strict_style_init_only) {
gb_printf_err("-strict-style and -strict-style-init-only cannot be used together\n");
@@ -2558,6 +2549,22 @@ gb_internal int strip_semicolons(Parser *parser) {
gb_internal void init_terminal(void) {
build_context.has_ansi_terminal_colours = false;
+
+ gbAllocator a = heap_allocator();
+
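+ // Respect the NO_COLOR and FORCE_COLOR environment variables before attempting any terminal detection.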
+ char const *no_color = gb_get_env("NO_COLOR", a);
+ defer (gb_free(a, cast(void *)no_color));
+ if (no_color != nullptr) {
+ return;
+ }
+
+ char const *force_color = gb_get_env("FORCE_COLOR", a);
+ defer (gb_free(a, cast(void *)force_color));
+ if (force_color != nullptr) {
+ build_context.has_ansi_terminal_colours = true;
+ return;
+ }
+
#if defined(GB_SYSTEM_WINDOWS)
HANDLE hnd = GetStdHandle(STD_ERROR_HANDLE);
DWORD mode = 0;
@@ -2567,10 +2574,16 @@ gb_internal void init_terminal(void) {
build_context.has_ansi_terminal_colours = true;
}
}
+#elif defined(GB_SYSTEM_OSX) || defined(GB_SYSTEM_UNIX)
+ char const *term_ = gb_get_env("TERM", a);
+ defer (gb_free(a, cast(void *)term_));
+ String term = make_string_c(term_);
+ if (!str_eq(term, str_lit("dumb")) && isatty(STDERR_FILENO)) {
+ build_context.has_ansi_terminal_colours = true;
+ }
#endif
if (!build_context.has_ansi_terminal_colours) {
- gbAllocator a = heap_allocator();
char const *odin_terminal_ = gb_get_env("ODIN_TERMINAL", a);
defer (gb_free(a, cast(void *)odin_terminal_));
String odin_terminal = make_string_c(odin_terminal_);
diff --git a/src/parser.cpp b/src/parser.cpp
index 883342b21..b756412ff 100644
--- a/src/parser.cpp
+++ b/src/parser.cpp
@@ -2752,9 +2752,9 @@ gb_internal Ast *parse_call_expr(AstFile *f, Ast *operand) {
open_paren = expect_token(f, Token_OpenParen);
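+ // Track whether '..' has been parsed so that later positional arguments can be rejected.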
+ bool seen_ellipsis = false;
while (f->curr_token.kind != Token_CloseParen &&
- f->curr_token.kind != Token_EOF &&
- ellipsis.pos.line == 0) {
+ f->curr_token.kind != Token_EOF) {
if (f->curr_token.kind == Token_Comma) {
syntax_error(f->curr_token, "Expected an expression not ,");
} else if (f->curr_token.kind == Token_Eq) {
@@ -2777,11 +2777,15 @@ gb_internal Ast *parse_call_expr(AstFile *f, Ast *operand) {
Ast *value = parse_value(f);
arg = ast_field_value(f, arg, value, eq);
-
-
+ } else if (seen_ellipsis) {
+ syntax_error(arg, "Positional arguments are not allowed after '..'");
}
array_add(&args, arg);
+ if (ellipsis.pos.line != 0) {
+ seen_ellipsis = true;
+ }
+
if (!allow_field_separator(f)) {
break;
}
diff --git a/src/parser.hpp b/src/parser.hpp
index 6ba4ef6d6..900fddbab 100644
--- a/src/parser.hpp
+++ b/src/parser.hpp
@@ -367,6 +367,11 @@ gb_global char const *union_type_kind_strings[UnionType_COUNT] = {
"#shared_nil",
};
+struct AstSplitArgs {
+ Slice<Ast *> positional;
+ Slice<Ast *> named;
+};
+
#define AST_KINDS \
AST_KIND(Ident, "identifier", struct { \
Token token; \
@@ -442,6 +447,7 @@ AST_KIND(_ExprBegin, "", bool) \
ProcInlining inlining; \
bool optional_ok_one; \
bool was_selector; \
+ AstSplitArgs *split_args; \
}) \
AST_KIND(FieldValue, "field value", struct { Token eq; Ast *field, *value; }) \
AST_KIND(EnumFieldValue, "enum field value", struct { \
diff --git a/src/parser_pos.cpp b/src/parser_pos.cpp
index 52d49e897..3d2e8f27d 100644
--- a/src/parser_pos.cpp
+++ b/src/parser_pos.cpp
@@ -37,11 +37,15 @@ gb_internal Token ast_token(Ast *node) {
return ast_token(node->ImplicitSelectorExpr.selector);
}
return node->ImplicitSelectorExpr.token;
- case Ast_IndexExpr: return node->IndexExpr.open;
- case Ast_MatrixIndexExpr: return node->MatrixIndexExpr.open;
- case Ast_SliceExpr: return node->SliceExpr.open;
+ case Ast_IndexExpr: return ast_token(node->IndexExpr.expr);
+ case Ast_MatrixIndexExpr: return ast_token(node->MatrixIndexExpr.expr);
+ case Ast_SliceExpr: return ast_token(node->SliceExpr.expr);
case Ast_Ellipsis: return node->Ellipsis.token;
- case Ast_FieldValue: return node->FieldValue.eq;
+ case Ast_FieldValue:
+ if (node->FieldValue.field) {
+ return ast_token(node->FieldValue.field);
+ }
+ return node->FieldValue.eq;
case Ast_EnumFieldValue: return ast_token(node->EnumFieldValue.name);
case Ast_DerefExpr: return node->DerefExpr.op;
case Ast_TernaryIfExpr: return ast_token(node->TernaryIfExpr.x);
diff --git a/src/types.cpp b/src/types.cpp
index 3cc077f84..847aea9f3 100644
--- a/src/types.cpp
+++ b/src/types.cpp
@@ -2108,8 +2108,12 @@ gb_internal bool is_type_polymorphic(Type *t, bool or_specialized=false) {
return is_type_polymorphic(t->Matrix.elem, or_specialized);
case Type_Tuple:
- for_array(i, t->Tuple.variables) {
- if (is_type_polymorphic(t->Tuple.variables[i]->type, or_specialized)) {
+ for (Entity *e : t->Tuple.variables) {
+ if (e->kind == Entity_Constant) {
+ if (e->Constant.value.kind != ExactValue_Invalid) {
+ return or_specialized;
+ }
+ } else if (is_type_polymorphic(e->type, or_specialized)) {
return true;
}
}
@@ -2119,7 +2123,6 @@ gb_internal bool is_type_polymorphic(Type *t, bool or_specialized=false) {
if (t->Proc.is_polymorphic) {
return true;
}
- #if 1
if (t->Proc.param_count > 0 &&
is_type_polymorphic(t->Proc.params, or_specialized)) {
return true;
@@ -2128,7 +2131,6 @@ gb_internal bool is_type_polymorphic(Type *t, bool or_specialized=false) {
is_type_polymorphic(t->Proc.results, or_specialized)) {
return true;
}
- #endif
break;
case Type_Enum:
@@ -3079,7 +3081,7 @@ gb_internal Selection lookup_field_with_selection(Type *type_, String field_name
mutex_lock(md->mutex);
defer (mutex_unlock(md->mutex));
for (TypeNameObjCMetadataEntry const &entry : md->value_entries) {
- GB_ASSERT(entry.entity->kind == Entity_Procedure);
+ GB_ASSERT(entry.entity->kind == Entity_Procedure || entry.entity->kind == Entity_ProcGroup);
if (entry.name == field_name) {
sel.entity = entry.entity;
sel.pseudo_field = true;
@@ -3326,6 +3328,9 @@ gb_internal bool are_struct_fields_reordered(Type *type) {
type = base_type(type);
GB_ASSERT(type->kind == Type_Struct);
type_set_offsets(type);
+ if (type->Struct.fields.count == 0) {
+ return false;
+ }
GB_ASSERT(type->Struct.offsets != nullptr);
i64 prev_offset = 0;
@@ -3344,6 +3349,9 @@ gb_internal Slice struct_fields_index_by_increasing_offset(gbAllocator allo
type = base_type(type);
GB_ASSERT(type->kind == Type_Struct);
type_set_offsets(type);
+ if (type->Struct.fields.count == 0) {
+ return {};
+ }
GB_ASSERT(type->Struct.offsets != nullptr);
auto indices = slice_make(allocator, type->Struct.fields.count);
@@ -4273,6 +4281,10 @@ gb_internal gbString write_type_to_string(gbString str, Type *type, bool shortha
if (var == nullptr) {
continue;
}
+ if (comma_index++ > 0) {
+ str = gb_string_appendc(str, ", ");
+ }
+
String name = var->token.string;
if (var->kind == Entity_Constant) {
str = gb_string_appendc(str, "$");
@@ -4289,10 +4301,6 @@ gb_internal gbString write_type_to_string(gbString str, Type *type, bool shortha
continue;
}
- if (comma_index++ > 0) {
- str = gb_string_appendc(str, ", ");
- }
-
if (var->kind == Entity_Variable) {
if (var->flags&EntityFlag_CVarArg) {
str = gb_string_appendc(str, "#c_vararg ");
diff --git a/tests/documentation/documentation_tester.odin b/tests/documentation/documentation_tester.odin
index a4d18d1eb..1f0f8ca97 100644
--- a/tests/documentation/documentation_tester.odin
+++ b/tests/documentation/documentation_tester.odin
@@ -238,10 +238,10 @@ find_and_add_examples :: proc(docs: string, package_name: string, entity_name: s
}
}
// Remove first layer of tabs which are always present
- for line in &example_block.lines {
+ for &line in example_block.lines {
line = strings.trim_prefix(line, "\t")
}
- for line in &output_block.lines {
+ for &line in output_block.lines {
line = strings.trim_prefix(line, "\t")
}
append(&g_examples_to_verify, Example_Test {
diff --git a/tests/issues/run.bat b/tests/issues/run.bat
index 87492bc29..63d722e09 100644
--- a/tests/issues/run.bat
+++ b/tests/issues/run.bat
@@ -12,6 +12,9 @@ set COMMON=-collection:tests=..\..
..\..\..\odin test ..\test_issue_2056.odin %COMMON% -file || exit /b
..\..\..\odin test ..\test_issue_2087.odin %COMMON% -file || exit /b
..\..\..\odin build ..\test_issue_2113.odin %COMMON% -file -debug || exit /b
+..\..\..\odin test ..\test_issue_2466.odin %COMMON% -file || exit /b
+..\..\..\odin test ..\test_issue_2615.odin %COMMON% -file || exit /b
+..\..\..\odin test ..\test_issue_2637.odin %COMMON% -file || exit /b
@echo off
diff --git a/tests/issues/run.sh b/tests/issues/run.sh
index f894f2dae..7d2101dc6 100755
--- a/tests/issues/run.sh
+++ b/tests/issues/run.sh
@@ -6,6 +6,8 @@ pushd build
ODIN=../../../odin
COMMON="-collection:tests=../.."
+NO_NIL_ERR="Error: "
+
set -x
$ODIN test ../test_issue_829.odin $COMMON -file
@@ -13,6 +15,14 @@ $ODIN test ../test_issue_1592.odin $COMMON -file
$ODIN test ../test_issue_2056.odin $COMMON -file
$ODIN test ../test_issue_2087.odin $COMMON -file
$ODIN build ../test_issue_2113.odin $COMMON -file -debug
+$ODIN test ../test_issue_2466.odin $COMMON -file
+$ODIN test ../test_issue_2615.odin $COMMON -file
+$ODIN test ../test_issue_2637.odin $COMMON -file
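+# test_issue_2395 is expected to fail to build with exactly two '#no_nil' union errors; count them below.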
+if [[ $($ODIN build ../test_issue_2395.odin $COMMON -file 2>&1 >/dev/null | grep -c "$NO_NIL_ERR") -eq 2 ]] ; then
+ echo "SUCCESSFUL 1/1"
+else
+ echo "SUCCESSFUL 0/1"
+fi
set +x
diff --git a/tests/issues/test_issue_2395.odin b/tests/issues/test_issue_2395.odin
new file mode 100644
index 000000000..48e1ee516
--- /dev/null
+++ b/tests/issues/test_issue_2395.odin
@@ -0,0 +1,29 @@
+// Tests issue #2395 https://github.com/odin-lang/Odin/issues/2395
+
+// Ensures that we no longer raise the faulty error for #no_nil unions when
+// there are 2 variants with the polymorphic type. Also ensures that we raise
+// exactly 2 errors from the invalid unions
+package test_issues
+
+import "core:testing"
+
+ValidUnion :: union($T: typeid) #no_nil {
+ T,
+ f32,
+}
+
+OtherValidUnion :: union($T: typeid, $S: typeid) #no_nil {
+ T,
+ S,
+}
+
+InvalidUnion :: union($T: typeid) #no_nil {
+ T,
+}
+
+OtherInvalidUnion :: union($T: typeid) #no_nil {
+ u8,
+}
+
+main :: proc() {
+}
diff --git a/tests/issues/test_issue_2466.odin b/tests/issues/test_issue_2466.odin
new file mode 100644
index 000000000..4810cfea9
--- /dev/null
+++ b/tests/issues/test_issue_2466.odin
@@ -0,0 +1,22 @@
+// Tests issue #2466 https://github.com/odin-lang/Odin/issues/2466
+package test_issues
+
+import "core:fmt"
+import "core:testing"
+
+Bug :: struct {
+ val: int,
+ arr: []int,
+}
+
+@test
+test_compound_literal_local_reuse :: proc(t: ^testing.T) {
+ v: int = 123
+ bug := Bug {
+ val = v,
+ arr = {42},
+ }
+ testing.expect(t, bug.val == 123, fmt.tprintf("expected 123, found %d", bug.val))
+ testing.expect(t, bug.arr[0] == 42, fmt.tprintf("expected 42, found %d", bug.arr[0]))
+}
+
diff --git a/tests/issues/test_issue_2615.odin b/tests/issues/test_issue_2615.odin
new file mode 100644
index 000000000..229e5c35b
--- /dev/null
+++ b/tests/issues/test_issue_2615.odin
@@ -0,0 +1,19 @@
+// Tests issue https://github.com/odin-lang/Odin/issues/2615
+// Cannot iterate over string literals
+package test_issues
+
+import "core:testing"
+
+@(test)
+test_cannot_iterate_over_string_literal :: proc(t: ^testing.T) {
+ for c, i in "foäø" {
+ switch i {
+ case 0:
+ testing.expect_value(t, c, 'f')
+ case 1:
+ testing.expect_value(t, c, 'o')
+ case 2:
+ testing.expect_value(t, c, 'äø')
+ }
+ }
+}
diff --git a/tests/issues/test_issue_2637.odin b/tests/issues/test_issue_2637.odin
new file mode 100644
index 000000000..c170fc444
--- /dev/null
+++ b/tests/issues/test_issue_2637.odin
@@ -0,0 +1,13 @@
+// Tests issue #2637 https://github.com/odin-lang/Odin/issues/2637
+package test_issues
+
+import "core:testing"
+
+Foo :: Maybe(string)
+
+@(test)
+test_expect_value_succeeds_with_nil :: proc(t: ^testing.T) {
+ x: Foo
+ testing.expect(t, x == nil) // Succeeds
+ testing.expect_value(t, x, nil) // Fails, "expected nil, got nil"
+}
diff --git a/vendor/README.md b/vendor/README.md
index 03f4ef97c..4e44d0970 100644
--- a/vendor/README.md
+++ b/vendor/README.md
@@ -6,9 +6,9 @@ Its use is similar to that of `core:` packages, which would be available in any
Presently, the `vendor:` collection comprises the following packages:
-## microui
+## microui (Port)
-A tiny, portable, immediate-mode UI library written in Odin. (Ported from [rxi/microui](https://github.com/rxi/microui).)
+A tiny, portable, immediate-mode UI library written in Odin. [rxi/microui](https://github.com/rxi/microui)
This package is available under the MIT license. See `LICENSE` for more details.
@@ -158,4 +158,12 @@ Includes full bindings.
Used in: [bgfx](https://github.com/bkaradzic/bgfx), [Filament](https://github.com/google/filament), [gltfpack](https://github.com/zeux/meshoptimizer/tree/master/gltf), [raylib](https://github.com/raysan5/raylib), [Unigine](https://developer.unigine.com/en/docs/2.14.1/third_party?rlang=cpp#cgltf), and more!
-See also LICENCE in `cgltf` directory itself.
+See also LICENCE in the `cgltf` directory itself.
+
+## nanovg (Port)
+
+[NanoVG](https://github.com/memononen/nanovg) is a small antialiased vector graphics rendering library for OpenGL. It has a lean API modeled after the HTML5 canvas API and is aimed at being a practical and fun toolset for building scalable user interfaces and visualizations.
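+
+A minimal sketch of drawing one frame, assuming the Odin port keeps the C API's procedure names without the `nvg` prefix (`BeginFrame`, `BeginPath`, `Rect`, `FillColor`, `RGBA`, `Fill`, `EndFrame`) and that a `^Context` has already been created through one of its backends:
+
+```odin
+package nanovg_example
+
+import nvg "vendor:nanovg"
+
+// Draw a single translucent rectangle; context creation (e.g. via the GL backend) is omitted.
+draw_frame :: proc(ctx: ^nvg.Context, width, height: f32) {
+	nvg.BeginFrame(ctx, width, height, 1.0) // device pixel ratio of 1
+	nvg.BeginPath(ctx)
+	nvg.Rect(ctx, 20, 20, 200, 100)
+	nvg.FillColor(ctx, nvg.RGBA(64, 128, 255, 200))
+	nvg.Fill(ctx)
+	nvg.EndFrame(ctx)
+}
+```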
+
+## fontstash (Port)
+
+[Font stash](https://github.com/memononen/fontstash) is a light-weight online font texture atlas builder. It uses stb_truetype to render fonts on demand to a texture atlas.
diff --git a/vendor/botan/blake2b/blake2b.odin b/vendor/botan/blake2b/blake2b.odin
index 18fd89bd8..6cc828caf 100644
--- a/vendor/botan/blake2b/blake2b.odin
+++ b/vendor/botan/blake2b/blake2b.odin
@@ -69,7 +69,7 @@ hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
defer delete(buf)
i := 1
for i > 0 {
- i, _ = s->impl_read(buf)
+ i, _ = io.read(s, buf)
if i > 0 {
botan.hash_update(ctx, len(buf) == 0 ? nil : &buf[0], uint(i))
}
diff --git a/vendor/botan/gost/gost.odin b/vendor/botan/gost/gost.odin
index bccc4d463..5b3db31fe 100644
--- a/vendor/botan/gost/gost.odin
+++ b/vendor/botan/gost/gost.odin
@@ -69,7 +69,7 @@ hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
defer delete(buf)
i := 1
for i > 0 {
- i, _ = s->impl_read(buf)
+ i, _ = io.read(s, buf)
if i > 0 {
botan.hash_update(ctx, len(buf) == 0 ? nil : &buf[0], uint(i))
}
diff --git a/vendor/botan/keccak/keccak.odin b/vendor/botan/keccak/keccak.odin
index 4c82edc92..c08eaf598 100644
--- a/vendor/botan/keccak/keccak.odin
+++ b/vendor/botan/keccak/keccak.odin
@@ -69,7 +69,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
defer delete(buf)
i := 1
for i > 0 {
- i, _ = s->impl_read(buf)
+ i, _ = io.read(s, buf)
if i > 0 {
botan.hash_update(ctx, len(buf) == 0 ? nil : &buf[0], uint(i))
}
diff --git a/vendor/botan/md4/md4.odin b/vendor/botan/md4/md4.odin
index ddb7d5940..02c33dde9 100644
--- a/vendor/botan/md4/md4.odin
+++ b/vendor/botan/md4/md4.odin
@@ -69,7 +69,7 @@ hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
defer delete(buf)
i := 1
for i > 0 {
- i, _ = s->impl_read(buf)
+ i, _ = io.read(s, buf)
if i > 0 {
botan.hash_update(ctx, len(buf) == 0 ? nil : &buf[0], uint(i))
}
diff --git a/vendor/botan/md5/md5.odin b/vendor/botan/md5/md5.odin
index 9ea489669..9aaf96d27 100644
--- a/vendor/botan/md5/md5.odin
+++ b/vendor/botan/md5/md5.odin
@@ -69,7 +69,7 @@ hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
defer delete(buf)
i := 1
for i > 0 {
- i, _ = s->impl_read(buf)
+ i, _ = io.read(s, buf)
if i > 0 {
botan.hash_update(ctx, len(buf) == 0 ? nil : &buf[0], uint(i))
}
diff --git a/vendor/botan/ripemd/ripemd.odin b/vendor/botan/ripemd/ripemd.odin
index 33f0ba692..ddb549350 100644
--- a/vendor/botan/ripemd/ripemd.odin
+++ b/vendor/botan/ripemd/ripemd.odin
@@ -69,7 +69,7 @@ hash_stream_160 :: proc(s: io.Stream) -> ([DIGEST_SIZE_160]byte, bool) {
defer delete(buf)
i := 1
for i > 0 {
- i, _ = s->impl_read(buf)
+ i, _ = io.read(s, buf)
if i > 0 {
botan.hash_update(ctx, len(buf) == 0 ? nil : &buf[0], uint(i))
}
diff --git a/vendor/botan/sha1/sha1.odin b/vendor/botan/sha1/sha1.odin
index 96520f09e..c39a41d0a 100644
--- a/vendor/botan/sha1/sha1.odin
+++ b/vendor/botan/sha1/sha1.odin
@@ -69,7 +69,7 @@ hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
defer delete(buf)
i := 1
for i > 0 {
- i, _ = s->impl_read(buf)
+ i, _ = io.read(s, buf)
if i > 0 {
botan.hash_update(ctx, len(buf) == 0 ? nil : &buf[0], uint(i))
}
diff --git a/vendor/botan/sha2/sha2.odin b/vendor/botan/sha2/sha2.odin
index d583298ee..4ce001a75 100644
--- a/vendor/botan/sha2/sha2.odin
+++ b/vendor/botan/sha2/sha2.odin
@@ -72,7 +72,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
defer delete(buf)
i := 1
for i > 0 {
- i, _ = s->impl_read(buf)
+ i, _ = io.read(s, buf)
if i > 0 {
botan.hash_update(ctx, len(buf) == 0 ? nil : &buf[0], uint(i))
}
@@ -151,7 +151,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
defer delete(buf)
i := 1
for i > 0 {
- i, _ = s->impl_read(buf)
+ i, _ = io.read(s, buf)
if i > 0 {
botan.hash_update(ctx, len(buf) == 0 ? nil : &buf[0], uint(i))
}
@@ -230,7 +230,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
defer delete(buf)
i := 1
for i > 0 {
- i, _ = s->impl_read(buf)
+ i, _ = io.read(s, buf)
if i > 0 {
botan.hash_update(ctx, len(buf) == 0 ? nil : &buf[0], uint(i))
}
@@ -309,7 +309,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
defer delete(buf)
i := 1
for i > 0 {
- i, _ = s->impl_read(buf)
+ i, _ = io.read(s, buf)
if i > 0 {
botan.hash_update(ctx, len(buf) == 0 ? nil : &buf[0], uint(i))
}
diff --git a/vendor/botan/sha3/sha3.odin b/vendor/botan/sha3/sha3.odin
index 5f82be49c..5dcb008ce 100644
--- a/vendor/botan/sha3/sha3.odin
+++ b/vendor/botan/sha3/sha3.odin
@@ -72,7 +72,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
defer delete(buf)
i := 1
for i > 0 {
- i, _ = s->impl_read(buf)
+ i, _ = io.read(s, buf)
if i > 0 {
botan.hash_update(ctx, len(buf) == 0 ? nil : &buf[0], uint(i))
}
@@ -151,7 +151,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
defer delete(buf)
i := 1
for i > 0 {
- i, _ = s->impl_read(buf)
+ i, _ = io.read(s, buf)
if i > 0 {
botan.hash_update(ctx, len(buf) == 0 ? nil : &buf[0], uint(i))
}
@@ -230,7 +230,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
defer delete(buf)
i := 1
for i > 0 {
- i, _ = s->impl_read(buf)
+ i, _ = io.read(s, buf)
if i > 0 {
botan.hash_update(ctx, len(buf) == 0 ? nil : &buf[0], uint(i))
}
@@ -309,7 +309,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
defer delete(buf)
i := 1
for i > 0 {
- i, _ = s->impl_read(buf)
+ i, _ = io.read(s, buf)
if i > 0 {
botan.hash_update(ctx, len(buf) == 0 ? nil : &buf[0], uint(i))
}
diff --git a/vendor/botan/shake/shake.odin b/vendor/botan/shake/shake.odin
index b973fee24..af577f316 100644
--- a/vendor/botan/shake/shake.odin
+++ b/vendor/botan/shake/shake.odin
@@ -70,7 +70,7 @@ hash_stream_128 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
defer delete(buf)
i := 1
for i > 0 {
- i, _ = s->impl_read(buf)
+ i, _ = io.read(s, buf)
if i > 0 {
botan.hash_update(ctx, len(buf) == 0 ? nil : &buf[0], uint(i))
}
@@ -149,7 +149,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
defer delete(buf)
i := 1
for i > 0 {
- i, _ = s->impl_read(buf)
+ i, _ = io.read(s, buf)
if i > 0 {
botan.hash_update(ctx, len(buf) == 0 ? nil : &buf[0], uint(i))
}
diff --git a/vendor/botan/skein512/skein512.odin b/vendor/botan/skein512/skein512.odin
index 41ffaefff..47529bc44 100644
--- a/vendor/botan/skein512/skein512.odin
+++ b/vendor/botan/skein512/skein512.odin
@@ -72,7 +72,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
defer delete(buf)
i := 1
for i > 0 {
- i, _ = s->impl_read(buf)
+ i, _ = io.read(s, buf)
if i > 0 {
botan.hash_update(ctx, len(buf) == 0 ? nil : &buf[0], uint(i))
}
@@ -151,7 +151,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
defer delete(buf)
i := 1
for i > 0 {
- i, _ = s->impl_read(buf)
+ i, _ = io.read(s, buf)
if i > 0 {
botan.hash_update(ctx, len(buf) == 0 ? nil : &buf[0], uint(i))
}
@@ -230,7 +230,7 @@ hash_stream_slice :: proc(s: io.Stream, bit_size: int, allocator := context.allo
defer delete(buf)
i := 1
for i > 0 {
- i, _ = s->impl_read(buf)
+ i, _ = io.read(s, buf)
if i > 0 {
botan.hash_update(ctx, len(buf) == 0 ? nil : &buf[0], uint(i))
}
diff --git a/vendor/botan/sm3/sm3.odin b/vendor/botan/sm3/sm3.odin
index 52fe9a488..dd6da9e63 100644
--- a/vendor/botan/sm3/sm3.odin
+++ b/vendor/botan/sm3/sm3.odin
@@ -69,7 +69,7 @@ hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
defer delete(buf)
i := 1
for i > 0 {
- i, _ = s->impl_read(buf)
+ i, _ = io.read(s, buf)
if i > 0 {
botan.hash_update(ctx, len(buf) == 0 ? nil : &buf[0], uint(i))
}
diff --git a/vendor/botan/streebog/streebog.odin b/vendor/botan/streebog/streebog.odin
index fdc07923f..07c39684a 100644
--- a/vendor/botan/streebog/streebog.odin
+++ b/vendor/botan/streebog/streebog.odin
@@ -70,7 +70,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
defer delete(buf)
i := 1
for i > 0 {
- i, _ = s->impl_read(buf)
+ i, _ = io.read(s, buf)
if i > 0 {
botan.hash_update(ctx, len(buf) == 0 ? nil : &buf[0], uint(i))
}
@@ -149,7 +149,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
defer delete(buf)
i := 1
for i > 0 {
- i, _ = s->impl_read(buf)
+ i, _ = io.read(s, buf)
if i > 0 {
botan.hash_update(ctx, len(buf) == 0 ? nil : &buf[0], uint(i))
}
diff --git a/vendor/botan/tiger/tiger.odin b/vendor/botan/tiger/tiger.odin
index 3d7e064d0..960d4694b 100644
--- a/vendor/botan/tiger/tiger.odin
+++ b/vendor/botan/tiger/tiger.odin
@@ -71,7 +71,7 @@ hash_stream_128 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
defer delete(buf)
i := 1
for i > 0 {
- i, _ = s->impl_read(buf)
+ i, _ = io.read(s, buf)
if i > 0 {
botan.hash_update(ctx, len(buf) == 0 ? nil : &buf[0], uint(i))
}
@@ -150,7 +150,7 @@ hash_stream_160 :: proc(s: io.Stream) -> ([DIGEST_SIZE_160]byte, bool) {
defer delete(buf)
i := 1
for i > 0 {
- i, _ = s->impl_read(buf)
+ i, _ = io.read(s, buf)
if i > 0 {
botan.hash_update(ctx, len(buf) == 0 ? nil : &buf[0], uint(i))
}
@@ -229,7 +229,7 @@ hash_stream_192 :: proc(s: io.Stream) -> ([DIGEST_SIZE_192]byte, bool) {
defer delete(buf)
i := 1
for i > 0 {
- i, _ = s->impl_read(buf)
+ i, _ = io.read(s, buf)
if i > 0 {
botan.hash_update(ctx, len(buf) == 0 ? nil : &buf[0], uint(i))
}
diff --git a/vendor/botan/whirlpool/whirlpool.odin b/vendor/botan/whirlpool/whirlpool.odin
index c32ff20c0..76d4d25d4 100644
--- a/vendor/botan/whirlpool/whirlpool.odin
+++ b/vendor/botan/whirlpool/whirlpool.odin
@@ -69,7 +69,7 @@ hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
defer delete(buf)
i := 1
for i > 0 {
- i, _ = s->impl_read(buf)
+ i, _ = io.read(s, buf)
if i > 0 {
botan.hash_update(ctx, len(buf) == 0 ? nil : &buf[0], uint(i))
}
diff --git a/vendor/darwin/Metal/MetalClasses.odin b/vendor/darwin/Metal/MetalClasses.odin
index b10959c2b..17f22e1d3 100644
--- a/vendor/darwin/Metal/MetalClasses.odin
+++ b/vendor/darwin/Metal/MetalClasses.odin
@@ -6,23 +6,7 @@ _ :: mem
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- AccelerationStructureBoundingBoxGeometryDescriptor
-Class Methods:
- alloc
- descriptor
-Methods:
- init
- boundingBoxBuffer
- boundingBoxBufferOffset
- boundingBoxCount
- boundingBoxStride
- setBoundingBoxBuffer
- setBoundingBoxBufferOffset
- setBoundingBoxCount
- setBoundingBoxStride
-*/
+
@(objc_class="MTLAccelerationStructureBoundingBoxGeometryDescriptor")
AccelerationStructureBoundingBoxGeometryDescriptor :: struct { using _: NS.Copying(AccelerationStructureBoundingBoxGeometryDescriptor), using _: AccelerationStructureDescriptor }
@@ -73,19 +57,6 @@ AccelerationStructureBoundingBoxGeometryDescriptor_setBoundingBoxStride :: #forc
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- MotionKeyframeData
-Class Methods:
- alloc
- data
-Methods:
- init
- buffer
- setBuffer
- offset
- setOffset
-*/
@(objc_class="MTLMotionKeyframeData")
MotionKeyframeData :: struct { using _: NS.Object }
@@ -121,10 +92,7 @@ MotionKeyframeData_setOffset :: #force_inline proc "c" (self: ^MotionKeyframeDat
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- AccelerationStructureMotionTriangleGeometryDescriptor
-*/
+
@(objc_class="MTLAccelerationStructureMotionTriangleGeometryDescriptor")
AccelerationStructureMotionTriangleGeometryDescriptor :: struct { using _: NS.Copying(AccelerationStructureMotionTriangleGeometryDescriptor), using _: AccelerationStructureGeometryDescriptor }
@@ -222,10 +190,7 @@ AccelerationStructureMotionTriangleGeometryDescriptor_setTransformationMatrixBuf
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- AccelerationStructureMotionBoundingBoxGeometryDescriptor
-*/
+
@(objc_class="MTLAccelerationStructureMotionBoundingBoxGeometryDescriptor")
AccelerationStructureMotionBoundingBoxGeometryDescriptor :: struct { using _: NS.Copying(AccelerationStructureMotionBoundingBoxGeometryDescriptor), using _: AccelerationStructureGeometryDescriptor }
@@ -279,16 +244,7 @@ AccelerationStructureMotionBoundingBoxGeometryDescriptor_setBoundingBoxCount ::
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- AccelerationStructureDescriptor
-Class Methods:
- alloc
-Methods:
- init
- setUsage
- usage
-*/
+
@(objc_class="MTLAccelerationStructureDescriptor")
AccelerationStructureDescriptor :: struct { using _: NS.Copying(AccelerationStructureDescriptor) }
@@ -311,28 +267,7 @@ AccelerationStructureDescriptor_usage :: #force_inline proc "c" (self: ^Accelera
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- AccelerationStructureGeometryDescriptor
-Class Methods:
- alloc
-Methods:
- init
- allowDuplicateIntersectionFunctionInvocation
- intersectionFunctionTableOffset
- opaque
- setAllowDuplicateIntersectionFunctionInvocation
- setIntersectionFunctionTableOffset
- setOpaque
- primitiveDataBuffer
- setPrimitiveDataBuffer
- primitiveDataBufferOffset
- setPrimitiveDataBufferOffset
- primitiveDataStride
- setPrimitiveDataStride
- primitiveDataElementSize
- setPrimitiveDataElementSize
-*/
+
@(objc_class="MTLAccelerationStructureGeometryDescriptor")
AccelerationStructureGeometryDescriptor :: struct { using _: NS.Copying(AccelerationStructureGeometryDescriptor) }
@@ -405,35 +340,7 @@ AccelerationStructureGeometryDescriptor_setPrimitiveDataElementSize :: #force_in
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- AccelerationStructureTriangleGeometryDescriptor
-Class Methods:
- alloc
- descriptor
-Methods:
- init
- indexBuffer
- indexBufferOffset
- indexType
- setIndexBuffer
- setIndexBufferOffset
- setIndexType
- setTriangleCount
- setVertexBuffer
- setVertexBufferOffset
- setVertexStride
- triangleCount
- vertexBuffer
- vertexBufferOffset
- vertexStride
- vertexFormat
- setVertexFormat
- transformationMatrixBuffer
- setTransformationMatrixBuffer
- transformationMatrixBufferOffset
- setTransformationMatrixBufferOffset
-*/
+
@(objc_class="MTLAccelerationStructureTriangleGeometryDescriptor")
AccelerationStructureTriangleGeometryDescriptor :: struct { using _: NS.Copying(AccelerationStructureTriangleGeometryDescriptor), using _: AccelerationStructureDescriptor }
@@ -534,30 +441,7 @@ AccelerationStructureTriangleGeometryDescriptor_setTransformationMatrixBufferOff
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- Argument
-Class Methods:
- alloc
-Methods:
- init
- access
- arrayLength
- bufferAlignment
- bufferDataSize
- bufferDataType
- bufferPointerType
- bufferStructType
- index
- isActive
- isDepthTexture
- name
- textureDataType
- textureType
- threadgroupMemoryAlignment
- threadgroupMemoryDataSize
- type
-*/
+
@(objc_class="MTLArgument")
Argument :: struct { using _: NS.Object }
@@ -636,27 +520,7 @@ Argument_type :: #force_inline proc "c" (self: ^Argument) -> ArgumentType {
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- ArgumentDescriptor
-Class Methods:
- alloc
- argumentDescriptor
-Methods:
- init
- access
- arrayLength
- constantBlockAlignment
- dataType
- index
- setAccess
- setArrayLength
- setConstantBlockAlignment
- setDataType
- setIndex
- setTextureType
- textureType
-*/
+
@(objc_class="MTLArgumentDescriptor")
ArgumentDescriptor :: struct { using _: NS.Copying(ArgumentDescriptor) }
@@ -723,22 +587,7 @@ ArgumentDescriptor_textureType :: #force_inline proc "c" (self: ^ArgumentDescrip
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- ArrayType
-Class Methods:
- alloc
-Methods:
- init
- argumentIndexStride
- arrayLength
- elementArrayType
- elementPointerType
- elementStructType
- elementTextureReferenceType
- elementType
- stride
-*/
+
@(objc_class="MTLArrayType")
ArrayType :: struct { using _: Type }
@@ -785,20 +634,7 @@ ArrayType_stride :: #force_inline proc "c" (self: ^ArrayType) -> NS.UInteger {
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- Attribute
-Class Methods:
- alloc
-Methods:
- init
- attributeIndex
- attributeType
- isActive
- isPatchControlPointData
- isPatchData
- name
-*/
+
@(objc_class="MTLAttribute")
Attribute :: struct { using _: NS.Object }
@@ -837,20 +673,7 @@ Attribute_name :: #force_inline proc "c" (self: ^Attribute) -> ^NS.String {
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- AttributeDescriptor
-Class Methods:
- alloc
-Methods:
- init
- bufferIndex
- format
- offset
- setBufferIndex
- setFormat
- setOffset
-*/
+
@(objc_class="MTLAttributeDescriptor")
AttributeDescriptor :: struct { using _: NS.Copying(AttributeDescriptor) }
@@ -889,16 +712,7 @@ AttributeDescriptor_setOffset :: #force_inline proc "c" (self: ^AttributeDescrip
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- AttributeDescriptorArray
-Class Methods:
- alloc
-Methods:
- init
- objectAtIndexedSubscript
- setObject
-*/
+
@(objc_class="MTLAttributeDescriptorArray")
AttributeDescriptorArray :: struct { using _: NS.Object }
@@ -921,16 +735,7 @@ AttributeDescriptorArray_setObject :: #force_inline proc "c" (self: ^AttributeDe
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- BinaryArchiveDescriptor
-Class Methods:
- alloc
-Methods:
- init
- setUrl
- url
-*/
+
@(objc_class="MTLBinaryArchiveDescriptor")
BinaryArchiveDescriptor :: struct { using _: NS.Copying(BinaryArchiveDescriptor) }
@@ -953,16 +758,7 @@ BinaryArchiveDescriptor_url :: #force_inline proc "c" (self: ^BinaryArchiveDescr
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- BlitPassDescriptor
-Class Methods:
- alloc
- blitPassDescriptor
-Methods:
- init
- sampleBufferAttachments
-*/
+
@(objc_class="MTLBlitPassDescriptor")
BlitPassDescriptor :: struct { using _: NS.Copying(BlitPassDescriptor) }
@@ -985,20 +781,7 @@ BlitPassDescriptor_sampleBufferAttachments :: #force_inline proc "c" (self: ^Bli
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- BlitPassSampleBufferAttachmentDescriptor
-Class Methods:
- alloc
-Methods:
- init
- endOfEncoderSampleIndex
- sampleBuffer
- setEndOfEncoderSampleIndex
- setSampleBuffer
- setStartOfEncoderSampleIndex
- startOfEncoderSampleIndex
-*/
+
@(objc_class="MTLBlitPassSampleBufferAttachmentDescriptor")
BlitPassSampleBufferAttachmentDescriptor :: struct { using _: NS.Copying(BlitPassSampleBufferAttachmentDescriptor) }
@@ -1037,16 +820,7 @@ BlitPassSampleBufferAttachmentDescriptor_startOfEncoderSampleIndex :: #force_inl
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- BlitPassSampleBufferAttachmentDescriptorArray
-Class Methods:
- alloc
-Methods:
- init
- objectAtIndexedSubscript
- setObject
-*/
+
@(objc_class="MTLBlitPassSampleBufferAttachmentDescriptorArray")
BlitPassSampleBufferAttachmentDescriptorArray :: struct { using _: NS.Object }
@@ -1069,20 +843,7 @@ BlitPassSampleBufferAttachmentDescriptorArray_setObject :: #force_inline proc "c
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- BufferLayoutDescriptor
-Class Methods:
- alloc
-Methods:
- init
- setStepFunction
- setStepRate
- setStride
- stepFunction
- stepRate
- stride
-*/
+
@(objc_class="MTLBufferLayoutDescriptor")
BufferLayoutDescriptor :: struct { using _: NS.Copying(BufferLayoutDescriptor) }
@@ -1121,16 +882,7 @@ BufferLayoutDescriptor_stride :: #force_inline proc "c" (self: ^BufferLayoutDesc
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- BufferLayoutDescriptorArray
-Class Methods:
- alloc
-Methods:
- init
- objectAtIndexedSubscript
- setObject
-*/
+
@(objc_class="MTLBufferLayoutDescriptorArray")
BufferLayoutDescriptorArray :: struct { using _: NS.Object }
@@ -1153,20 +905,7 @@ BufferLayoutDescriptorArray_setObject :: #force_inline proc "c" (self: ^BufferLa
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- CaptureDescriptor
-Class Methods:
- alloc
-Methods:
- init
- captureObject
- destination
- outputURL
- setCaptureObject
- setDestination
- setOutputURL
-*/
+
@(objc_class="MTLCaptureDescriptor")
CaptureDescriptor :: struct { using _: NS.Copying(CaptureDescriptor) }
@@ -1205,26 +944,7 @@ CaptureDescriptor_setOutputURL :: #force_inline proc "c" (self: ^CaptureDescript
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- CaptureManager
-Class Methods:
- alloc
- sharedCaptureManager
-Methods:
- defaultCaptureScope
- init
- isCapturing
- newCaptureScopeWithCommandQueue
- newCaptureScopeWithDevice
- setDefaultCaptureScope
- startCaptureWithCommandQueue
- startCaptureWithDescriptor
- startCaptureWithDevice
- startCaptureWithScope
- stopCapture
- supportsDestination
-*/
+
@(objc_class="MTLCaptureManager")
CaptureManager :: struct { using _: NS.Object }
@@ -1294,18 +1014,7 @@ CaptureManager_supportsDestination :: #force_inline proc "c" (self: ^CaptureMana
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- CommandBufferDescriptor
-Class Methods:
- alloc
-Methods:
- init
- errorOptions
- retainedReferences
- setErrorOptions
- setRetainedReferences
-*/
+
@(objc_class="MTLCommandBufferDescriptor")
CommandBufferDescriptor :: struct { using _: NS.Copying(CommandBufferDescriptor) }
@@ -1336,28 +1045,7 @@ CommandBufferDescriptor_setRetainedReferences :: #force_inline proc "c" (self: ^
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- CompileOptions
-Class Methods:
- alloc
-Methods:
- init
- fastMathEnabled
- installName
- languageVersion
- libraries
- libraryType
- preprocessorMacros
- preserveInvariance
- setFastMathEnabled
- setInstallName
- setLanguageVersion
- setLibraries
- setLibraryType
- setPreprocessorMacros
- setPreserveInvariance
-*/
+
@(objc_class="MTLCompileOptions")
CompileOptions :: struct { using _: NS.Copying(CompileOptions) }
@@ -1437,18 +1125,7 @@ CompileOptions_setOptimizationLevel :: #force_inline proc "c" (self: ^CompileOpt
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- ComputePassDescriptor
-Class Methods:
- alloc
- computePassDescriptor
-Methods:
- init
- dispatchType
- sampleBufferAttachments
- setDispatchType
-*/
+
@(objc_class="MTLComputePassDescriptor")
ComputePassDescriptor :: struct { using _: NS.Copying(ComputePassDescriptor) }
@@ -1479,20 +1156,7 @@ ComputePassDescriptor_setDispatchType :: #force_inline proc "c" (self: ^ComputeP
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- ComputePassSampleBufferAttachmentDescriptor
-Class Methods:
- alloc
-Methods:
- init
- endOfEncoderSampleIndex
- sampleBuffer
- setEndOfEncoderSampleIndex
- setSampleBuffer
- setStartOfEncoderSampleIndex
- startOfEncoderSampleIndex
-*/
+
@(objc_class="MTLComputePassSampleBufferAttachmentDescriptor")
ComputePassSampleBufferAttachmentDescriptor :: struct { using _: NS.Copying(ComputePassSampleBufferAttachmentDescriptor) }
@@ -1531,16 +1195,7 @@ ComputePassSampleBufferAttachmentDescriptor_startOfEncoderSampleIndex :: #force_
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- ComputePassSampleBufferAttachmentDescriptorArray
-Class Methods:
- alloc
-Methods:
- init
- objectAtIndexedSubscript
- setObject
-*/
+
@(objc_class="MTLComputePassSampleBufferAttachmentDescriptorArray")
ComputePassSampleBufferAttachmentDescriptorArray :: struct { using _: NS.Object }
@@ -1563,38 +1218,7 @@ ComputePassSampleBufferAttachmentDescriptorArray_setObject :: #force_inline proc
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- ComputePipelineDescriptor
-Class Methods:
- alloc
-Methods:
- init
- binaryArchives
- buffers
- computeFunction
- insertLibraries
- label
- linkedFunctions
- maxCallStackDepth
- maxTotalThreadsPerThreadgroup
- reset
- setBinaryArchives
- setComputeFunction
- setInsertLibraries
- setLabel
- setLinkedFunctions
- setMaxCallStackDepth
- setMaxTotalThreadsPerThreadgroup
- setStageInputDescriptor
- setSupportAddingBinaryFunctions
- setSupportIndirectCommandBuffers
- setThreadGroupSizeIsMultipleOfThreadExecutionWidth
- stageInputDescriptor
- supportAddingBinaryFunctions
- supportIndirectCommandBuffers
- threadGroupSizeIsMultipleOfThreadExecutionWidth
-*/
+
@(objc_class="MTLComputePipelineDescriptor")
ComputePipelineDescriptor :: struct { using _: NS.Copying(ComputePipelineDescriptor) }
@@ -1710,15 +1334,7 @@ ComputePipelineDescriptor_gpuResourceID :: #force_inline proc "c" (self: ^Comput
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- ComputePipelineReflection
-Class Methods:
- alloc
-Methods:
- init
- arguments
-*/
+
@(objc_class="MTLComputePipelineReflection")
ComputePipelineReflection :: struct { using _: NS.Object }
@@ -1741,22 +1357,7 @@ ComputePipelineReflection_arguments :: #force_inline proc "c" (self: ^ComputePip
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- CounterSampleBufferDescriptor
-Class Methods:
- alloc
-Methods:
- init
- counterSet
- label
- sampleCount
- setCounterSet
- setLabel
- setSampleCount
- setStorageMode
- storageMode
-*/
+
@(objc_class="MTLCounterSampleBufferDescriptor")
CounterSampleBufferDescriptor :: struct { using _: NS.Copying(CounterSampleBufferDescriptor) }
@@ -1803,24 +1404,7 @@ CounterSampleBufferDescriptor_storageMode :: #force_inline proc "c" (self: ^Coun
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- DepthStencilDescriptor
-Class Methods:
- alloc
-Methods:
- init
- backFaceStencil
- depthCompareFunction
- frontFaceStencil
- isDepthWriteEnabled
- label
- setBackFaceStencil
- setDepthCompareFunction
- setDepthWriteEnabled
- setFrontFaceStencil
- setLabel
-*/
+
@(objc_class="MTLDepthStencilDescriptor")
DepthStencilDescriptor :: struct { using _: NS.Copying(DepthStencilDescriptor) }
@@ -1875,18 +1459,7 @@ DepthStencilDescriptor_setLabel :: #force_inline proc "c" (self: ^DepthStencilDe
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- FunctionConstant
-Class Methods:
- alloc
-Methods:
- init
- index
- name
- required
- type
-*/
+
@(objc_class="MTLFunctionConstant")
FunctionConstant :: struct { using _: NS.Copying(FunctionConstant) }
@@ -1917,18 +1490,7 @@ FunctionConstant_type :: #force_inline proc "c" (self: ^FunctionConstant) -> Dat
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- FunctionConstantValues
-Class Methods:
- alloc
-Methods:
- init
- reset
- setConstantValue
- setConstantValue
- setConstantValues
-*/
+
@(objc_class="MTLFunctionConstantValues")
FunctionConstantValues :: struct { using _: NS.Copying(FunctionConstantValues) }
@@ -1959,23 +1521,7 @@ FunctionConstantValues_setConstantValues :: #force_inline proc "c" (self: ^Funct
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- FunctionDescriptor
-Class Methods:
- alloc
- functionDescriptor
-Methods:
- init
- constantValues
- name
- options
- setConstantValues
- setName
- setOptions
- setSpecializedName
- specializedName
-*/
+
@(objc_class="MTLFunctionDescriptor")
FunctionDescriptor :: struct { using _: NS.Copying(FunctionDescriptor) }
@@ -2026,14 +1572,7 @@ FunctionDescriptor_specializedName :: #force_inline proc "c" (self: ^FunctionDes
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- IntersectionFunctionDescriptor
-Class Methods:
- alloc
-Methods:
- init
-*/
+
@(objc_class="MTLIntersectionFunctionDescriptor")
IntersectionFunctionDescriptor :: struct { using _: NS.Copying(IntersectionFunctionDescriptor) }
@@ -2048,26 +1587,7 @@ IntersectionFunctionDescriptor_init :: #force_inline proc "c" (self: ^Intersecti
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- HeapDescriptor
-Class Methods:
- alloc
-Methods:
- init
- cpuCacheMode
- hazardTrackingMode
- resourceOptions
- setCpuCacheMode
- setHazardTrackingMode
- setResourceOptions
- setSize
- setStorageMode
- setType
- size
- storageMode
- type
-*/
+
@(objc_class="MTLHeapDescriptor")
HeapDescriptor :: struct { using _: NS.Copying(HeapDescriptor) }
@@ -2140,26 +1660,7 @@ HeapDescriptor_type :: #force_inline proc "c" (self: ^HeapDescriptor) -> HeapTyp
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- IndirectCommandBufferDescriptor
-Class Methods:
- alloc
-Methods:
- init
- commandTypes
- inheritBuffers
- inheritPipelineState
- maxFragmentBufferBindCount
- maxKernelBufferBindCount
- maxVertexBufferBindCount
- setCommandTypes
- setInheritBuffers
- setInheritPipelineState
- setMaxFragmentBufferBindCount
- setMaxKernelBufferBindCount
- setMaxVertexBufferBindCount
-*/
+
@(objc_class="MTLIndirectCommandBufferDescriptor")
IndirectCommandBufferDescriptor :: struct { using _: NS.Copying(IndirectCommandBufferDescriptor) }
@@ -2222,25 +1723,7 @@ IndirectCommandBufferDescriptor_setMaxVertexBufferBindCount :: #force_inline pro
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- InstanceAccelerationStructureDescriptor
-Class Methods:
- alloc
- descriptor
-Methods:
- init
- instanceCount
- instanceDescriptorBuffer
- instanceDescriptorBufferOffset
- instanceDescriptorStride
- instancedAccelerationStructures
- setInstanceCount
- setInstanceDescriptorBuffer
- setInstanceDescriptorBufferOffset
- setInstanceDescriptorStride
- setInstancedAccelerationStructures
-*/
+
@(objc_class="MTLInstanceAccelerationStructureDescriptor")
InstanceAccelerationStructureDescriptor :: struct { using _: NS.Copying(InstanceAccelerationStructureDescriptor), using _: AccelerationStructureDescriptor }
@@ -2339,17 +1822,7 @@ InstanceAccelerationStructureDescriptor_setMotionTransformCount :: #force_inline
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- IntersectionFunctionTableDescriptor
-Class Methods:
- alloc
- intersectionFunctionTableDescriptor
-Methods:
- init
- functionCount
- setFunctionCount
-*/
+
@(objc_class="MTLIntersectionFunctionTableDescriptor")
IntersectionFunctionTableDescriptor :: struct { using _: NS.Copying(IntersectionFunctionTableDescriptor) }
@@ -2376,21 +1849,7 @@ IntersectionFunctionTableDescriptor_setFunctionCount :: #force_inline proc "c" (
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- LinkedFunctions
-Class Methods:
- alloc
- linkedFunctions
-Methods:
- init
- binaryFunctions
- functions
- groups
- setBinaryFunctions
- setFunctions
- setGroups
-*/
+
@(objc_class="MTLLinkedFunctions")
LinkedFunctions :: struct { using _: NS.Copying(LinkedFunctions) }
@@ -2433,16 +1892,7 @@ LinkedFunctions_setGroups :: #force_inline proc "c" (self: ^LinkedFunctions, gro
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- PipelineBufferDescriptor
-Class Methods:
- alloc
-Methods:
- init
- mutability
- setMutability
-*/
+
@(objc_class="MTLPipelineBufferDescriptor")
PipelineBufferDescriptor :: struct { using _: NS.Copying(PipelineBufferDescriptor) }
@@ -2465,16 +1915,7 @@ PipelineBufferDescriptor_setMutability :: #force_inline proc "c" (self: ^Pipelin
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- PipelineBufferDescriptorArray
-Class Methods:
- alloc
-Methods:
- init
- objectAtIndexedSubscript
- setObject
-*/
+
@(objc_class="MTLPipelineBufferDescriptorArray")
PipelineBufferDescriptorArray :: struct { using _: NS.Object }
@@ -2497,21 +1938,7 @@ PipelineBufferDescriptorArray_setObject :: #force_inline proc "c" (self: ^Pipeli
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- PointerType
-Class Methods:
- alloc
-Methods:
- init
- access
- alignment
- dataSize
- elementArrayType
- elementIsArgumentBuffer
- elementStructType
- elementType
-*/
+
@(objc_class="MTLPointerType")
PointerType :: struct { using _: Type }
@@ -2554,17 +1981,7 @@ PointerType_elementType :: #force_inline proc "c" (self: ^PointerType) -> DataTy
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- PrimitiveAccelerationStructureDescriptor
-Class Methods:
- alloc
- descriptor
-Methods:
- init
- geometryDescriptors
- setGeometryDescriptors
-*/
+
@(objc_class="MTLPrimitiveAccelerationStructureDescriptor")
PrimitiveAccelerationStructureDescriptor :: struct { using _: NS.Copying(PrimitiveAccelerationStructureDescriptor), using _: AccelerationStructureDescriptor }
@@ -2639,16 +2056,7 @@ PrimitiveAccelerationStructureDescriptor_setMotionKeyframeCount :: #force_inline
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- RasterizationRateLayerArray
-Class Methods:
- alloc
-Methods:
- init
- objectAtIndexedSubscript
- setObject
-*/
+
@(objc_class="MTLRasterizationRateLayerArray")
RasterizationRateLayerArray :: struct { using _: NS.Object }
@@ -2671,21 +2079,7 @@ RasterizationRateLayerArray_setObject :: #force_inline proc "c" (self: ^Rasteriz
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- RasterizationRateLayerDescriptor
-Class Methods:
- alloc
-Methods:
- horizontal
- horizontalSampleStorage
- init
- initWithSampleCount
- initWithSampleCount
- sampleCount
- vertical
- verticalSampleStorage
-*/
+
@(objc_class="MTLRasterizationRateLayerDescriptor")
RasterizationRateLayerDescriptor :: struct { using _: NS.Copying(RasterizationRateLayerDescriptor) }
@@ -2728,25 +2122,7 @@ RasterizationRateLayerDescriptor_verticalSampleStorage :: #force_inline proc "c"
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- RasterizationRateMapDescriptor
-Class Methods:
- alloc
- rasterizationRateMapDescriptorWithScreenSize
- rasterizationRateMapDescriptorWithScreenSize
- rasterizationRateMapDescriptorWithScreenSize
-Methods:
- init
- label
- layerAtIndex
- layerCount
- layers
- screenSize
- setLabel
- setLayer
- setScreenSize
-*/
+
@(objc_class="MTLRasterizationRateMapDescriptor")
RasterizationRateMapDescriptor :: struct { using _: NS.Copying(RasterizationRateMapDescriptor) }
@@ -2805,16 +2181,7 @@ RasterizationRateMapDescriptor_setScreenSize :: #force_inline proc "c" (self: ^R
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- RasterizationRateSampleArray
-Class Methods:
- alloc
-Methods:
- init
- objectAtIndexedSubscript
- setObject
-*/
+
@(objc_class="MTLRasterizationRateSampleArray")
RasterizationRateSampleArray :: struct { using _: NS.Object }
@@ -2837,36 +2204,7 @@ RasterizationRateSampleArray_setObject :: #force_inline proc "c" (self: ^Rasteri
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- RenderPassAttachmentDescriptor
-Class Methods:
- alloc
-Methods:
- init
- depthPlane
- level
- loadAction
- resolveDepthPlane
- resolveLevel
- resolveSlice
- resolveTexture
- setDepthPlane
- setLevel
- setLoadAction
- setResolveDepthPlane
- setResolveLevel
- setResolveSlice
- setResolveTexture
- setSlice
- setStoreAction
- setStoreActionOptions
- setTexture
- slice
- storeAction
- storeActionOptions
- texture
-*/
+
@(objc_class="MTLRenderPassAttachmentDescriptor")
RenderPassAttachmentDescriptor :: struct { using _: NS.Copying(RenderPassAttachmentDescriptor) }
@@ -2969,16 +2307,7 @@ RenderPassAttachmentDescriptor_texture :: #force_inline proc "c" (self: ^RenderP
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- RenderPassColorAttachmentDescriptor
-Class Methods:
- alloc
-Methods:
- init
- clearColor
- setClearColor
-*/
+
@(objc_class="MTLRenderPassColorAttachmentDescriptor")
RenderPassColorAttachmentDescriptor :: struct { using _: NS.Copying(RenderPassColorAttachmentDescriptor), using _: RenderPassAttachmentDescriptor }
@@ -3001,16 +2330,7 @@ RenderPassColorAttachmentDescriptor_setClearColor :: #force_inline proc "c" (sel
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- RenderPassColorAttachmentDescriptorArray
-Class Methods:
- alloc
-Methods:
- init
- objectAtIndexedSubscript
- setObject
-*/
+
@(objc_class="MTLRenderPassColorAttachmentDescriptorArray")
RenderPassColorAttachmentDescriptorArray :: struct { using _: NS.Object }
@@ -3033,18 +2353,7 @@ RenderPassColorAttachmentDescriptorArray_setObject :: #force_inline proc "c" (se
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- RenderPassDepthAttachmentDescriptor
-Class Methods:
- alloc
-Methods:
- init
- clearDepth
- depthResolveFilter
- setClearDepth
- setDepthResolveFilter
-*/
+
@(objc_class="MTLRenderPassDepthAttachmentDescriptor")
RenderPassDepthAttachmentDescriptor :: struct { using _: NS.Copying(RenderPassDepthAttachmentDescriptor), using _: RenderPassAttachmentDescriptor }
@@ -3075,43 +2384,7 @@ RenderPassDepthAttachmentDescriptor_setDepthResolveFilter :: #force_inline proc
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- RenderPassDescriptor
-Class Methods:
- alloc
- renderPassDescriptor
-Methods:
- init
- colorAttachments
- defaultRasterSampleCount
- depthAttachment
- getSamplePositions
- imageblockSampleLength
- rasterizationRateMap
- renderTargetArrayLength
- renderTargetHeight
- renderTargetWidth
- sampleBufferAttachments
- setDefaultRasterSampleCount
- setDepthAttachment
- setImageblockSampleLength
- setRasterizationRateMap
- setRenderTargetArrayLength
- setRenderTargetHeight
- setRenderTargetWidth
- setSamplePositions
- setStencilAttachment
- setThreadgroupMemoryLength
- setTileHeight
- setTileWidth
- setVisibilityResultBuffer
- stencilAttachment
- threadgroupMemoryLength
- tileHeight
- tileWidth
- visibilityResultBuffer
-*/
+
@(objc_class="MTLRenderPassDescriptor")
RenderPassDescriptor :: struct { using _: NS.Copying(RenderPassDescriptor), using _: AccelerationStructureDescriptor }
@@ -3242,24 +2515,8 @@ RenderPassDescriptor_visibilityResultBuffer :: #force_inline proc "c" (self: ^Re
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- RenderPassSampleBufferAttachmentDescriptor
-Class Methods:
- alloc
-Methods:
- init
- endOfFragmentSampleIndex
- endOfVertexSampleIndex
- sampleBuffer
- setEndOfFragmentSampleIndex
- setEndOfVertexSampleIndex
- setSampleBuffer
- setStartOfFragmentSampleIndex
- setStartOfVertexSampleIndex
- startOfFragmentSampleIndex
- startOfVertexSampleIndex
-*/
+
+
@(objc_class="MTLRenderPassSampleBufferAttachmentDescriptor")
RenderPassSampleBufferAttachmentDescriptor :: struct { using _: NS.Copying(RenderPassSampleBufferAttachmentDescriptor) }
@@ -3314,16 +2571,7 @@ RenderPassSampleBufferAttachmentDescriptor_startOfVertexSampleIndex :: #force_in
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- RenderPassSampleBufferAttachmentDescriptorArray
-Class Methods:
- alloc
-Methods:
- init
- objectAtIndexedSubscript
- setObject
-*/
+
@(objc_class="MTLRenderPassSampleBufferAttachmentDescriptorArray")
RenderPassSampleBufferAttachmentDescriptorArray :: struct { using _: NS.Object }
@@ -3346,18 +2594,7 @@ RenderPassSampleBufferAttachmentDescriptorArray_setObject :: #force_inline proc
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- RenderPassStencilAttachmentDescriptor
-Class Methods:
- alloc
-Methods:
- init
- clearStencil
- setClearStencil
- setStencilResolveFilter
- stencilResolveFilter
-*/
+
@(objc_class="MTLRenderPassStencilAttachmentDescriptor")
RenderPassStencilAttachmentDescriptor :: struct { using _: NS.Copying(RenderPassStencilAttachmentDescriptor) }
@@ -3388,32 +2625,7 @@ RenderPassStencilAttachmentDescriptor_stencilResolveFilter :: #force_inline proc
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- RenderPipelineColorAttachmentDescriptor
-Class Methods:
- alloc
-Methods:
- init
- alphaBlendOperation
- destinationAlphaBlendFactor
- destinationRGBBlendFactor
- isBlendingEnabled
- pixelFormat
- rgbBlendOperation
- setAlphaBlendOperation
- setBlendingEnabled
- setDestinationAlphaBlendFactor
- setDestinationRGBBlendFactor
- setPixelFormat
- setRgbBlendOperation
- setSourceAlphaBlendFactor
- setSourceRGBBlendFactor
- setWriteMask
- sourceAlphaBlendFactor
- sourceRGBBlendFactor
- writeMask
-*/
+
@(objc_class="MTLRenderPipelineColorAttachmentDescriptor")
RenderPipelineColorAttachmentDescriptor :: struct { using _: NS.Copying(RenderPipelineColorAttachmentDescriptor), using _: RenderPassAttachmentDescriptor }
@@ -3500,16 +2712,7 @@ RenderPipelineColorAttachmentDescriptor_writeMask :: #force_inline proc "c" (sel
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- RenderPipelineColorAttachmentDescriptorArray
-Class Methods:
- alloc
-Methods:
- init
- objectAtIndexedSubscript
- setObject
-*/
+
@(objc_class="MTLRenderPipelineColorAttachmentDescriptorArray")
RenderPipelineColorAttachmentDescriptorArray :: struct { using _: NS.Object }
@@ -3532,62 +2735,7 @@ RenderPipelineColorAttachmentDescriptorArray_setObject :: #force_inline proc "c"
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- RenderPipelineDescriptor
-Class Methods:
- alloc
-Methods:
- init
- binaryArchives
- colorAttachments
- depthAttachmentPixelFormat
- fragmentBuffers
- fragmentFunction
- inputPrimitiveTopology
- isAlphaToCoverageEnabled
- isAlphaToOneEnabled
- isRasterizationEnabled
- isTessellationFactorScaleEnabled
- label
- maxTessellationFactor
- maxVertexAmplificationCount
- rasterSampleCount
- reset
- sampleCount
- setAlphaToCoverageEnabled
- setAlphaToOneEnabled
- setBinaryArchives
- setDepthAttachmentPixelFormat
- setFragmentFunction
- setInputPrimitiveTopology
- setLabel
- setMaxTessellationFactor
- setMaxVertexAmplificationCount
- setRasterSampleCount
- setRasterizationEnabled
- setSampleCount
- setStencilAttachmentPixelFormat
- setSupportIndirectCommandBuffers
- setTessellationControlPointIndexType
- setTessellationFactorFormat
- setTessellationFactorScaleEnabled
- setTessellationFactorStepFunction
- setTessellationOutputWindingOrder
- setTessellationPartitionMode
- setVertexDescriptor
- setVertexFunction
- stencilAttachmentPixelFormat
- supportIndirectCommandBuffers
- tessellationControlPointIndexType
- tessellationFactorFormat
- tessellationFactorStepFunction
- tessellationOutputWindingOrder
- tessellationPartitionMode
- vertexBuffers
- vertexDescriptor
- vertexFunction
-*/
+
@(objc_class="MTLRenderPipelineDescriptor")
RenderPipelineDescriptor :: struct { using _: NS.Copying(RenderPipelineDescriptor) }
@@ -3889,17 +3037,7 @@ RenderPipelineDescriptor_rasterizationEnabled :: #force_inline proc "c" (self: ^
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- RenderPipelineReflection
-Class Methods:
- alloc
-Methods:
- init
- fragmentArguments
- tileArguments
- vertexArguments
-*/
+
@(objc_class="MTLRenderPipelineReflection")
RenderPipelineReflection :: struct { using _: NS.Object }
@@ -3947,16 +3085,7 @@ RenderPipelineReflection_meshBindings :: #force_inline proc "c" (self: ^RenderPi
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- ResourceStatePassDescriptor
-Class Methods:
- alloc
- resourceStatePassDescriptor
-Methods:
- init
- sampleBufferAttachments
-*/
+
@(objc_class="MTLResourceStatePassDescriptor")
ResourceStatePassDescriptor :: struct { using _: NS.Copying(ResourceStatePassDescriptor) }
@@ -3979,20 +3108,7 @@ ResourceStatePassDescriptor_sampleBufferAttachments :: #force_inline proc "c" (s
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- ResourceStatePassSampleBufferAttachmentDescriptor
-Class Methods:
- alloc
-Methods:
- init
- endOfEncoderSampleIndex
- sampleBuffer
- setEndOfEncoderSampleIndex
- setSampleBuffer
- setStartOfEncoderSampleIndex
- startOfEncoderSampleIndex
-*/
+
@(objc_class="MTLResourceStatePassSampleBufferAttachmentDescriptor")
ResourceStatePassSampleBufferAttachmentDescriptor :: struct { using _: NS.Copying(ResourceStatePassSampleBufferAttachmentDescriptor) }
@@ -4031,16 +3147,7 @@ ResourceStatePassSampleBufferAttachmentDescriptor_startOfEncoderSampleIndex :: #
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- ResourceStatePassSampleBufferAttachmentDescriptorArray
-Class Methods:
- alloc
-Methods:
- init
- objectAtIndexedSubscript
- setObject
-*/
+
@(objc_class="MTLResourceStatePassSampleBufferAttachmentDescriptorArray")
ResourceStatePassSampleBufferAttachmentDescriptorArray :: struct { using _: NS.Object }
@@ -4063,44 +3170,7 @@ ResourceStatePassSampleBufferAttachmentDescriptorArray_setObject :: #force_inlin
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- SamplerDescriptor
-Class Methods:
- alloc
-Methods:
- init
- borderColor
- compareFunction
- label
- lodAverage
- lodMaxClamp
- lodMinClamp
- magFilter
- maxAnisotropy
- minFilter
- mipFilter
- normalizedCoordinates
- rAddressMode
- sAddressMode
- setBorderColor
- setCompareFunction
- setLabel
- setLodAverage
- setLodMaxClamp
- setLodMinClamp
- setMagFilter
- setMaxAnisotropy
- setMinFilter
- setMipFilter
- setNormalizedCoordinates
- setRAddressMode
- setSAddressMode
- setSupportArgumentBuffers
- setTAddressMode
- supportArgumentBuffers
- tAddressMode
-*/
+
@(objc_class="MTLSamplerDescriptor")
SamplerDescriptor :: struct { using _: NS.Copying(SamplerDescriptor) }
@@ -4235,15 +3305,7 @@ SamplerDescriptor_tAddressMode :: #force_inline proc "c" (self: ^SamplerDescript
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- SharedEventHandle
-Class Methods:
- alloc
-Methods:
- init
- label
-*/
+
@(objc_class="MTLSharedEventHandle")
SharedEventHandle :: struct { using _: NS.Object }
@@ -4262,16 +3324,7 @@ SharedEventHandle_label :: #force_inline proc "c" (self: ^SharedEventHandle) ->
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- SharedEventListener
-Class Methods:
- alloc
-Methods:
- dispatchQueue
- init
- initWithDispatchQueue
-*/
+
@(objc_class="MTLSharedEventListener")
SharedEventListener :: struct { using _: NS.Object }
@@ -4294,16 +3347,7 @@ SharedEventListener_initWithDispatchQueue :: #force_inline proc "c" (self: ^Shar
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- SharedTextureHandle
-Class Methods:
- alloc
-Methods:
- init
- device
- label
-*/
+
@(objc_class="MTLSharedTextureHandle")
SharedTextureHandle :: struct { using _: NS.Object }
@@ -4326,22 +3370,7 @@ SharedTextureHandle_label :: #force_inline proc "c" (self: ^SharedTextureHandle)
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- StageInputOutputDescriptor
-Class Methods:
- alloc
- stageInputOutputDescriptor
-Methods:
- init
- attributes
- indexBufferIndex
- indexType
- layouts
- reset
- setIndexBufferIndex
- setIndexType
-*/
+
@(objc_class="MTLStageInputOutputDescriptor")
StageInputOutputDescriptor :: struct { using _: NS.Copying(StageInputOutputDescriptor) }
@@ -4388,26 +3417,7 @@ StageInputOutputDescriptor_stageInputOutputDescriptor :: #force_inline proc "c"
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- StencilDescriptor
-Class Methods:
- alloc
-Methods:
- init
- depthFailureOperation
- depthStencilPassOperation
- readMask
- setDepthFailureOperation
- setDepthStencilPassOperation
- setReadMask
- setStencilCompareFunction
- setStencilFailureOperation
- setWriteMask
- stencilCompareFunction
- stencilFailureOperation
- writeMask
-*/
+
@(objc_class="MTLStencilDescriptor")
StencilDescriptor :: struct { using _: NS.Copying(StencilDescriptor) }
@@ -4470,22 +3480,7 @@ StencilDescriptor_writeMask :: #force_inline proc "c" (self: ^StencilDescriptor)
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- StructMember
-Class Methods:
- alloc
-Methods:
- init
- argumentIndex
- arrayType
- dataType
- name
- offset
- pointerType
- structType
- textureReferenceType
-*/
+
@(objc_class="MTLStructMember")
StructMember :: struct { using _: NS.Object }
@@ -4532,16 +3527,7 @@ StructMember_textureReferenceType :: #force_inline proc "c" (self: ^StructMember
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- StructType
-Class Methods:
- alloc
-Methods:
- init
- memberByName
- members
-*/
+
@(objc_class="MTLStructType")
StructType :: struct { using _: Type }
@@ -4564,47 +3550,7 @@ StructType_members :: #force_inline proc "c" (self: ^StructType) -> ^NS.Array {
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- TextureDescriptor
-Class Methods:
- alloc
- texture2DDescriptorWithPixelFormat
- textureBufferDescriptorWithPixelFormat
- textureCubeDescriptorWithPixelFormat
-Methods:
- init
- allowGPUOptimizedContents
- arrayLength
- cpuCacheMode
- depth
- hazardTrackingMode
- height
- mipmapLevelCount
- pixelFormat
- resourceOptions
- sampleCount
- setAllowGPUOptimizedContents
- setArrayLength
- setCpuCacheMode
- setDepth
- setHazardTrackingMode
- setHeight
- setMipmapLevelCount
- setPixelFormat
- setResourceOptions
- setSampleCount
- setStorageMode
- setSwizzle
- setTextureType
- setUsage
- setWidth
- storageMode
- swizzle
- textureType
- usage
- width
-*/
+
@(objc_class="MTLTextureDescriptor")
TextureDescriptor :: struct { using _: NS.Copying(TextureDescriptor) }
@@ -4760,18 +3706,7 @@ TextureDescriptor_setCompressionType :: #force_inline proc "c" (self: ^TextureDe
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- TextureReferenceType
-Class Methods:
- alloc
-Methods:
- init
- access
- isDepthTexture
- textureDataType
- textureType
-*/
+
@(objc_class="MTLTextureReferenceType")
TextureReferenceType :: struct { using _: Type }
@@ -4802,16 +3737,7 @@ TextureReferenceType_textureType :: #force_inline proc "c" (self: ^TextureRefere
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- TileRenderPipelineColorAttachmentDescriptor
-Class Methods:
- alloc
-Methods:
- init
- pixelFormat
- setPixelFormat
-*/
+
@(objc_class="MTLTileRenderPipelineColorAttachmentDescriptor")
TileRenderPipelineColorAttachmentDescriptor :: struct { using _: NS.Copying(TileRenderPipelineColorAttachmentDescriptor) }
@@ -4834,16 +3760,7 @@ TileRenderPipelineColorAttachmentDescriptor_setPixelFormat :: #force_inline proc
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- TileRenderPipelineColorAttachmentDescriptorArray
-Class Methods:
- alloc
-Methods:
- init
- objectAtIndexedSubscript
- setObject
-*/
+
@(objc_class="MTLTileRenderPipelineColorAttachmentDescriptorArray")
TileRenderPipelineColorAttachmentDescriptorArray :: struct { using _: NS.Object }
@@ -4866,29 +3783,7 @@ TileRenderPipelineColorAttachmentDescriptorArray_setObject :: #force_inline proc
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- TileRenderPipelineDescriptor
-Class Methods:
- alloc
-Methods:
- init
- binaryArchives
- colorAttachments
- label
- maxTotalThreadsPerThreadgroup
- rasterSampleCount
- reset
- setBinaryArchives
- setLabel
- setMaxTotalThreadsPerThreadgroup
- setRasterSampleCount
- setThreadgroupSizeMatchesTileSize
- setTileFunction
- threadgroupSizeMatchesTileSize
- tileBuffers
- tileFunction
-*/
+
@(objc_class="MTLTileRenderPipelineDescriptor")
TileRenderPipelineDescriptor :: struct { using _: NS.Copying(TileRenderPipelineDescriptor) }
@@ -4963,15 +3858,7 @@ TileRenderPipelineDescriptor_tileFunction :: #force_inline proc "c" (self: ^Tile
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- Type
-Class Methods:
- alloc
-Methods:
- init
- dataType
-*/
+
@(objc_class="MTLType")
Type :: struct { using _: NS.Object }
@@ -4990,20 +3877,7 @@ Type_dataType :: #force_inline proc "c" (self: ^Type) -> DataType {
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- VertexAttribute
-Class Methods:
- alloc
-Methods:
- init
- attributeIndex
- attributeType
- isActive
- isPatchControlPointData
- isPatchData
- name
-*/
+
@(objc_class="MTLVertexAttribute")
VertexAttribute :: struct { using _: NS.Object }
@@ -5042,20 +3916,7 @@ VertexAttribute_name :: #force_inline proc "c" (self: ^VertexAttribute) -> ^NS.S
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- VertexAttributeDescriptor
-Class Methods:
- alloc
-Methods:
- init
- bufferIndex
- format
- offset
- setBufferIndex
- setFormat
- setOffset
-*/
+
@(objc_class="MTLVertexAttributeDescriptor")
VertexAttributeDescriptor :: struct { using _: NS.Copying(VertexAttributeDescriptor) }
@@ -5094,16 +3955,7 @@ VertexAttributeDescriptor_setOffset :: #force_inline proc "c" (self: ^VertexAttr
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- VertexAttributeDescriptorArray
-Class Methods:
- alloc
-Methods:
- init
- objectAtIndexedSubscript
- setObject
-*/
+
@(objc_class="MTLVertexAttributeDescriptorArray")
VertexAttributeDescriptorArray :: struct { using _: NS.Object }
@@ -5126,20 +3978,7 @@ VertexAttributeDescriptorArray_setObject :: #force_inline proc "c" (self: ^Verte
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- VertexBufferLayoutDescriptor
-Class Methods:
- alloc
-Methods:
- init
- setStepFunction
- setStepRate
- setStride
- stepFunction
- stepRate
- stride
-*/
+
@(objc_class="MTLVertexBufferLayoutDescriptor")
VertexBufferLayoutDescriptor :: struct { using _: NS.Copying(VertexBufferLayoutDescriptor) }
@@ -5178,16 +4017,6 @@ VertexBufferLayoutDescriptor_stride :: #force_inline proc "c" (self: ^VertexBuff
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- VertexBufferLayoutDescriptorArray
-Class Methods:
- alloc
-Methods:
- init
- objectAtIndexedSubscript
- setObject
-*/
@(objc_class="MTLVertexBufferLayoutDescriptorArray")
VertexBufferLayoutDescriptorArray :: struct { using _: NS.Object }
@@ -5210,18 +4039,6 @@ VertexBufferLayoutDescriptorArray_setObject :: #force_inline proc "c" (self: ^Ve
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- VertexDescriptor
-Class Methods:
- alloc
- vertexDescriptor
-Methods:
- init
- attributes
- layouts
- reset
-*/
@(objc_class="MTLVertexDescriptor")
VertexDescriptor :: struct { using _: NS.Copying(VertexDescriptor) }
@@ -5252,17 +4069,7 @@ VertexDescriptor_vertexDescriptor :: #force_inline proc "c" () -> ^VertexDescrip
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- VisibleFunctionTableDescriptor
-Class Methods:
- alloc
- visibleFunctionTableDescriptor
-Methods:
- init
- functionCount
- setFunctionCount
-*/
+
@(objc_class="MTLVisibleFunctionTableDescriptor")
VisibleFunctionTableDescriptor :: struct { using _: NS.Copying(VisibleFunctionTableDescriptor) }
@@ -5289,14 +4096,7 @@ VisibleFunctionTableDescriptor_visibleFunctionTableDescriptor :: #force_inline p
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- AccelerationStructure
-Class Methods:
-Methods:
- size
- getResourceID
-*/
+
@(objc_class="MTLAccelerationStructure")
AccelerationStructure :: struct { using _: Resource }
@@ -5312,25 +4112,7 @@ AccelerationStructure_getResourceID :: #force_inline proc "c" (self: ^Accelerati
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- AccelerationStructureCommandEncoder
-Class Methods:
-Methods:
- buildAccelerationStructure
- copyAccelerationStructure
- copyAndCompactAccelerationStructure
- refitAccelerationStructure
- refitAccelerationStructureWithOptions
- sampleCountersInBuffer
- updateFence
- useHeap
- useHeaps
- useResource
- useResources
- waitForFence
- writeCompactedAccelerationStructureSize
-*/
+
@(objc_class="MTLAccelerationStructureCommandEncoder")
AccelerationStructureCommandEncoder :: struct { using _: CommandEncoder }
@@ -5590,38 +4372,6 @@ ObjectPayloadBinding_objectPayloadDataSize :: #force_inline proc "c" (self: ^Obj
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- ArgumentEncoder
-Class Methods:
-Methods:
- alignment
- constantDataAtIndex
- device
- encodedLength
- label
- newArgumentEncoderForBufferAtIndex
- setAccelerationStructure
- setArgumentBuffer
- setArgumentBuffer
- setBuffer
- setBuffers
- setComputePipelineState
- setComputePipelineStates
- setIndirectCommandBuffer
- setIndirectCommandBuffers
- setIntersectionFunctionTable
- setIntersectionFunctionTables
- setLabel
- setRenderPipelineState
- setRenderPipelineStates
- setSamplerState
- setSamplerStates
- setTexture
- setTextures
- setVisibleFunctionTable
- setVisibleFunctionTables
-*/
@(objc_class="MTLArgumentEncoder")
ArgumentEncoder :: struct { using _: NS.Object }
@@ -5740,19 +4490,6 @@ ArgumentEncoder_setVisibleFunctionTables :: #force_inline proc "odin" (self: ^Ar
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- BinaryArchive
-Class Methods:
-Methods:
- addComputePipelineFunctionsWithDescriptor
- addRenderPipelineFunctionsWithDescriptor
- addTileRenderPipelineFunctionsWithDescriptor
- device
- label
- serializeToURL
- setLabel
-*/
@(objc_class="MTLBinaryArchive")
BinaryArchive :: struct { using _: NS.Copying(BinaryArchive) }
@@ -5798,37 +4535,6 @@ BinaryArchive_addFunction :: #force_inline proc "contextless" (self: ^BinaryArch
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- BlitCommandEncoder
-Class Methods:
-Methods:
- copyFromBuffer
- copyFromBuffer
- copyFromBuffer
- copyFromTexture
- copyFromTexture
- copyFromTexture
- copyFromTexture
- copyFromTexture
- copyIndirectCommandBuffer
- fillBuffer
- generateMipmapsForTexture
- getTextureAccessCounters
- optimizeContentsForCPUAccess
- optimizeContentsForCPUAccess
- optimizeContentsForGPUAccess
- optimizeContentsForGPUAccess
- optimizeIndirectCommandBuffer
- resetCommandsInBuffer
- resetTextureAccessCounters
- resolveCounters
- sampleCountersInBuffer
- synchronizeResource
- synchronizeTexture
- updateFence
- waitForFence
-*/
@(objc_class="MTLBlitCommandEncoder")
BlitCommandEncoder :: struct { using _: CommandEncoder }
@@ -5935,20 +4641,6 @@ BlitCommandEncoder_waitForFence :: #force_inline proc "c" (self: ^BlitCommandEnc
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- Buffer
-Class Methods:
-Methods:
- addDebugMarker
- contents
- didModifyRange
- length
- newRemoteBufferViewForDevice
- newTextureWithDescriptor
- remoteStorageBuffer
- removeAllDebugMarkers
-*/
@(objc_class="MTLBuffer")
Buffer :: struct { using _: Resource }
@@ -6013,18 +4705,6 @@ Buffer_gpuAddress :: #force_inline proc "c" (self: ^Buffer) -> u64 {
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- CaptureScope
-Class Methods:
-Methods:
- beginScope
- commandQueue
- device
- endScope
- label
- setLabel
-*/
@(objc_class="MTLCaptureScope")
CaptureScope :: struct { using _: NS.Object }
@@ -6055,48 +4735,6 @@ CaptureScope_setLabel :: #force_inline proc "c" (self: ^CaptureScope, label: ^NS
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- CommandBuffer
-Class Methods:
-Methods:
- GPUEndTime
- GPUStartTime
- accelerationStructureCommandEncoder
- addCompletedHandler
- addScheduledHandler
- blitCommandEncoder
- blitCommandEncoderWithDescriptor
- commandQueue
- commit
- computeCommandEncoder
- computeCommandEncoderWithDescriptor
- computeCommandEncoderWithDispatchType
- device
- encodeSignalEvent
- encodeWaitForEvent
- enqueue
- error
- errorOptions
- kernelEndTime
- kernelStartTime
- label
- logs
- parallelRenderCommandEncoderWithDescriptor
- popDebugGroup
- presentDrawable
- presentDrawable
- presentDrawable
- pushDebugGroup
- renderCommandEncoderWithDescriptor
- resourceStateCommandEncoder
- resourceStateCommandEncoderWithDescriptor
- retainedReferences
- setLabel
- status
- waitUntilCompleted
- waitUntilScheduled
-*/
@(objc_class="MTLCommandBuffer")
CommandBuffer :: struct { using _: NS.Object }
@@ -6251,15 +4889,6 @@ CommandBuffer_waitUntilScheduled :: #force_inline proc "c" (self: ^CommandBuffer
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- CommandBufferEncoderInfo
-Class Methods:
-Methods:
- debugSignposts
- errorState
- label
-*/
@(objc_class="MTLCommandBufferEncoderInfo")
CommandBufferEncoderInfo :: struct { using _: NS.Object }
@@ -6278,19 +4907,6 @@ CommandBufferEncoderInfo_label :: #force_inline proc "c" (self: ^CommandBufferEn
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- CommandEncoder
-Class Methods:
-Methods:
- device
- endEncoding
- insertDebugSignpost
- label
- popDebugGroup
- pushDebugGroup
- setLabel
-*/
@(objc_class="MTLCommandEncoder")
CommandEncoder :: struct { using _: NS.Object }
@@ -6325,19 +4941,6 @@ CommandEncoder_setLabel :: #force_inline proc "c" (self: ^CommandEncoder, label:
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- CommandQueue
-Class Methods:
-Methods:
- commandBuffer
- commandBufferWithDescriptor
- commandBufferWithUnretainedReferences
- device
- insertDebugCaptureBoundary
- label
- setLabel
-*/
@(objc_class="MTLCommandQueue")
CommandQueue :: struct { using _: NS.Object }
@@ -6372,47 +4975,6 @@ CommandQueue_setLabel :: #force_inline proc "c" (self: ^CommandQueue, label: ^NS
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- ComputeCommandEncoder
-Class Methods:
-Methods:
- dispatchThreadgroups
- dispatchThreadgroupsWithIndirectBuffer
- dispatchThreads
- dispatchType
- executeCommandsInBuffer
- executeCommandsInBuffer
- memoryBarrierWithResources
- memoryBarrierWithScope
- sampleCountersInBuffer
- setAccelerationStructure
- setBuffer
- setBufferOffset
- setBuffers
- setBytes
- setComputePipelineState
- setImageblockWidth
- setIntersectionFunctionTable
- setIntersectionFunctionTables
- setSamplerState
- setSamplerState
- setSamplerStates
- setSamplerStates
- setStageInRegion
- setStageInRegionWithIndirectBuffer
- setTexture
- setTextures
- setThreadgroupMemoryLength
- setVisibleFunctionTable
- setVisibleFunctionTables
- updateFence
- useHeap
- useHeaps
- useResource
- useResources
- waitForFence
-*/
@(objc_class="MTLComputeCommandEncoder")
ComputeCommandEncoder :: struct { using _: CommandEncoder }
@@ -6562,23 +5124,6 @@ ComputeCommandEncoder_waitForFence :: #force_inline proc "c" (self: ^ComputeComm
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- ComputePipelineState
-Class Methods:
-Methods:
- device
- functionHandleWithFunction
- imageblockMemoryLengthForDimensions
- label
- maxTotalThreadsPerThreadgroup
- newComputePipelineStateWithAdditionalBinaryFunctions
- newIntersectionFunctionTableWithDescriptor
- newVisibleFunctionTableWithDescriptor
- staticThreadgroupMemoryLength
- supportIndirectCommandBuffers
- threadExecutionWidth
-*/
@(objc_class="MTLComputePipelineState")
ComputePipelineState :: struct { using _: NS.Object }
@@ -6630,13 +5175,6 @@ ComputePipelineState_threadExecutionWidth :: #force_inline proc "c" (self: ^Comp
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- Counter
-Class Methods:
-Methods:
- name
-*/
@(objc_class="MTLCounter")
Counter :: struct { using _: NS.Object }
@@ -6647,16 +5185,6 @@ Counter_name :: #force_inline proc "c" (self: ^Counter) -> ^NS.String {
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- CounterSampleBuffer
-Class Methods:
-Methods:
- device
- label
- resolveCounterRange
- sampleCount
-*/
@(objc_class="MTLCounterSampleBuffer")
CounterSampleBuffer :: struct { using _: NS.Object }
@@ -6679,14 +5207,6 @@ CounterSampleBuffer_sampleCount :: #force_inline proc "c" (self: ^CounterSampleB
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- CounterSet
-Class Methods:
-Methods:
- counters
- name
-*/
@(objc_class="MTLCounterSet")
CounterSet :: struct { using _: NS.Object }
@@ -6701,14 +5221,6 @@ CounterSet_name :: #force_inline proc "c" (self: ^CounterSet) -> ^NS.String {
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- DepthStencilState
-Class Methods:
-Methods:
- device
- label
-*/
@(objc_class="MTLDepthStencilState")
DepthStencilState :: struct { using _: NS.Object }
@@ -6723,107 +5235,6 @@ DepthStencilState_label :: #force_inline proc "c" (self: ^DepthStencilState) ->
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- Device
-Class Methods:
-Methods:
- accelerationStructureSizesWithDescriptor
- areBarycentricCoordsSupported
- areProgrammableSamplePositionsSupported
- areRasterOrderGroupsSupported
- argumentBuffersSupport
- convertSparsePixelRegions
- convertSparseTileRegions
- counterSets
- currentAllocatedSize
- getDefaultSamplePositions
- hasUnifiedMemory
- heapBufferSizeAndAlignWithLength
- heapTextureSizeAndAlignWithDescriptor
- isDepth24Stencil8PixelFormatSupported
- isHeadless
- isLowPower
- isRemovable
- location
- locationNumber
- maxArgumentBufferSamplerCount
- maxBufferLength
- maxThreadgroupMemoryLength
- maxThreadsPerThreadgroup
- maxTransferRate
- minimumLinearTextureAlignmentForPixelFormat
- minimumTextureBufferAlignmentForPixelFormat
- name
- newAccelerationStructureWithDescriptor
- newAccelerationStructureWithSize
- newArgumentEncoderWithArguments
- newBinaryArchiveWithDescriptor
- newBufferWithBytes
- newBufferWithBytesNoCopy
- newBufferWithLength
- newCommandQueue
- newCommandQueueWithMaxCommandBufferCount
- newComputePipelineStateWithDescriptor
- newComputePipelineStateWithDescriptor
- newComputePipelineStateWithFunction
- newComputePipelineStateWithFunction
- newComputePipelineStateWithFunction
- newComputePipelineStateWithFunction
- newCounterSampleBufferWithDescriptor
- newDefaultLibrary
- newDefaultLibraryWithBundle
- newDepthStencilStateWithDescriptor
- newDynamicLibrary
- newDynamicLibraryWithURL
- newEvent
- newFence
- newHeapWithDescriptor
- newIndirectCommandBufferWithDescriptor
- newLibraryWithData
- newLibraryWithFile
- newLibraryWithSource
- newLibraryWithSource
- newLibraryWithURL
- newRasterizationRateMapWithDescriptor
- newRenderPipelineStateWithDescriptor
- newRenderPipelineStateWithDescriptor
- newRenderPipelineStateWithDescriptor
- newRenderPipelineStateWithDescriptor
- newRenderPipelineStateWithTileDescriptor
- newRenderPipelineStateWithTileDescriptor
- newSamplerState
- newSharedEvent
- newSharedEventWithHandle
- newSharedTextureWithDescriptor
- newSharedTextureWithHandle
- newTextureWithDescriptor
- newTextureWithDescriptor
- peerCount
- peerGroupID
- peerIndex
- readWriteTextureSupport
- recommendedMaxWorkingSetSize
- registryID
- sampleTimestamps
- sparseTileSizeInBytes
- sparseTileSizeWithTextureType
- supports32BitFloatFiltering
- supports32BitMSAA
- supportsBCTextureCompression
- supportsCounterSampling
- supportsDynamicLibraries
- supportsFamily
- supportsFeatureSet
- supportsFunctionPointers
- supportsPullModelInterpolation
- supportsQueryTextureLOD
- supportsRasterizationRateMapWithLayerCount
- supportsRaytracing
- supportsShaderBarycentricCoordinates
- supportsTextureSampleCount
- supportsVertexAmplificationCount
-*/
@(objc_class="MTLDevice")
Device :: struct { using _: NS.Object }
@@ -7319,18 +5730,6 @@ Device_newIOCommandQueue :: #force_inline proc "contextless" (self: ^Device, des
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- Drawable
-Class Methods:
-Methods:
- addPresentedHandler
- drawableID
- present
- presentAfterMinimumDuration
- presentAtTime
- presentedTime
-*/
@(objc_class="MTLDrawable")
Drawable :: struct { using _: NS.Object }
@@ -7361,17 +5760,6 @@ Drawable_presentedTime :: #force_inline proc "c" (self: ^Drawable) -> CFTimeInte
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- DynamicLibrary
-Class Methods:
-Methods:
- device
- installName
- label
- serializeToURL
- setLabel
-*/
@(objc_class="MTLDynamicLibrary")
DynamicLibrary :: struct { using _: NS.Object }
@@ -7399,15 +5787,6 @@ DynamicLibrary_setLabel :: #force_inline proc "c" (self: ^DynamicLibrary, label:
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- Event
-Class Methods:
-Methods:
- device
- label
- setLabel
-*/
@(objc_class="MTLEvent")
Event :: struct { using _: NS.Object }
@@ -7426,15 +5805,6 @@ Event_setLabel :: #force_inline proc "c" (self: ^Event, label: ^NS.String) {
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- Fence
-Class Methods:
-Methods:
- device
- label
- setLabel
-*/
@(objc_class="MTLFence")
Fence :: struct { using _: NS.Object }
@@ -7453,25 +5823,6 @@ Fence_setLabel :: #force_inline proc "c" (self: ^Fence, label: ^NS.String) {
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- Function
-Class Methods:
-Methods:
- device
- functionConstantsDictionary
- functionType
- label
- name
- newArgumentEncoderWithBufferIndex
- newArgumentEncoderWithBufferIndex
- options
- patchControlPointCount
- patchType
- setLabel
- stageInputAttributes
- vertexAttributes
-*/
@(objc_class="MTLFunction")
Function :: struct { using _: NS.Object }
@@ -7530,15 +5881,6 @@ Function_vertexAttributes :: #force_inline proc "c" (self: ^Function) -> ^NS.Arr
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- FunctionHandle
-Class Methods:
-Methods:
- device
- functionType
- name
-*/
@(objc_class="MTLFunctionHandle")
FunctionHandle :: struct { using _: NS.Object }
@@ -7557,26 +5899,13 @@ FunctionHandle_name :: #force_inline proc "c" (self: ^FunctionHandle) -> ^NS.Str
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- LogContainer
-*/
@(objc_class="MTLLogContainer")
LogContainer :: struct { using _: NS.FastEnumeration }
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- FunctionLog
-Class Methods:
-Methods:
- debugLocation
- encoderLabel
- function
- type
-*/
+
@(objc_class="MTLFunctionLog")
FunctionLog :: struct { using _: NS.Object }
@@ -7599,16 +5928,6 @@ FunctionLog_type :: #force_inline proc "c" (self: ^FunctionLog) -> FunctionLogTy
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- FunctionLogDebugLocation
-Class Methods:
-Methods:
- URL
- column
- functionName
- line
-*/
@(objc_class="MTLFunctionLogDebugLocation")
FunctionLogDebugLocation :: struct { using _: NS.Object }
@@ -7631,29 +5950,7 @@ FunctionLogDebugLocation_line :: #force_inline proc "c" (self: ^FunctionLogDebug
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- Heap
-Class Methods:
-Methods:
- cpuCacheMode
- currentAllocatedSize
- device
- hazardTrackingMode
- label
- maxAvailableSizeWithAlignment
- newBufferWithLength
- newBufferWithLength
- newTextureWithDescriptor
- newTextureWithDescriptor
- resourceOptions
- setLabel
- setPurgeableState
- size
- storageMode
- type
- usedSize
-*/
+
@(objc_class="MTLHeap")
Heap :: struct { using _: NS.Object }
@@ -7764,16 +6061,6 @@ Heap_usedSize :: #force_inline proc "c" (self: ^Heap) -> NS.UInteger {
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- IndirectCommandBuffer
-Class Methods:
-Methods:
- indirectComputeCommandAtIndex
- indirectRenderCommandAtIndex
- resetWithRange
- size
-*/
@(objc_class="MTLIndirectCommandBuffer")
IndirectCommandBuffer :: struct { using _: Resource }
@@ -7811,22 +6098,6 @@ IndirectCommandBuffer_gpuResourceID :: #force_inline proc "c" (self: ^IndirectCo
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- IndirectComputeCommand
-Class Methods:
-Methods:
- clearBarrier
- concurrentDispatchThreadgroups
- concurrentDispatchThreads
- reset
- setBarrier
- setComputePipelineState
- setImageblockWidth
- setKernelBuffer
- setStageInRegion
- setThreadgroupMemoryLength
-*/
@(objc_class="MTLIndirectComputeCommand")
IndirectComputeCommand :: struct { using _: NS.Object }
@@ -7873,20 +6144,6 @@ IndirectComputeCommand_setThreadgroupMemoryLength :: #force_inline proc "c" (sel
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- IndirectRenderCommand
-Class Methods:
-Methods:
- drawIndexedPatches
- drawIndexedPrimitives
- drawPatches
- drawPrimitives
- reset
- setFragmentBuffer
- setRenderPipelineState
- setVertexBuffer
-*/
@(objc_class="MTLIndirectRenderCommand")
IndirectRenderCommand :: struct { using _: NS.Object }
@@ -7925,20 +6182,6 @@ IndirectRenderCommand_setVertexBuffer :: #force_inline proc "c" (self: ^Indirect
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- IntersectionFunctionTable
-Class Methods:
-Methods:
- setBuffer
- setBuffers
- setFunction
- setFunctions
- setOpaqueTriangleIntersectionFunctionWithSignature
- setOpaqueTriangleIntersectionFunctionWithSignature
- setVisibleFunctionTable
- setVisibleFunctionTables
-*/
@(objc_class="MTLIntersectionFunctionTable")
IntersectionFunctionTable :: struct { using _: Resource }
@@ -8142,30 +6385,6 @@ IOCommandBuffer_error :: #force_inline proc "c" (self: ^IOCommandBuffer) -> ^NS.
////////////////////////////////////////////////////////////////////////////////
-
-
-
-////////////////////////////////////////////////////////////////////////////////
-
-/*
-Class:
- Library
-Class Methods:
-Methods:
- device
- functionNames
- installName
- label
- newFunctionWithDescriptor
- newFunctionWithDescriptor
- newFunctionWithName
- newFunctionWithName
- newFunctionWithName
- newIntersectionFunctionWithDescriptor
- newIntersectionFunctionWithDescriptor
- setLabel
- type
-*/
@(objc_class="MTLLibrary")
Library :: struct { using _: NS.Object }
@@ -8236,19 +6455,6 @@ Library_type :: #force_inline proc "c" (self: ^Library) -> LibraryType {
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- ParallelRenderCommandEncoder
-Class Methods:
-Methods:
- renderCommandEncoder
- setColorStoreAction
- setColorStoreActionOptions
- setDepthStoreAction
- setDepthStoreActionOptions
- setStencilStoreAction
- setStencilStoreActionOptions
-*/
@(objc_class="MTLParallelRenderCommandEncoder")
ParallelRenderCommandEncoder :: struct { using _: CommandEncoder }
@@ -8283,22 +6489,6 @@ ParallelRenderCommandEncoder_setStencilStoreActionOptions :: #force_inline proc
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- RasterizationRateMap
-Class Methods:
-Methods:
- copyParameterDataToBuffer
- device
- label
- layerCount
- mapPhysicalToScreenCoordinates
- mapScreenToPhysicalCoordinates
- parameterBufferSizeAndAlign
- physicalGranularity
- physicalSizeForLayer
- screenSize
-*/
@(objc_class="MTLRasterizationRateMap")
RasterizationRateMap :: struct { using _: NS.Object }
@@ -8346,98 +6536,6 @@ RasterizationRateMap_screenSize :: #force_inline proc "c" (self: ^RasterizationR
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- RenderCommandEncoder
-Class Methods:
-Methods:
- dispatchThreadsPerTile
- drawIndexedPatches
- drawIndexedPatches
- drawIndexedPrimitives
- drawIndexedPrimitives
- drawIndexedPrimitives
- drawIndexedPrimitives
- drawPatches
- drawPatches
- drawPrimitives
- drawPrimitives
- drawPrimitives
- drawPrimitives
- executeCommandsInBuffer
- executeCommandsInBuffer
- memoryBarrierWithResources
- memoryBarrierWithScope
- sampleCountersInBuffer
- setBlendColorRed
- setColorStoreAction
- setColorStoreActionOptions
- setCullMode
- setDepthBias
- setDepthClipMode
- setDepthStencilState
- setDepthStoreAction
- setDepthStoreActionOptions
- setFragmentBuffer
- setFragmentBufferOffset
- setFragmentBuffers
- setFragmentBytes
- setFragmentSamplerState
- setFragmentSamplerState
- setFragmentSamplerStates
- setFragmentSamplerStates
- setFragmentTexture
- setFragmentTextures
- setFrontFacingWinding
- setRenderPipelineState
- setScissorRect
- setScissorRects
- setStencilFrontReferenceValue
- setStencilReferenceValue
- setStencilStoreAction
- setStencilStoreActionOptions
- setTessellationFactorBuffer
- setTessellationFactorScale
- setThreadgroupMemoryLength
- setTileBuffer
- setTileBufferOffset
- setTileBuffers
- setTileBytes
- setTileSamplerState
- setTileSamplerState
- setTileSamplerStates
- setTileSamplerStates
- setTileTexture
- setTileTextures
- setTriangleFillMode
- setVertexAmplificationCount
- setVertexBuffer
- setVertexBufferOffset
- setVertexBuffers
- setVertexBytes
- setVertexSamplerState
- setVertexSamplerState
- setVertexSamplerStates
- setVertexSamplerStates
- setVertexTexture
- setVertexTextures
- setViewport
- setViewports
- setVisibilityResultMode
- textureBarrier
- tileHeight
- tileWidth
- updateFence
- useHeap
- useHeap
- useHeaps
- useHeaps
- useResource
- useResource
- useResources
- useResources
- waitForFence
-*/
@(objc_class="MTLRenderCommandEncoder")
RenderCommandEncoder :: struct { using _: CommandEncoder }
@@ -8890,10 +6988,6 @@ RenderCommandEncoder_drawMeshThreadgroupsWithIndirectBuffer :: #force_inline pro
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- RenderPipelineFunctionsDescriptor
-*/
@(objc_class="MTLRenderPipelineFunctionsDescriptor")
RenderPipelineFunctionsDescriptor :: struct { using _: NS.Copying(RenderPipelineFunctionsDescriptor) }
@@ -8935,19 +7029,6 @@ RenderPipelineFunctionsDescriptor_setTileAdditionalBinaryFunctions :: #force_inl
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- RenderPipelineState
-Class Methods:
-Methods:
- device
- imageblockMemoryLengthForDimensions
- imageblockSampleLength
- label
- maxTotalThreadsPerThreadgroup
- supportIndirectCommandBuffers
- threadgroupSizeMatchesTileSize
-*/
@(objc_class="MTLRenderPipelineState")
RenderPipelineState :: struct { using _: NS.Object }
@@ -9031,25 +7112,6 @@ RenderPipelineState_gpuResourceID :: #force_inline proc "c" (self: ^RenderPipeli
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- Resource
-Class Methods:
-Methods:
- allocatedSize
- cpuCacheMode
- device
- hazardTrackingMode
- heap
- heapOffset
- isAliasable
- label
- makeAliasable
- resourceOptions
- setLabel
- setPurgeableState
- storageMode
-*/
@(objc_class="MTLResource")
Resource :: struct { using _: NS.Object }
@@ -9108,17 +7170,6 @@ Resource_storageMode :: #force_inline proc "c" (self: ^Resource) -> StorageMode
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- ResourceStateCommandEncoder
-Class Methods:
-Methods:
- updateFence
- updateTextureMapping
- updateTextureMapping
- updateTextureMappings
- waitForFence
-*/
@(objc_class="MTLResourceStateCommandEncoder")
ResourceStateCommandEncoder :: struct { using _: CommandEncoder }
@@ -9157,14 +7208,6 @@ ResourceStateCommandEncoder_moveTextureMappingsFromTexture :: #force_inline proc
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- SamplerState
-Class Methods:
-Methods:
- device
- label
-*/
@(objc_class="MTLSamplerState")
SamplerState :: struct { using _: NS.Object }
@@ -9183,16 +7226,6 @@ SamplerState_gpuResourceID :: #force_inline proc "c" (self: ^SamplerState) -> Re
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- SharedEvent
-Class Methods:
-Methods:
- newSharedEventHandle
- notifyListener
- setSignaledValue
- signaledValue
-*/
@(objc_class="MTLSharedEvent")
SharedEvent :: struct { using _: Event }
@@ -9215,47 +7248,6 @@ SharedEvent_signaledValue :: #force_inline proc "c" (self: ^SharedEvent) -> u64
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- Texture
-Class Methods:
-Methods:
- allowGPUOptimizedContents
- arrayLength
- buffer
- bufferBytesPerRow
- bufferOffset
- depth
- firstMipmapInTail
- getBytes
- getBytes
- height
- iosurface
- iosurfacePlane
- isFramebufferOnly
- isShareable
- isSparse
- mipmapLevelCount
- newRemoteTextureViewForDevice
- newSharedTextureHandle
- newTextureViewWithPixelFormat
- newTextureViewWithPixelFormat
- newTextureViewWithPixelFormat
- parentRelativeLevel
- parentRelativeSlice
- parentTexture
- pixelFormat
- remoteStorageTexture
- replaceRegion
- replaceRegion
- rootResource
- sampleCount
- swizzle
- tailSizeInBytes
- textureType
- usage
- width
-*/
@(objc_class="MTLTexture")
Texture :: struct { using _: Resource }
@@ -9419,14 +7411,6 @@ Texture_gpuResourceID :: #force_inline proc "c" (self: ^Texture) -> ResourceID {
////////////////////////////////////////////////////////////////////////////////
-/*
-Class:
- VisibleFunctionTable
-Class Methods:
-Methods:
- setFunction
- setFunctions
-*/
@(objc_class="MTLVisibleFunctionTable")
VisibleFunctionTable :: struct { using _: Resource }
@@ -9446,7 +7430,4 @@ VisibleFunctionTable_gpuResourceID :: #force_inline proc "c" (self: ^VisibleFunc
-// TODO: Entire FunctionStitching API (which appears not to be in been missed from the generator)
-
-
diff --git a/vendor/fontstash/fontstash.odin b/vendor/fontstash/fontstash.odin
new file mode 100644
index 000000000..edf9e12db
--- /dev/null
+++ b/vendor/fontstash/fontstash.odin
@@ -0,0 +1,1194 @@
+//+build windows, linux, darwin
+package fontstash
+
+import "core:runtime"
+import "core:log"
+import "core:os"
+import "core:mem"
+import "core:math"
+import "core:strings"
+import "core:slice"
+import stbtt "vendor:stb/truetype"
+
+// This is a port of Fontstash to Odin - specialized for nanovg
+
+// Notable features of Fontstash:
+// Contains a *single* channel texture atlas for multiple fonts
+// Manages a lookup table for frequent glyphs
+// Allows blurred font glyphs
+// Atlas can resize
+
+// Changes from the original:
+// stb truetype only
+//	no scratch allocation -> parts use Odin's dynamic arrays
+// leaves GPU vertex creation & texture management up to the user
+// texture atlas expands by default
+
+INVALID :: -1
+MAX_STATES :: 20
+HASH_LUT_SIZE :: 256
+INIT_GLYPHS :: 256
+INIT_ATLAS_NODES :: 256
+MAX_FALLBACKS :: 20
+Glyph_Index :: i32 // in case you want to change the handle for glyph indices
+
+AlignHorizontal :: enum {
+ LEFT,
+ CENTER,
+ RIGHT,
+}
+
+AlignVertical :: enum {
+ TOP,
+ MIDDLE,
+ BOTTOM,
+ BASELINE,
+}
+
+Font :: struct {
+ name: string, // allocated
+
+ info: stbtt.fontinfo,
+ loadedData: []byte,
+	freeLoadedData: bool, // when true, Destroy will free loadedData
+
+ ascender: f32,
+ descender: f32,
+ lineHeight: f32,
+
+ glyphs: [dynamic]Glyph,
+ lut: [HASH_LUT_SIZE]int,
+
+ fallbacks: [MAX_FALLBACKS]int,
+ nfallbacks: int,
+}
+
+Glyph :: struct {
+ codepoint: rune,
+ index: Glyph_Index,
+ next: int,
+ isize: i16,
+ blurSize: i16,
+ x0, y0, x1, y1: i16,
+ xoff, yoff: i16,
+ xadvance: i16,
+}
+
+AtlasNode :: struct {
+ x, y, width: i16,
+}
+
+Vertex :: struct #packed {
+ x, y: f32,
+ u, v: f32,
+ color: [4]u8,
+}
+
+QuadLocation :: enum {
+ TOPLEFT,
+ BOTTOMLEFT,
+}
+
+FontContext :: struct {
+ fonts: [dynamic]Font, // allocated using context.allocator
+
+ // always assuming user wants to resize
+ nodes: [dynamic]AtlasNode,
+
+ // actual pixels
+ textureData: []byte, // allocated using context.allocator
+ width, height: int,
+ // 1 / texture_atlas_width, 1 / texture_atlas_height
+ itw, ith: f32,
+
+ // state
+ states: []State,
+ state_count: int, // used states
+
+ location: QuadLocation,
+
+ // dirty rectangle of the texture region that was updated
+ dirtyRect: [4]f32,
+
+ // callbacks with userData passed
+ userData: rawptr, // by default set to the context
+
+ // called when a texture is expanded and needs handling
+ callbackResize: proc(data: rawptr, w, h: int),
+ // called in state_end to update the texture region that changed
+ callbackUpdate: proc(data: rawptr, dirtyRect: [4]f32, textureData: rawptr),
+}
+
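+// A minimal usage sketch (names refer to the procedures below; rendering the
+// emitted quads and handling callbackResize/callbackUpdate is left to the user):
+//
+//	ctx: FontContext
+//	Init(&ctx, 512, 512, .TOPLEFT)
+//	defer Destroy(&ctx)
+//
+//	data, _ := os.read_entire_file("font.ttf")
+//	font := AddFontMem(&ctx, "regular", data, true)
+//
+//	BeginState(&ctx)
+//	SetFont(&ctx, font)
+//	SetSize(&ctx, 20)
+//	iter := TextIterInit(&ctx, 0, 0, "hello world")
+//	quad: Quad
+//	for TextIterNext(&ctx, &iter, &quad) {
+//		// emit two triangles per quad from x0/y0/s0/t0 .. x1/y1/s1/t1
+//	}
+//	EndState(&ctx) // fires callbackUpdate if the atlas texture changed
+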
+Init :: proc(ctx: ^FontContext, w, h: int, loc: QuadLocation) {
+ ctx.userData = ctx
+ ctx.location = loc
+ ctx.fonts = make([dynamic]Font, 0, 8)
+
+ ctx.itw, ctx.ith = 1.0 / f32(w), 1.0 / f32(h)
+
+ ctx.textureData = make([]byte, w * h)
+
+ ctx.width = w
+ ctx.height = h
+ ctx.nodes = make([dynamic]AtlasNode, 0, INIT_ATLAS_NODES)
+ __dirtyRectReset(ctx)
+
+ ctx.states = make([]State, MAX_STATES)
+
+	// NOTE: the atlas requires an initial root skyline node spanning the full width
+ append(&ctx.nodes, AtlasNode{
+ width = i16(w),
+ })
+
+ __AtlasAddWhiteRect(ctx, 2, 2)
+
+ PushState(ctx)
+ ClearState(ctx)
+}
+
+Destroy :: proc(ctx: ^FontContext) {
+ for font in ctx.fonts {
+ if font.freeLoadedData {
+ delete(font.loadedData)
+ }
+
+ delete(font.name)
+ delete(font.glyphs)
+ }
+
+ delete(ctx.states)
+ delete(ctx.textureData)
+ delete(ctx.fonts)
+ delete(ctx.nodes)
+}
+
+Reset :: proc(ctx: ^FontContext) {
+ __atlasReset(ctx, ctx.width, ctx.height)
+ __dirtyRectReset(ctx)
+ slice.zero(ctx.textureData)
+
+ for &font in ctx.fonts {
+ __lutReset(&font)
+ }
+
+ __AtlasAddWhiteRect(ctx, 2, 2)
+ PushState(ctx)
+ ClearState(ctx)
+}
+
+__atlasInsertNode :: proc(ctx: ^FontContext, idx: int, x, y, w: int) {
+ inject_at(&ctx.nodes, idx, AtlasNode{
+ x = i16(x),
+ y = i16(y),
+ width = i16(w),
+ })
+}
+
+__atlasRemoveNode :: proc(ctx: ^FontContext, idx: int) {
+ if len(ctx.nodes) == 0 {
+ return
+ }
+
+ ordered_remove(&ctx.nodes, idx)
+}
+
+__atlasExpand :: proc(ctx: ^FontContext, w, h: int) {
+ if w > ctx.width {
+ __atlasInsertNode(ctx, len(ctx.nodes), ctx.width, 0, w - ctx.width)
+ }
+
+ ctx.width, ctx.height = w, h
+}
+
+__atlasReset :: proc(ctx: ^FontContext, w, h: int) {
+ ctx.width, ctx.height = w, h
+ clear(&ctx.nodes)
+
+ // init root node
+ append(&ctx.nodes, AtlasNode{
+ width = i16(w),
+ })
+}
+
+__AtlasAddSkylineLevel :: proc(using ctx: ^FontContext, idx: int, x, y, w, h: int) {
+ // insert new node
+ __atlasInsertNode(ctx, idx, x, y + h, w)
+
+ // Delete skyline segments that fall under the shadow of the new segment.
+ for i := idx + 1; i < len(nodes); i += 1 {
+ if nodes[i].x >= nodes[i-1].x + nodes[i-1].width {
+ break
+ }
+ shrink := nodes[i-1].x + nodes[i-1].width - nodes[i].x
+ nodes[i].x += i16(shrink)
+ nodes[i].width -= i16(shrink)
+
+ if nodes[i].width > 0 {
+ break
+ }
+ __atlasRemoveNode(ctx, i)
+ i -= 1
+ }
+
+ // Merge same height skyline segments that are next to each other.
+ for i := 0; i < len(nodes) - 1; /**/ {
+ if nodes[i].y == nodes[i+1].y {
+ nodes[i].width += nodes[i+1].width
+ __atlasRemoveNode(ctx, i+1)
+ } else {
+ i += 1
+ }
+ }
+}
+
+__AtlasRectFits :: proc(using ctx: ^FontContext, i, w, h: int) -> int {
+	// Checks if there is enough space at the location of skyline span 'i',
+	// and returns the max height of all skyline spans under that location
+	// (think of a tetris block being dropped at that position). Returns -1 if no space was found.
+
+ i := i
+ x, y := int(nodes[i].x), int(nodes[i].y)
+
+ if x + w > width {
+ return -1
+ }
+
+ space_left := w
+ for space_left > 0 {
+ if i == len(nodes) {
+ return -1
+ }
+
+ y = max(y, int(nodes[i].y))
+ if y + h > height {
+ return -1
+ }
+
+ space_left -= int(nodes[i].width)
+ i += 1
+ }
+
+ return y
+}
+
+__AtlasAddRect :: proc(using ctx: ^FontContext, rw, rh: int) -> (rx, ry: int, ok: bool) {
+ bestw, besth := width, height
+ besti, bestx, besty := -1, -1, -1
+
+ // Bottom left fit heuristic.
+ for i in 0.. bool {
+ gx, gy := __AtlasAddRect(ctx, w, h) or_return
+
+ // Rasterize
+ dst := ctx.textureData[gx + gy * ctx.width:]
+ for _ in 0.. int {
+ data, ok := os.read_entire_file(path)
+
+ if !ok {
+ log.panicf("FONT: failed to read font at %s", path)
+ }
+
+ return AddFontMem(ctx, name, data, true)
+}
+
+// push a font to the font stack
+// optionally init with ascii characters at a wanted size
+AddFontMem :: proc(
+ ctx: ^FontContext,
+ name: string,
+ data: []u8,
+ freeLoadedData: bool,
+) -> int {
+ append(&ctx.fonts, Font{})
+ res := &ctx.fonts[len(ctx.fonts) - 1]
+ res.loadedData = data
+ res.freeLoadedData = freeLoadedData
+ res.name = strings.clone(name)
+
+ stbtt.InitFont(&res.info, &res.loadedData[0], 0)
+ ascent, descent, line_gap: i32
+
+ stbtt.GetFontVMetrics(&res.info, &ascent, &descent, &line_gap)
+ fh := f32(ascent - descent)
+ res.ascender = f32(ascent) / fh
+ res.descender = f32(descent) / fh
+ res.lineHeight = (fh + f32(line_gap)) / fh
+ res.glyphs = make([dynamic]Glyph, 0, INIT_GLYPHS)
+
+ __lutReset(res)
+ return len(ctx.fonts) - 1
+}
+
+AddFont :: proc { AddFontPath, AddFontMem }
+
+AddFallbackFont :: proc(ctx: ^FontContext, base, fallback: int) -> bool {
+ base_font := __getFont(ctx, base)
+
+ if base_font.nfallbacks < MAX_FALLBACKS {
+ base_font.fallbacks[base_font.nfallbacks] = fallback
+ base_font.nfallbacks += 1
+ return true
+ }
+
+ return false
+}
+
+ResetFallbackFont :: proc(ctx: ^FontContext, base: int) {
+ base_font := __getFont(ctx, base)
+ base_font.nfallbacks = 0
+ clear(&base_font.glyphs)
+ __lutReset(base_font)
+}
+
+// find font by name
+GetFontByName :: proc(ctx: ^FontContext, name: string) -> int {
+ for font, i in ctx.fonts {
+ if font.name == name {
+ return i
+ }
+ }
+
+ return INVALID
+}
+
+__lutReset :: proc(font: ^Font) {
+ // set lookup table
+ slice.fill(font.lut[:], -1)
+}
+
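+// 32-bit integer mix used to bucket codepoints into the glyph lookup table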
+__hashint :: proc(a: u32) -> u32 {
+ a := a
+ a += ~(a << 15)
+ a ~= (a >> 10)
+ a += (a << 3)
+ a ~= (a >> 6)
+ a += (a << 11)
+ a ~= (a >> 16)
+ return a
+}
+
+__renderGlyphBitmap :: proc(
+ font: ^Font,
+ output: []u8,
+ outWidth: i32,
+ outHeight: i32,
+ outStride: i32,
+ scaleX: f32,
+ scaleY: f32,
+ glyphIndex: Glyph_Index,
+) {
+ stbtt.MakeGlyphBitmap(&font.info, raw_data(output), outWidth, outHeight, outStride, scaleX, scaleY, glyphIndex)
+}
+
+__buildGlyphBitmap :: proc(
+ font: ^Font,
+ glyphIndex: Glyph_Index,
+ pixelSize: f32,
+ scale: f32,
+) -> (advance, lsb, x0, y0, x1, y1: i32) {
+ stbtt.GetGlyphHMetrics(&font.info, glyphIndex, &advance, &lsb)
+ stbtt.GetGlyphBitmapBox(&font.info, glyphIndex, scale, scale, &x0, &y0, &x1, &y1)
+ return
+}
+
+// get glyph and push to atlas if not exists
+__getGlyph :: proc(
+ ctx: ^FontContext,
+ font: ^Font,
+ codepoint: rune,
+ isize: i16,
+ blur: i16 = 0,
+) -> (res: ^Glyph, ok: bool) #no_bounds_check {
+ if isize < 2 {
+ return
+ }
+
+ // find code point and size
+ h := __hashint(u32(codepoint)) & (HASH_LUT_SIZE - 1)
+ for i := font.lut[h]; i != -1; /**/ {
+ glyph := &font.glyphs[i]
+
+ if glyph.codepoint == codepoint &&
+ glyph.isize == isize &&
+ glyph.blurSize == blur {
+ res = glyph
+ ok = true
+ return
+ }
+
+ i = glyph.next
+ }
+
+ // could not find glyph, create it.
+ render_font := font // font used to render
+ glyph_index := __getGlyph_index(font, codepoint)
+ if glyph_index == 0 {
+ // lookout for possible fallbacks
+ for i in 0.. 0 {
+ __blur(dst, int(gw), int(gh), ctx.width, blurSize)
+ }
+
+ ctx.dirtyRect[0] = f32(min(int(ctx.dirtyRect[0]), int(res.x0)))
+ ctx.dirtyRect[1] = f32(min(int(ctx.dirtyRect[1]), int(res.y0)))
+ ctx.dirtyRect[2] = f32(max(int(ctx.dirtyRect[2]), int(res.x1)))
+ ctx.dirtyRect[3] = f32(max(int(ctx.dirtyRect[3]), int(res.y1)))
+
+ ok = true
+ return
+}
+
+/////////////////////////////////
+// blur
+/////////////////////////////////
+
+// Based on Exponential blur, Jani Huhtanen, 2006
+
+BLUR_APREC :: 16
+BLUR_ZPREC :: 7
+
+__blurCols :: proc(dst: []u8, w, h, dstStride, alpha: int) {
+ dst := dst
+
+	for _ in 0..<h {
+		z := 0 // force zero border
+
+		for x in 1..<w {
+			z += (alpha * ((int(dst[x]) << BLUR_ZPREC) - z)) >> BLUR_APREC
+ dst[x] = u8(z >> BLUR_ZPREC)
+ }
+
+ dst[w - 1] = 0 // force zero border
+ z = 0
+
+ for x := w - 2; x >= 0; x -= 1 {
+ z += (alpha * ((int(dst[x]) << BLUR_ZPREC) - z)) >> BLUR_APREC
+ dst[x] = u8(z >> BLUR_ZPREC)
+ }
+
+ dst[0] = 0 // force zero border
+ dst = dst[dstStride:] // advance slice
+ }
+}
+
+__blurRows :: proc(dst: []u8, w, h, dstStride, alpha: int) {
+ dst := dst
+
+	for _ in 0..<w {
+		z := 0 // force zero border
+
+		for y := dstStride; y < h * dstStride; y += dstStride {
+			z += (alpha * ((int(dst[y]) << BLUR_ZPREC) - z)) >> BLUR_APREC
+ dst[y] = u8(z >> BLUR_ZPREC)
+ }
+
+ dst[(h - 1) * dstStride] = 0 // force zero border
+ z = 0
+
+ for y := (h - 2) * dstStride; y >= 0; y -= dstStride {
+ z += (alpha * ((int(dst[y]) << BLUR_ZPREC) - z)) >> BLUR_APREC
+ dst[y] = u8(z >> BLUR_ZPREC)
+ }
+
+ dst[0] = 0 // force zero border
+ dst = dst[1:] // advance
+ }
+}
+
+__blur :: proc(dst: []u8, w, h, dstStride: int, blurSize: i16) {
+ assert(blurSize > 0)
+
+ // Calculate the alpha such that 90% of the kernel is within the radius. (Kernel extends to infinity)
+ sigma := f32(blurSize) * 0.57735 // 1 / sqrt(3)
+ alpha := int((1 << BLUR_APREC) * (1 - math.exp(-2.3 / (sigma + 1))))
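+	// two row + column passes of the exponential filter approximate a Gaussian blur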
+ __blurRows(dst, w, h, dstStride, alpha)
+ __blurCols(dst, w, h, dstStride, alpha)
+ __blurRows(dst, w, h, dstStride, alpha)
+ __blurCols(dst, w, h, dstStride, alpha)
+}
+
+/////////////////////////////////
+// Texture expansion
+/////////////////////////////////
+
+ExpandAtlas :: proc(ctx: ^FontContext, width, height: int, allocator := context.allocator) -> bool {
+ w := max(ctx.width, width)
+ h := max(ctx.height, height)
+
+ if w == ctx.width && h == ctx.height {
+ return true
+ }
+
+ if ctx.callbackResize != nil {
+ ctx.callbackResize(ctx.userData, w, h)
+ }
+
+ data := make([]byte, w * h, allocator)
+
+	for i in 0..<ctx.height {
+		mem.copy(&data[i * w], &ctx.textureData[i * ctx.width], ctx.width)
+		if w > ctx.width {
+ mem.set(&data[i * w + ctx.width], 0, w - ctx.width)
+ }
+ }
+
+ if h > ctx.height {
+ mem.set(&data[ctx.height * w], 0, (h - ctx.height) * w)
+ }
+
+ delete(ctx.textureData)
+ ctx.textureData = data
+
+ // increase atlas size
+ __atlasExpand(ctx, w, h)
+
+ // add existing data as dirty
+ maxy := i16(0)
+ for node in ctx.nodes {
+ maxy = max(maxy, node.y)
+ }
+ ctx.dirtyRect[0] = 0
+ ctx.dirtyRect[1] = 0
+ ctx.dirtyRect[2] = f32(ctx.width)
+ ctx.dirtyRect[3] = f32(maxy)
+
+ ctx.width = w
+ ctx.height = h
+ ctx.itw = 1.0 / f32(w)
+ ctx.ith = 1.0 / f32(h)
+
+ return true
+}
+
+ResetAtlas :: proc(ctx: ^FontContext, width, height: int, allocator := context.allocator) -> bool {
+ if width == ctx.width && height == ctx.height {
+ // just clear
+ slice.zero(ctx.textureData)
+ } else {
+ // realloc
+ delete(ctx.textureData, allocator)
+ ctx.textureData = make([]byte, width * height, allocator)
+ }
+
+ ctx.dirtyRect[0] = f32(width)
+ ctx.dirtyRect[1] = f32(height)
+ ctx.dirtyRect[2] = 0
+ ctx.dirtyRect[3] = 0
+
+ // reset fonts
+ for &font in ctx.fonts {
+ clear(&font.glyphs)
+ __lutReset(&font)
+ }
+
+ ctx.width = width
+ ctx.height = height
+ ctx.itw = 1.0 / f32(width)
+ ctx.ith = 1.0 / f32(height)
+
+ _ = __AtlasAddWhiteRect(ctx, 2, 2)
+ return true
+}
+
+__getGlyph_index :: proc(font: ^Font, codepoint: rune) -> Glyph_Index {
+ return stbtt.FindGlyphIndex(&font.info, codepoint)
+}
+
+__getPixelHeightScale :: proc(font: ^Font, pixel_height: f32) -> f32 {
+ return stbtt.ScaleForPixelHeight(&font.info, pixel_height)
+}
+
+__getGlyphKernAdvance :: proc(font: ^Font, glyph1, glyph2: Glyph_Index) -> i32 {
+ return stbtt.GetGlyphKernAdvance(&font.info, glyph1, glyph2)
+}
+
+// get a font with bounds checking
+__getFont :: proc(ctx: ^FontContext, index: int, loc := #caller_location) -> ^Font #no_bounds_check {
+ runtime.bounds_check_error_loc(loc, index, len(ctx.fonts))
+ return &ctx.fonts[index]
+}
+
+// only useful for single glyphs where you quickly want the width
+CodepointWidth :: proc(
+ font: ^Font,
+ codepoint: rune,
+ scale: f32,
+) -> f32 {
+ glyph_index := __getGlyph_index(font, codepoint)
+ xadvance, lsb: i32
+ stbtt.GetGlyphHMetrics(&font.info, glyph_index, &xadvance, &lsb)
+ return f32(xadvance) * scale
+}
+
+// get top and bottom line boundary
+LineBounds :: proc(ctx: ^FontContext, y: f32) -> (miny, maxy: f32) {
+ state := __getState(ctx)
+ font := __getFont(ctx, state.font)
+ isize := i16(state.size * 10.0)
+ y := y
+ y += __getVerticalAlign(ctx, font, state.av, isize)
+
+ if ctx.location == .TOPLEFT {
+ miny = y - font.ascender * f32(isize) / 10
+ maxy = miny + font.lineHeight * f32(isize / 10)
+ } else if ctx.location == .BOTTOMLEFT {
+ miny = y + font.ascender * f32(isize) / 10
+ maxy = miny - font.lineHeight * f32(isize / 10)
+ }
+
+ return
+}
+
+// reset dirty rect
+__dirtyRectReset :: proc(using ctx: ^FontContext) {
+ dirtyRect[0] = f32(width)
+ dirtyRect[1] = f32(height)
+ dirtyRect[2] = 0
+ dirtyRect[3] = 0
+}
+
+// true when the dirty rectangle is valid and needs a texture update on the gpu
+ValidateTexture :: proc(using ctx: ^FontContext, dirty: ^[4]f32) -> bool {
+ if dirtyRect[0] < dirtyRect[2] && dirtyRect[1] < dirtyRect[3] {
+ dirty[0] = dirtyRect[0]
+ dirty[1] = dirtyRect[1]
+ dirty[2] = dirtyRect[2]
+ dirty[3] = dirtyRect[3]
+ __dirtyRectReset(ctx)
+ return true
+ }
+
+ return false
+}
+
+// get alignment based on font
+__getVerticalAlign :: proc(
+ ctx: ^FontContext,
+ font: ^Font,
+ av: AlignVertical,
+ pixelSize: i16,
+) -> (res: f32) {
+ switch ctx.location {
+ case .TOPLEFT:
+ switch av {
+ case .TOP: res = font.ascender * f32(pixelSize) / 10
+ case .MIDDLE: res = (font.ascender + font.descender) / 2 * f32(pixelSize) / 10
+ case .BASELINE: res = 0
+ case .BOTTOM: res = font.descender * f32(pixelSize) / 10
+ }
+
+ case .BOTTOMLEFT:
+ switch av {
+ case .TOP: res = -font.ascender * f32(pixelSize) / 10
+ case .MIDDLE: res = -(font.ascender + font.descender) / 2 * f32(pixelSize) / 10
+ case .BASELINE: res = 0
+ case .BOTTOM: res = -font.descender * f32(pixelSize) / 10
+ }
+ }
+
+ return
+}
+
+@(private)
+UTF8_ACCEPT :: 0
+
+@(private)
+UTF8_REJECT :: 1
+
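+// UTF-8 is decoded with a small DFA (based on Bjoern Hoehrmann's "Flexible and Economical UTF-8 Decoder")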
+@(private)
+utf8d := [400]u8{
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 00..1f
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 20..3f
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 40..5f
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 60..7f
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, // 80..9f
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, // a0..bf
+ 8,8,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, // c0..df
+ 0xa,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x4,0x3,0x3, // e0..ef
+ 0xb,0x6,0x6,0x6,0x5,0x8,0x8,0x8,0x8,0x8,0x8,0x8,0x8,0x8,0x8,0x8, // f0..ff
+ 0x0,0x1,0x2,0x3,0x5,0x8,0x7,0x1,0x1,0x1,0x4,0x6,0x1,0x1,0x1,0x1, // s0..s0
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,0,1,0,1,1,1,1,1,1, // s1..s2
+ 1,2,1,1,1,1,1,2,1,2,1,1,1,1,1,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1, // s3..s4
+ 1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,1,1,1,3,1,3,1,1,1,1,1,1, // s5..s6
+ 1,3,1,1,1,1,1,3,1,3,1,1,1,1,1,1,1,3,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // s7..s8
+}
+
+// decode codepoints from a state
+@(private)
+__decutf8 :: #force_inline proc(state: ^rune, codep: ^rune, b: byte) -> bool {
+ b := rune(b)
+ type := utf8d[b]
+ codep^ = (state^ != UTF8_ACCEPT) ? ((b & 0x3f) | (codep^ << 6)) : ((0xff >> type) & (b))
+ state^ = rune(utf8d[256 + state^ * 16 + rune(type)])
+ return state^ == UTF8_ACCEPT
+}
+
+// state used to share font options
+State :: struct {
+ font: int,
+ size: f32,
+ color: [4]u8,
+ spacing: f32,
+ blur: f32,
+
+ ah: AlignHorizontal,
+ av: AlignVertical,
+}
+
+// quad that should be used to draw from the texture atlas
+Quad :: struct {
+ x0, y0, s0, t0: f32,
+ x1, y1, s1, t1: f32,
+}
+
+// text iteration with custom settings
+TextIter :: struct {
+ x, y, nextx, nexty, scale, spacing: f32,
+ isize, iblur: i16,
+
+ font: ^Font,
+ previousGlyphIndex: Glyph_Index,
+
+ // unicode iteration
+ utf8state: rune, // utf8
+ codepoint: rune,
+ text: string,
+ codepointCount: int,
+
+ // byte indices
+ str: int,
+ next: int,
+ end: int,
+}
+
+// push a state; the current state is copied into the new top slot
+PushState :: proc(using ctx: ^FontContext, loc := #caller_location) #no_bounds_check {
+ runtime.bounds_check_error_loc(loc, state_count, MAX_STATES)
+
+ if state_count > 0 {
+ states[state_count] = states[state_count - 1]
+ }
+
+ state_count += 1
+}
+
+// pop a state
+PopState :: proc(using ctx: ^FontContext) {
+ if state_count <= 1 {
+		log.error("FONTSTASH: state underflow! too many pops were called")
+ } else {
+ state_count -= 1
+ }
+}
+
+// clear current state
+ClearState :: proc(ctx: ^FontContext) {
+ state := __getState(ctx)
+ state.size = 12
+ state.color = 255
+ state.blur = 0
+ state.spacing = 0
+ state.font = 0
+ state.ah = .LEFT
+ state.av = .BASELINE
+}
+
+__getState :: #force_inline proc(ctx: ^FontContext) -> ^State #no_bounds_check {
+ return &ctx.states[ctx.state_count - 1]
+}
+
+SetSize :: proc(ctx: ^FontContext, size: f32) {
+ __getState(ctx).size = size
+}
+
+SetColor :: proc(ctx: ^FontContext, color: [4]u8) {
+ __getState(ctx).color = color
+}
+
+SetSpacing :: proc(ctx: ^FontContext, spacing: f32) {
+ __getState(ctx).spacing = spacing
+}
+
+SetBlur :: proc(ctx: ^FontContext, blur: f32) {
+ __getState(ctx).blur = blur
+}
+
+SetFont :: proc(ctx: ^FontContext, font: int) {
+ __getState(ctx).font = font
+}
+
+SetAH :: SetAlignHorizontal
+SetAV :: SetAlignVertical
+
+SetAlignHorizontal :: proc(ctx: ^FontContext, ah: AlignHorizontal) {
+ __getState(ctx).ah = ah
+}
+
+SetAlignVertical :: proc(ctx: ^FontContext, av: AlignVertical) {
+ __getState(ctx).av = av
+}
+
+__getQuad :: proc(
+ ctx: ^FontContext,
+ font: ^Font,
+
+ previousGlyphIndex: i32,
+ glyph: ^Glyph,
+
+ scale: f32,
+ spacing: f32,
+
+ x, y: ^f32,
+ quad: ^Quad,
+) {
+ if previousGlyphIndex != -1 {
+ adv := f32(__getGlyphKernAdvance(font, previousGlyphIndex, glyph.index)) * scale
+ x^ += f32(int(adv + spacing + 0.5))
+ }
+
+ // fill props right
+ rx, ry, x0, y0, x1, y1, xoff, yoff: f32
+ xoff = f32(glyph.xoff + 1)
+ yoff = f32(glyph.yoff + 1)
+ x0 = f32(glyph.x0 + 1)
+ y0 = f32(glyph.y0 + 1)
+ x1 = f32(glyph.x1 - 1)
+ y1 = f32(glyph.y1 - 1)
+
+ switch ctx.location {
+ case .TOPLEFT:
+ rx = math.floor(x^ + xoff)
+ ry = math.floor(y^ + yoff)
+
+ quad.x0 = rx
+ quad.y0 = ry
+ quad.x1 = rx + x1 - x0
+ quad.y1 = ry + y1 - y0
+
+ quad.s0 = x0 * ctx.itw
+ quad.t0 = y0 * ctx.ith
+ quad.s1 = x1 * ctx.itw
+ quad.t1 = y1 * ctx.ith
+
+ case .BOTTOMLEFT:
+ rx = math.floor(x^ + xoff)
+ ry = math.floor(y^ - yoff)
+
+ quad.x0 = rx
+ quad.y0 = ry
+ quad.x1 = rx + x1 - x0
+ quad.y1 = ry - y1 + y0
+
+ quad.s0 = x0 * ctx.itw
+ quad.t0 = y0 * ctx.ith
+ quad.s1 = x1 * ctx.itw
+ quad.t1 = y1 * ctx.ith
+ }
+
+ x^ += f32(int(f32(glyph.xadvance) / 10 + 0.5))
+}
+
+// init text iter struct with settings
+TextIterInit :: proc(
+ ctx: ^FontContext,
+ x: f32,
+ y: f32,
+ text: string,
+) -> (res: TextIter) {
+
+ x, y := x, y
+
+ state := __getState(ctx)
+ res.font = __getFont(ctx, state.font)
+ res.isize = i16(f32(state.size) * 10)
+ res.iblur = i16(state.blur)
+ res.scale = __getPixelHeightScale(res.font, f32(res.isize) / 10)
+
+ // align horizontally
+ switch state.ah {
+ case .LEFT:
+ /**/
+ case .CENTER:
+ width := TextBounds(ctx, text, x, y, nil)
+ x = math.round(x - width * 0.5)
+ case .RIGHT:
+ width := TextBounds(ctx, text, x, y, nil)
+ x -= width
+ }
+
+ // align vertically
+ y = math.round(y + __getVerticalAlign(ctx, res.font, state.av, res.isize))
+
+ // set positions
+ res.x, res.nextx = x, x
+ res.y, res.nexty = y, y
+ res.previousGlyphIndex = -1
+ res.spacing = state.spacing
+ res.text = text
+
+ res.str = 0
+ res.next = 0
+ res.end = len(text)
+
+ return
+}
+
+// step through each codepoint
+TextIterNext :: proc(
+ ctx: ^FontContext,
+ iter: ^TextIter,
+ quad: ^Quad,
+) -> (ok: bool) {
+ str := iter.next
+ iter.str = iter.next
+
+ for str < iter.end {
+ defer str += 1
+
+ if __decutf8(&iter.utf8state, &iter.codepoint, iter.text[str]) {
+ iter.x = iter.nextx
+ iter.y = iter.nexty
+ iter.codepointCount += 1
+ if glyph, glyph_ok := __getGlyph(ctx, iter.font, iter.codepoint, iter.isize, iter.iblur); glyph_ok {
+ __getQuad(ctx, iter.font, iter.previousGlyphIndex, glyph, iter.scale, iter.spacing, &iter.nextx, &iter.nexty, quad)
+ iter.previousGlyphIndex = glyph.index
+ } else {
+ iter.previousGlyphIndex = -1
+ }
+ ok = true
+ break
+ }
+ }
+
+ iter.next = str
+ return
+}
+
+// width of a text line, optionally the full rect
+TextBounds :: proc(
+ ctx: ^FontContext,
+ text: string,
+ x: f32 = 0,
+ y: f32 = 0,
+ bounds: ^[4]f32 = nil,
+) -> f32 {
+ state := __getState(ctx)
+ isize := i16(state.size * 10)
+ iblur := i16(state.blur)
+ font := __getFont(ctx, state.font)
+
+ // bunch of state
+ x, y := x, y
+ minx, maxx := x, x
+ miny, maxy := y, y
+ start_x := x
+
+ // iterate
+ scale := __getPixelHeightScale(font, f32(isize) / 10)
+ previousGlyphIndex: Glyph_Index = -1
+ quad: Quad
+ utf8state: rune
+ codepoint: rune
+ for byte_offset in 0.. maxx {
+ maxx = quad.x1
+ }
+
+ if ctx.location == .TOPLEFT {
+ if quad.y0 < miny {
+ miny = quad.y0
+ }
+ if quad.y1 > maxy {
+ maxy = quad.y1
+ }
+ } else if ctx.location == .BOTTOMLEFT {
+ if quad.y1 < miny {
+ miny = quad.y1
+ }
+ if quad.y0 > maxy {
+ maxy = quad.y0
+ }
+ }
+
+ previousGlyphIndex = glyph.index
+ } else {
+ previousGlyphIndex = -1
+ }
+
+ }
+ }
+
+ // horizontal alignment
+ advance := x - start_x
+ switch state.ah {
+ case .LEFT:
+ /**/
+ case .CENTER:
+ minx -= advance * 0.5
+ maxx -= advance * 0.5
+ case .RIGHT:
+ minx -= advance
+ maxx -= advance
+ }
+
+ if bounds != nil {
+ bounds^ = { minx, miny, maxx, maxy }
+ }
+
+ return advance
+}
+
+VerticalMetrics :: proc(
+ ctx: ^FontContext,
+) -> (ascender, descender, lineHeight: f32) {
+ state := __getState(ctx)
+ isize := i16(state.size * 10.0)
+ font := __getFont(ctx, state.font)
+ ascender = font.ascender * f32(isize / 10)
+ descender = font.descender * f32(isize / 10)
+ lineHeight = font.lineHeight * f32(isize / 10)
+ return
+}
+
+// reset to single state
+BeginState :: proc(ctx: ^FontContext) {
+ ctx.state_count = 0
+ PushState(ctx)
+ ClearState(ctx)
+}
+
+// checks for texture updates after potential __getGlyph calls
+EndState :: proc(using ctx: ^FontContext) {
+ // check for texture update
+ if dirtyRect[0] < dirtyRect[2] && dirtyRect[1] < dirtyRect[3] {
+ if callbackUpdate != nil {
+ callbackUpdate(userData, dirtyRect, raw_data(textureData))
+ }
+ __dirtyRectReset(ctx)
+ }
+}
\ No newline at end of file
diff --git a/vendor/ggpo/ggpo.odin b/vendor/ggpo/ggpo.odin
index d17c33638..b38f4fb65 100644
--- a/vendor/ggpo/ggpo.odin
+++ b/vendor/ggpo/ggpo.odin
@@ -50,7 +50,7 @@ Player :: struct {
player_num: c.int,
using u: struct #raw_union {
local: struct {},
- remove: struct {
+ remote: struct {
ip_address: [32]byte,
port: u16,
},
diff --git a/vendor/nanovg/gl/frag.glsl b/vendor/nanovg/gl/frag.glsl
new file mode 100644
index 000000000..423214c9b
--- /dev/null
+++ b/vendor/nanovg/gl/frag.glsl
@@ -0,0 +1,123 @@
+#ifdef GL_ES
+#if defined(GL_FRAGMENT_PRECISION_HIGH) || defined(NANOVG_GL3)
+ precision highp float;
+#else
+ precision mediump float;
+#endif
+#endif
+#ifdef NANOVG_GL3
+#ifdef USE_UNIFORMBUFFER
+ layout(std140) uniform frag {
+ mat3 scissorMat;
+ mat3 paintMat;
+ vec4 innerCol;
+ vec4 outerCol;
+ vec2 scissorExt;
+ vec2 scissorScale;
+ vec2 extent;
+ float radius;
+ float feather;
+ float strokeMult;
+ float strokeThr;
+ int texType;
+ int type;
+ };
+#else // NANOVG_GL3 && !USE_UNIFORMBUFFER
+ uniform vec4 frag[UNIFORMARRAY_SIZE];
+#endif
+ uniform sampler2D tex;
+ in vec2 ftcoord;
+ in vec2 fpos;
+ out vec4 outColor;
+#else // !NANOVG_GL3
+ uniform vec4 frag[UNIFORMARRAY_SIZE];
+ uniform sampler2D tex;
+ varying vec2 ftcoord;
+ varying vec2 fpos;
+#endif
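+// Without uniform buffer support the fill parameters arrive as an array of vec4s and are unpacked via these defines.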
+#ifndef USE_UNIFORMBUFFER
+ #define scissorMat mat3(frag[0].xyz, frag[1].xyz, frag[2].xyz)
+ #define paintMat mat3(frag[3].xyz, frag[4].xyz, frag[5].xyz)
+ #define innerCol frag[6]
+ #define outerCol frag[7]
+ #define scissorExt frag[8].xy
+ #define scissorScale frag[8].zw
+ #define extent frag[9].xy
+ #define radius frag[9].z
+ #define feather frag[9].w
+ #define strokeMult frag[10].x
+ #define strokeThr frag[10].y
+ #define texType int(frag[10].z)
+ #define type int(frag[10].w)
+#endif
+
+float sdroundrect(vec2 pt, vec2 ext, float rad) {
+ vec2 ext2 = ext - vec2(rad,rad);
+ vec2 d = abs(pt) - ext2;
+ return min(max(d.x,d.y),0.0) + length(max(d,0.0)) - rad;
+}
+
+// Scissoring
+float scissorMask(vec2 p) {
+ vec2 sc = (abs((scissorMat * vec3(p,1.0)).xy) - scissorExt);
+ sc = vec2(0.5,0.5) - sc * scissorScale;
+ return clamp(sc.x,0.0,1.0) * clamp(sc.y,0.0,1.0);
+}
+#ifdef EDGE_AA
+// Stroke - from [0..1] to clipped pyramid, where the slope is 1px.
+float strokeMask() {
+ return min(1.0, (1.0-abs(ftcoord.x*2.0-1.0))*strokeMult) * min(1.0, ftcoord.y);
+}
+#endif
+
+void main(void) {
+ vec4 result;
+ float scissor = scissorMask(fpos);
+#ifdef EDGE_AA
+ float strokeAlpha = strokeMask();
+ if (strokeAlpha < strokeThr) discard;
+#else
+ float strokeAlpha = 1.0;
+#endif
+ if (type == 0) { // Gradient
+ // Calculate gradient color using box gradient
+ vec2 pt = (paintMat * vec3(fpos,1.0)).xy;
+ float d = clamp((sdroundrect(pt, extent, radius) + feather*0.5) / feather, 0.0, 1.0);
+ vec4 color = mix(innerCol,outerCol,d);
+ // Combine alpha
+ color *= strokeAlpha * scissor;
+ result = color;
+ } else if (type == 1) { // Image
+		// Calculate color from texture
+ vec2 pt = (paintMat * vec3(fpos,1.0)).xy / extent;
+#ifdef NANOVG_GL3
+ vec4 color = texture(tex, pt);
+#else
+ vec4 color = texture2D(tex, pt);
+#endif
+ if (texType == 1) color = vec4(color.xyz*color.w,color.w);
+ if (texType == 2) color = vec4(color.x);
+ // Apply color tint and alpha.
+ color *= innerCol;
+ // Combine alpha
+ color *= strokeAlpha * scissor;
+ result = color;
+ } else if (type == 2) { // Stencil fill
+ result = vec4(1,1,1,1);
+ } else if (type == 3) { // Textured tris
+#ifdef NANOVG_GL3
+ vec4 color = texture(tex, ftcoord);
+#else
+ vec4 color = texture2D(tex, ftcoord);
+#endif
+ if (texType == 1) color = vec4(color.xyz*color.w,color.w);
+ if (texType == 2) color = vec4(color.x);
+ color *= scissor;
+ result = color * innerCol;
+ }
+#ifdef NANOVG_GL3
+ outColor = result;
+#else
+ gl_FragColor = result;
+#endif
+}
\ No newline at end of file
diff --git a/vendor/nanovg/gl/gl.odin b/vendor/nanovg/gl/gl.odin
new file mode 100644
index 000000000..50df6e1b5
--- /dev/null
+++ b/vendor/nanovg/gl/gl.odin
@@ -0,0 +1,1453 @@
+//+build windows, linux, darwin
+package nanovg_gl
+
+import "core:log"
+import "core:strings"
+import "core:mem"
+import "core:math"
+import "core:fmt"
+import gl "vendor:OpenGL"
+import nvg "../../nanovg"
+
+Color :: nvg.Color
+Vertex :: nvg.Vertex
+ImageFlags :: nvg.ImageFlags
+TextureType :: nvg.Texture
+Paint :: nvg.Paint
+ScissorT :: nvg.ScissorT
+
+CreateFlag :: enum {
+ // Flag indicating if geometry based anti-aliasing is used (may not be needed when using MSAA).
+ ANTI_ALIAS,
+ // Flag indicating if strokes should be drawn using stencil buffer. The rendering will be a little
+ // slower, but path overlaps (i.e. self-intersecting or sharp turns) will be drawn just once.
+ STENCIL_STROKES,
+ // additional debug checks
+ DEBUG,
+}
+CreateFlags :: bit_set[CreateFlag]
+
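+// when true, redundant GL state changes (texture binds, stencil state, blend funcs) are skipped using the state cached in Context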
+USE_STATE_FILTER :: #config(USE_STATE_FILTER, true)
+
+UniformLoc :: enum {
+ VIEW_SIZE,
+ TEX,
+ FRAG,
+}
+
+ShaderType :: enum i32 {
+ FILL_GRAD,
+ FILL_IMG,
+ SIMPLE,
+ IMG,
+}
+
+Shader :: struct {
+ prog: u32,
+ frag: u32,
+ vert: u32,
+ loc: [UniformLoc]i32,
+}
+
+Texture :: struct {
+ id: int,
+ tex: u32,
+ width, height: int,
+ type: TextureType,
+ flags: ImageFlags,
+}
+
+Blend :: struct {
+ src_RGB: u32,
+ dst_RGB: u32,
+ src_alpha: u32,
+ dst_alpha: u32,
+}
+
+CallType :: enum {
+ NONE,
+ FILL,
+ CONVEX_FILL,
+ STROKE,
+ TRIANGLES,
+}
+
+Call :: struct {
+ type: CallType,
+ image: int,
+ pathOffset: int,
+ pathCount: int,
+ triangleOffset: int,
+ triangleCount: int,
+ uniformOffset: int,
+ blendFunc: Blend,
+}
+
+Path :: struct {
+ fillOffset: int,
+ fillCount: int,
+ strokeOffset: int,
+ strokeCount: int,
+}
+
+GL_UNIFORMARRAY_SIZE :: 11
+
+when GL2_IMPLEMENTATION {
+ FragUniforms :: struct #raw_union {
+ using _: struct {
+ scissorMat: [12]f32, // matrices are actually 3 vec4s
+ paintMat: [12]f32,
+ innerColor: Color,
+ outerColor: Color,
+ scissorExt: [2]f32,
+ scissorScale: [2]f32,
+ extent: [2]f32,
+ radius: f32,
+ feather: f32,
+ strokeMult: f32,
+ strokeThr: f32,
+ texType: i32,
+ type: ShaderType,
+ },
+ uniform_array: [GL_UNIFORMARRAY_SIZE][4]f32,
+ }
+} else {
+ FragUniforms :: struct #packed {
+ scissorMat: [12]f32, // matrices are actually 3 vec4s
+ paintMat: [12]f32,
+ innerColor: Color,
+ outerColor: Color,
+ scissorExt: [2]f32,
+ scissorScale: [2]f32,
+ extent: [2]f32,
+ radius: f32,
+ feather: f32,
+ strokeMult: f32,
+ strokeThr: f32,
+ texType: i32,
+ type: ShaderType,
+ }
+}
+
+DEFAULT_IMPLEMENTATION_STRING :: #config(NANOVG_GL_IMPL, "GL3")
+GL2_IMPLEMENTATION :: DEFAULT_IMPLEMENTATION_STRING == "GL2"
+GL3_IMPLEMENTATION :: DEFAULT_IMPLEMENTATION_STRING == "GL3"
+GLES2_IMPLEMENTATION :: DEFAULT_IMPLEMENTATION_STRING == "GLES2"
+GLES3_IMPLEMENTATION :: DEFAULT_IMPLEMENTATION_STRING == "GLES3"
+
+when GL2_IMPLEMENTATION {
+ GL2 :: true
+ GL3 :: false
+ GLES2 :: false
+ GLES3 :: false
+ GL_IMPLEMENTATION :: true
+ GL_USE_UNIFORMBUFFER :: false
+} else when GL3_IMPLEMENTATION {
+ GL2 :: false
+ GL3 :: true
+ GLES2 :: false
+ GLES3 :: false
+ GL_IMPLEMENTATION :: true
+ GL_USE_UNIFORMBUFFER :: true
+} else when GLES2_IMPLEMENTATION {
+ GL2 :: false
+ GL3 :: false
+ GLES2 :: true
+ GLES3 :: false
+ GL_IMPLEMENTATION :: true
+ GL_USE_UNIFORMBUFFER :: false
+} else when GLES3_IMPLEMENTATION {
+ GL2 :: false
+ GL3 :: false
+ GLES2 :: false
+ GLES3 :: true
+ GL_IMPLEMENTATION :: true
+ GL_USE_UNIFORMBUFFER :: false
+}
+
+Context :: struct {
+ shader: Shader,
+ textures: [dynamic]Texture,
+ view: [2]f32,
+ textureId: int,
+
+ vertBuf: u32,
+ vertArr: u32, // GL3
+ fragBuf: u32, // USE_UNIFORMBUFFER
+ fragSize: int,
+ flags: CreateFlags,
+ frag_binding: u32,
+
+ // Per frame buffers
+ calls: [dynamic]Call,
+ paths: [dynamic]Path,
+ verts: [dynamic]Vertex,
+ uniforms: [dynamic]byte,
+
+ // cached state used for state filter
+ boundTexture: u32,
+ stencilMask: u32,
+ stencilFunc: u32,
+ stencilFuncRef: i32,
+ stencilFuncMask: u32,
+ blendFunc: Blend,
+
+ dummyTex: int,
+}
+
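+// round num up to the next power of two (used to detect non-power-of-two textures on GLES2)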
+__nearestPow2 :: proc(num: uint) -> uint {
+ n := num > 0 ? num - 1 : 0
+ n |= n >> 1
+ n |= n >> 2
+ n |= n >> 4
+ n |= n >> 8
+ n |= n >> 16
+ n += 1
+ return n
+}
+
+__bindTexture :: proc(ctx: ^Context, tex: u32) {
+ when USE_STATE_FILTER {
+ if ctx.boundTexture != tex {
+ ctx.boundTexture = tex
+ gl.BindTexture(gl.TEXTURE_2D, tex)
+ }
+ } else {
+ gl.BindTexture(gl.TEXTURE_2D, tex)
+ }
+}
+
+__stencilMask :: proc(ctx: ^Context, mask: u32) {
+ when USE_STATE_FILTER {
+ if ctx.stencilMask != mask {
+ ctx.stencilMask = mask
+ gl.StencilMask(mask)
+ }
+ } else {
+ gl.StencilMask(mask)
+ }
+}
+
+__stencilFunc :: proc(ctx: ^Context, func: u32, ref: i32, mask: u32) {
+ when USE_STATE_FILTER {
+ if ctx.stencilFunc != func ||
+ ctx.stencilFuncRef != ref ||
+ ctx.stencilFuncMask != mask {
+ ctx.stencilFunc = func
+ ctx.stencilFuncRef = ref
+ ctx.stencilFuncMask = mask
+ gl.StencilFunc(func, ref, mask)
+ }
+ } else {
+ gl.StencilFunc(func, ref, mask)
+ }
+}
+
+__blendFuncSeparate :: proc(ctx: ^Context, blend: ^Blend) {
+ when USE_STATE_FILTER {
+ if ctx.blendFunc != blend^ {
+ ctx.blendFunc = blend^
+ gl.BlendFuncSeparate(blend.src_RGB, blend.dst_RGB, blend.src_alpha, blend.dst_alpha)
+ }
+ } else {
+ gl.BlendFuncSeparate(blend.src_RGB, blend.dst_RGB, blend.src_alpha, blend.dst_alpha)
+ }
+}
+
+__allocTexture :: proc(ctx: ^Context) -> (tex: ^Texture) {
+ for &texture in ctx.textures {
+ if texture.id == 0 {
+ tex = &texture
+ break
+ }
+ }
+
+ if tex == nil {
+ append(&ctx.textures, Texture {})
+ tex = &ctx.textures[len(ctx.textures) - 1]
+ }
+
+ tex^ = {}
+ ctx.textureId += 1
+ tex.id = ctx.textureId
+
+ return
+}
+
+__findTexture :: proc(ctx: ^Context, id: int) -> ^Texture {
+ for &texture in ctx.textures {
+ if texture.id == id {
+ return &texture
+ }
+ }
+
+ return nil
+}
+
+__deleteTexture :: proc(ctx: ^Context, id: int) -> bool {
+ for &texture, i in ctx.textures {
+ if texture.id == id {
+ if texture.tex != 0 && (.NO_DELETE not_in texture.flags) {
+ gl.DeleteTextures(1, &texture.tex)
+ }
+
+ ctx.textures[i] = {}
+ return true
+ }
+ }
+
+ return false
+}
+
+__deleteShader :: proc(shader: ^Shader) {
+ if shader.prog != 0 {
+ gl.DeleteProgram(shader.prog)
+ }
+
+ if shader.vert != 0 {
+ gl.DeleteShader(shader.vert)
+ }
+
+ if shader.frag != 0 {
+ gl.DeleteShader(shader.frag)
+ }
+}
+
+__getUniforms :: proc(shader: ^Shader) {
+ shader.loc[.VIEW_SIZE] = gl.GetUniformLocation(shader.prog, "viewSize")
+ shader.loc[.TEX] = gl.GetUniformLocation(shader.prog, "tex")
+
+ when GL_USE_UNIFORMBUFFER {
+ shader.loc[.FRAG] = i32(gl.GetUniformBlockIndex(shader.prog, "frag"))
+ } else {
+ shader.loc[.FRAG] = gl.GetUniformLocation(shader.prog, "frag")
+ }
+}
+
+vert_shader := #load("vert.glsl")
+frag_shader := #load("frag.glsl")
+
+__renderCreate :: proc(uptr: rawptr) -> bool {
+ ctx := cast(^Context) uptr
+
+ // just build the string at runtime
+ builder := strings.builder_make(0, 512, context.temp_allocator)
+
+ when GL2 {
+ strings.write_string(&builder, "#define NANOVG_GL2 1\n")
+ } else when GL3 {
+ strings.write_string(&builder, "#version 150 core\n#define NANOVG_GL3 1\n")
+ } else when GLES2 {
+ strings.write_string(&builder, "#version 100\n#define NANOVG_GL2 1\n")
+ } else when GLES3 {
+ strings.write_string(&builder, "#version 300 es\n#define NANOVG_GL3 1\n")
+ }
+
+ when GL_USE_UNIFORMBUFFER {
+ strings.write_string(&builder, "#define USE_UNIFORMBUFFER 1\n")
+ } else {
+ strings.write_string(&builder, "#define UNIFORMARRAY_SIZE 11\n")
+ }
+
+ __checkError(ctx, "init")
+
+ shader_header := strings.to_string(builder)
+ anti: string = .ANTI_ALIAS in ctx.flags ? "#define EDGE_AA 1\n" : " "
+ if !__createShader(
+ &ctx.shader,
+ shader_header,
+ anti,
+ string(vert_shader),
+ string(frag_shader),
+ ) {
+ return false
+ }
+
+ __checkError(ctx, "uniform locations")
+ __getUniforms(&ctx.shader)
+
+ when GL3 {
+ gl.GenVertexArrays(1, &ctx.vertArr)
+ }
+
+ gl.GenBuffers(1, &ctx.vertBuf)
+ align := i32(4)
+
+ when GL_USE_UNIFORMBUFFER {
+ // Create UBOs
+ gl.UniformBlockBinding(ctx.shader.prog, u32(ctx.shader.loc[.FRAG]), ctx.frag_binding)
+ gl.GenBuffers(1, &ctx.fragBuf)
+ gl.GetIntegerv(gl.UNIFORM_BUFFER_OFFSET_ALIGNMENT, &align)
+ }
+
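+	// round the per-call uniform block size up to a multiple of the required alignment (the UBO offset alignment when uniform buffers are used)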
+ ctx.fragSize = int(size_of(FragUniforms) + align - size_of(FragUniforms) % align)
+ // ctx.fragSize = size_of(FragUniforms)
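+	// 1x1 alpha texture used as a fallback whenever a draw call has no image bound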
+ ctx.dummyTex = __renderCreateTexture(ctx, .Alpha, 1, 1, {}, nil)
+
+ __checkError(ctx, "create done")
+
+ gl.Finish()
+
+ return true
+}
+
+__renderCreateTexture :: proc(
+ uptr: rawptr,
+ type: TextureType,
+ w, h: int,
+ imageFlags: ImageFlags,
+ data: []byte,
+) -> int {
+ ctx := cast(^Context) uptr
+ tex := __allocTexture(ctx)
+ imageFlags := imageFlags
+
+ if tex == nil {
+ return 0
+ }
+
+ when GLES2 {
+ if __nearestPow2(uint(w)) != uint(w) || __nearestPow2(uint(h)) != uint(h) {
+ // No repeat
+ if (.REPEAT_X in imageFlags) || (.REPEAT_Y in imageFlags) {
+ log.errorf("Repeat X/Y is not supported for non power-of-two textures (%d x %d)\n", w, h)
+ excl(&imageFlags, ImageFlags { .REPEAT_X, .REPEAT_Y })
+ }
+
+ // No mips.
+ if .GENERATE_MIPMAPS in imageFlags {
+				log.errorf("Mip-maps are not supported for non power-of-two textures (%d x %d)\n", w, h)
+ excl(&imageFlags, ImageFlags { .GENERATE_MIPMAPS })
+ }
+ }
+ }
+
+ gl.GenTextures(1, &tex.tex)
+ tex.width = w
+ tex.height = h
+ tex.type = type
+ tex.flags = imageFlags
+ __bindTexture(ctx, tex.tex)
+
+ gl.PixelStorei(gl.UNPACK_ALIGNMENT,1)
+
+ when GLES2 {
+ gl.PixelStorei(gl.UNPACK_ROW_LENGTH, i32(tex.width))
+ gl.PixelStorei(gl.UNPACK_SKIP_PIXELS, 0)
+ gl.PixelStorei(gl.UNPACK_SKIP_ROWS, 0)
+ }
+
+ when GL2 {
+ if .GENERATE_MIPMAPS in imageFlags {
+ gl.TexParameteri(gl.TEXTURE_2D, gl.GENERATE_MIPMAP, 1)
+ }
+ }
+
+ if type == .RGBA {
+ gl.TexImage2D(gl.TEXTURE_2D, 0, gl.RGBA, i32(w), i32(h), 0, gl.RGBA, gl.UNSIGNED_BYTE, raw_data(data))
+ } else {
+ when GLES2 || GL2 {
+ gl.TexImage2D(gl.TEXTURE_2D, 0, gl.LUMINANCE, i32(w), i32(h), 0, gl.LUMINANCE, gl.UNSIGNED_BYTE, raw_data(data))
+ } else when GLES3 {
+ gl.TexImage2D(gl.TEXTURE_2D, 0, gl.R8, i32(w), i32(h), 0, gl.RED, gl.UNSIGNED_BYTE, raw_data(data))
+ } else {
+ gl.TexImage2D(gl.TEXTURE_2D, 0, gl.RED, i32(w), i32(h), 0, gl.RED, gl.UNSIGNED_BYTE, raw_data(data))
+ }
+ }
+
+ if .GENERATE_MIPMAPS in imageFlags {
+ if .NEAREST in imageFlags {
+ gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST_MIPMAP_NEAREST)
+ } else {
+ gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR_MIPMAP_LINEAR)
+ }
+ } else {
+ if .NEAREST in imageFlags {
+ gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST)
+ } else {
+ gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR)
+ }
+ }
+
+ if .NEAREST in imageFlags {
+ gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST)
+ } else {
+ gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR)
+ }
+
+ if .REPEAT_X in imageFlags {
+ gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.REPEAT)
+ } else {
+ gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE)
+ }
+
+ if .REPEAT_Y in imageFlags {
+ gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.REPEAT)
+ } else {
+ gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE)
+ }
+
+ gl.PixelStorei(gl.UNPACK_ALIGNMENT, 4)
+
+ when GLES2 {
+ gl.PixelStorei(gl.UNPACK_ROW_LENGTH, 0)
+ gl.PixelStorei(gl.UNPACK_SKIP_PIXELS, 0)
+ gl.PixelStorei(gl.UNPACK_SKIP_ROWS, 0)
+ }
+
+ // The new way to build mipmaps on GLES and GL3
+ when !GL2 {
+ if .GENERATE_MIPMAPS in imageFlags {
+ gl.GenerateMipmap(gl.TEXTURE_2D)
+ }
+ }
+
+ __checkError(ctx, "create tex")
+ __bindTexture(ctx, 0)
+
+ return tex.id
+}
+
+__checkError :: proc(ctx: ^Context, str: string) {
+ if .DEBUG in ctx.flags {
+ err := gl.GetError()
+
+ if err != gl.NO_ERROR {
+ log.errorf("FOUND ERROR %08x:\n\t%s\n", err, str)
+ }
+ }
+}
+
+__checkProgramError :: proc(prog: u32) {
+ status: i32
+ gl.GetProgramiv(prog, gl.LINK_STATUS, &status)
+ length: i32
+ gl.GetProgramiv(prog, gl.INFO_LOG_LENGTH, &length)
+
+ if status == 0 {
+ temp := make([]byte, length)
+ defer delete(temp)
+
+ gl.GetProgramInfoLog(prog, length, nil, raw_data(temp))
+ log.errorf("Program Error:\n%s\n", string(temp[:length]))
+ }
+}
+
+__checkShaderError :: proc(shader: u32, type: string) {
+ status: i32
+ gl.GetShaderiv(shader, gl.COMPILE_STATUS, &status)
+ length: i32
+ gl.GetShaderiv(shader, gl.INFO_LOG_LENGTH, &length)
+
+ if status == 0 {
+ temp := make([]byte, length)
+ defer delete(temp)
+
+ gl.GetShaderInfoLog(shader, length, nil, raw_data(temp))
+ log.errorf("Shader error:\n%s\n", string(temp[:length]))
+ }
+}
+
+// TODO good case for or_return
+__createShader :: proc(
+ shader: ^Shader,
+ header: string,
+ opts: string,
+ vshader: string,
+ fshader: string,
+) -> bool {
+ shader^ = {}
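+	// the final GLSL source is three concatenated strings: the version/define header, the optional EDGE_AA define, and the shader body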
+ str: [3]cstring
+ lengths: [3]i32
+ str[0] = cstring(raw_data(header))
+ str[1] = cstring(raw_data(opts))
+
+ lengths[0] = i32(len(header))
+ lengths[1] = i32(len(opts))
+
+ prog := gl.CreateProgram()
+ vert := gl.CreateShader(gl.VERTEX_SHADER)
+ frag := gl.CreateShader(gl.FRAGMENT_SHADER)
+
+ // vert shader
+ str[2] = cstring(raw_data(vshader))
+ lengths[2] = i32(len(vshader))
+ gl.ShaderSource(vert, 3, &str[0], &lengths[0])
+ gl.CompileShader(vert)
+ __checkShaderError(vert, "vert")
+
+ // fragment shader
+ str[2] = cstring(raw_data(fshader))
+ lengths[2] = i32(len(fshader))
+ gl.ShaderSource(frag, 3, &str[0], &lengths[0])
+ gl.CompileShader(frag)
+ __checkShaderError(frag, "frag")
+
+ gl.AttachShader(prog, vert)
+ gl.AttachShader(prog, frag)
+
+ gl.BindAttribLocation(prog, 0, "vertex")
+ gl.BindAttribLocation(prog, 1, "tcoord")
+
+ gl.LinkProgram(prog)
+ __checkProgramError(prog)
+
+ shader.prog = prog
+ shader.vert = vert
+ shader.frag = frag
+ return true
+}
+
+__renderDeleteTexture :: proc(uptr: rawptr, image: int) -> bool {
+ ctx := cast(^Context) uptr
+ return __deleteTexture(ctx, image)
+}
+
+__renderUpdateTexture :: proc(
+ uptr: rawptr,
+ image: int,
+ x, y: int,
+ w, h: int,
+ data: []byte,
+) -> bool {
+ ctx := cast(^Context) uptr
+ tex := __findTexture(ctx, image)
+
+ if tex == nil {
+ return false
+ }
+
+ __bindTexture(ctx, tex.tex)
+
+ gl.PixelStorei(gl.UNPACK_ALIGNMENT,1)
+
+ x := x
+ w := w
+ data := data
+
+ when GLES2 {
+ gl.PixelStorei(gl.UNPACK_ROW_LENGTH, i32(tex.width))
+ gl.PixelStorei(gl.UNPACK_SKIP_PIXELS, i32(x))
+ gl.PixelStorei(gl.UNPACK_SKIP_ROWS, i32(y))
+ } else {
+		// No support for all of the skip parameters here; update a whole row at a time instead.
+ if tex.type == .RGBA {
+ data = data[y * tex.width * 4:]
+ } else {
+ data = data[y * tex.width:]
+ }
+
+ x = 0
+ w = tex.width
+ }
+
+ if tex.type == .RGBA {
+ gl.TexSubImage2D(gl.TEXTURE_2D, 0, i32(x), i32(y), i32(w), i32(h), gl.RGBA, gl.UNSIGNED_BYTE, raw_data(data))
+ } else {
+ when GLES2 || GL2 {
+ gl.TexSubImage2D(gl.TEXTURE_2D, 0, i32(x), i32(y), i32(w), i32(h), gl.LUMINANCE, gl.UNSIGNED_BYTE, raw_data(data))
+ } else {
+ gl.TexSubImage2D(gl.TEXTURE_2D, 0, i32(x), i32(y), i32(w), i32(h), gl.RED, gl.UNSIGNED_BYTE, raw_data(data))
+ }
+ }
+
+ gl.PixelStorei(gl.UNPACK_ALIGNMENT, 4)
+
+ when GLES2 {
+ gl.PixelStorei(gl.UNPACK_ROW_LENGTH, 0)
+ gl.PixelStorei(gl.UNPACK_SKIP_PIXELS, 0)
+ gl.PixelStorei(gl.UNPACK_SKIP_ROWS, 0)
+ }
+
+ __bindTexture(ctx, 0)
+
+ return true
+}
+
+__renderGetTextureSize :: proc(uptr: rawptr, image: int, w, h: ^int) -> bool {
+ ctx := cast(^Context) uptr
+ tex := __findTexture(ctx, image)
+
+ if tex == nil {
+ return false
+ }
+
+ w^ = tex.width
+ h^ = tex.height
+ return true
+}
+
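+// expand a 2x3 affine transform into three vec4 columns (12 floats), matching the mat3 layout the shader unpacks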
+__xformToMat3x4 :: proc(m3: ^[12]f32, t: [6]f32) {
+ m3[0] = t[0]
+ m3[1] = t[1]
+ m3[2] = 0
+ m3[3] = 0
+ m3[4] = t[2]
+ m3[5] = t[3]
+ m3[6] = 0
+ m3[7] = 0
+ m3[8] = t[4]
+ m3[9] = t[5]
+ m3[10] = 1
+ m3[11] = 0
+}
+
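+// convert a straight-alpha color to premultiplied alpha for blending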
+__premulColor :: proc(c: Color) -> (res: Color) {
+ res = c
+ res.r *= c.a
+ res.g *= c.a
+ res.b *= c.a
+ return
+}
+
+__convertPaint :: proc(
+ ctx: ^Context,
+ frag: ^FragUniforms,
+ paint: ^Paint,
+ scissor: ^ScissorT,
+ width: f32,
+ fringe: f32,
+ strokeThr: f32,
+) -> bool {
+ invxform: [6]f32
+ frag^ = {}
+ frag.innerColor = __premulColor(paint.innerColor)
+ frag.outerColor = __premulColor(paint.outerColor)
+
+ if scissor.extent[0] < -0.5 || scissor.extent[1] < -0.5 {
+ frag.scissorMat = {}
+ frag.scissorExt[0] = 1.0
+ frag.scissorExt[1] = 1.0
+ frag.scissorScale[0] = 1.0
+ frag.scissorScale[1] = 1.0
+ } else {
+ nvg.TransformInverse(&invxform, scissor.xform)
+ __xformToMat3x4(&frag.scissorMat, invxform)
+ frag.scissorExt[0] = scissor.extent[0]
+ frag.scissorExt[1] = scissor.extent[1]
+ frag.scissorScale[0] = math.sqrt(scissor.xform[0]*scissor.xform[0] + scissor.xform[2]*scissor.xform[2]) / fringe
+ frag.scissorScale[1] = math.sqrt(scissor.xform[1]*scissor.xform[1] + scissor.xform[3]*scissor.xform[3]) / fringe
+ }
+
+ frag.extent = paint.extent
+ frag.strokeMult = (width * 0.5 + fringe * 0.5) / fringe
+ frag.strokeThr = strokeThr
+
+ if paint.image != 0 {
+ tex := __findTexture(ctx, paint.image)
+
+ if tex == nil {
+ return false
+ }
+
+ // TODO maybe inversed?
+ if .FLIP_Y in tex.flags {
+ m1: [6]f32
+ m2: [6]f32
+ nvg.TransformTranslate(&m1, 0.0, frag.extent[1] * 0.5)
+ nvg.TransformMultiply(&m1, paint.xform)
+ nvg.TransformScale(&m2, 1.0, -1.0)
+ nvg.TransformMultiply(&m2, m1)
+ nvg.TransformTranslate(&m1, 0.0, -frag.extent[1] * 0.5)
+ nvg.TransformMultiply(&m1, m2)
+ nvg.TransformInverse(&invxform, m1)
+ } else {
+ nvg.TransformInverse(&invxform, paint.xform)
+ }
+
+ frag.type = .FILL_IMG
+
+ when GL_USE_UNIFORMBUFFER {
+ if tex.type == .RGBA {
+ frag.texType = (.PREMULTIPLIED in tex.flags) ? 0 : 1
+ } else {
+ frag.texType = 2
+ }
+ } else {
+ if tex.type == .RGBA {
+ frag.texType = (.PREMULTIPLIED in tex.flags) ? 0.0 : 1.0
+ } else {
+ frag.texType = 2.0
+ }
+ }
+ } else {
+ frag.type = .FILL_GRAD
+ frag.radius = paint.radius
+ frag.feather = paint.feather
+ nvg.TransformInverse(&invxform, paint.xform)
+ }
+
+ __xformToMat3x4(&frag.paintMat, invxform)
+
+ return true
+}
+
+__setUniforms :: proc(ctx: ^Context, uniformOffset: int, image: int) {
+ when GL_USE_UNIFORMBUFFER {
+ gl.BindBufferRange(gl.UNIFORM_BUFFER, ctx.frag_binding, ctx.fragBuf, uniformOffset, size_of(FragUniforms))
+ } else {
+ frag := __fragUniformPtr(ctx, uniformOffset)
+ gl.Uniform4fv(ctx.shader.loc[.FRAG], GL_UNIFORMARRAY_SIZE, cast(^f32) frag)
+ }
+
+ __checkError(ctx, "uniform4")
+
+ tex: ^Texture
+ if image != 0 {
+ tex = __findTexture(ctx, image)
+ }
+
+ // If no image is set, use empty texture
+ if tex == nil {
+ tex = __findTexture(ctx, ctx.dummyTex)
+ }
+
+ __bindTexture(ctx, tex != nil ? tex.tex : 0)
+ __checkError(ctx, "tex paint tex")
+}
+
+__renderViewport :: proc(uptr: rawptr, width, height, devicePixelRatio: f32) {
+ ctx := cast(^Context) uptr
+ ctx.view[0] = width
+ ctx.view[1] = height
+}
+
+__fill :: proc(ctx: ^Context, call: ^Call) {
+ paths := ctx.paths[call.pathOffset:]
+
+ // Draw shapes
+ gl.Enable(gl.STENCIL_TEST)
+ __stencilMask(ctx, 0xff)
+ __stencilFunc(ctx, gl.ALWAYS, 0, 0xff)
+ gl.ColorMask(gl.FALSE, gl.FALSE, gl.FALSE, gl.FALSE)
+
+ // set bindpoint for solid loc
+ __setUniforms(ctx, call.uniformOffset, 0)
+ __checkError(ctx, "fill simple")
+
+ gl.StencilOpSeparate(gl.FRONT, gl.KEEP, gl.KEEP, gl.INCR_WRAP)
+ gl.StencilOpSeparate(gl.BACK, gl.KEEP, gl.KEEP, gl.DECR_WRAP)
+ gl.Disable(gl.CULL_FACE)
+ for i in 0.. 0 {
+ gl.DrawArrays(gl.TRIANGLE_STRIP, i32(paths[i].strokeOffset), i32(paths[i].strokeCount))
+ }
+ }
+}
+
+__stroke :: proc(ctx: ^Context, call: ^Call) {
+ paths := ctx.paths[call.pathOffset:]
+
+ if .STENCIL_STROKES in ctx.flags {
+ gl.Enable(gl.STENCIL_TEST)
+ __stencilMask(ctx, 0xff)
+
+ // Fill the stroke base without overlap
+ __stencilFunc(ctx, gl.EQUAL, 0x0, 0xff)
+ gl.StencilOp(gl.KEEP, gl.KEEP, gl.INCR)
+ __setUniforms(ctx, call.uniformOffset + ctx.fragSize, call.image)
+ __checkError(ctx, "stroke fill 0")
+
+ for i in 0.. Blend {
+ table := BLEND_FACTOR_TABLE
+ blend := Blend {
+ table[op.srcRGB],
+ table[op.dstRGB],
+ table[op.srcAlpha],
+ table[op.dstAlpha],
+ }
+ return blend
+}
+
+__renderFlush :: proc(uptr: rawptr) {
+ ctx := cast(^Context) uptr
+
+ if len(ctx.calls) > 0 {
+ // Set up the required GL state.
+ gl.UseProgram(ctx.shader.prog)
+
+ gl.Enable(gl.CULL_FACE)
+ gl.CullFace(gl.BACK)
+ gl.FrontFace(gl.CCW)
+ gl.Enable(gl.BLEND)
+ gl.Disable(gl.DEPTH_TEST)
+ gl.Disable(gl.SCISSOR_TEST)
+ gl.ColorMask(gl.TRUE, gl.TRUE, gl.TRUE, gl.TRUE)
+ gl.StencilMask(0xffffffff)
+ gl.StencilOp(gl.KEEP, gl.KEEP, gl.KEEP)
+ gl.StencilFunc(gl.ALWAYS, 0, 0xffffffff)
+ gl.ActiveTexture(gl.TEXTURE0)
+ gl.BindTexture(gl.TEXTURE_2D, 0)
+
+ when USE_STATE_FILTER {
+ ctx.boundTexture = 0
+ ctx.stencilMask = 0xffffffff
+ ctx.stencilFunc = gl.ALWAYS
+ ctx.stencilFuncRef = 0
+ ctx.stencilFuncMask = 0xffffffff
+ ctx.blendFunc.src_RGB = gl.INVALID_ENUM
+ ctx.blendFunc.src_alpha = gl.INVALID_ENUM
+ ctx.blendFunc.dst_RGB = gl.INVALID_ENUM
+ ctx.blendFunc.dst_alpha = gl.INVALID_ENUM
+ }
+
+ when GL_USE_UNIFORMBUFFER {
+ // Upload ubo for frag shaders
+ gl.BindBuffer(gl.UNIFORM_BUFFER, ctx.fragBuf)
+ gl.BufferData(gl.UNIFORM_BUFFER, len(ctx.uniforms), raw_data(ctx.uniforms), gl.STREAM_DRAW)
+ }
+
+ // Upload vertex data
+ when GL3 {
+ gl.BindVertexArray(ctx.vertArr)
+ }
+
+ gl.BindBuffer(gl.ARRAY_BUFFER, ctx.vertBuf)
+ gl.BufferData(gl.ARRAY_BUFFER, len(ctx.verts) * size_of(Vertex), raw_data(ctx.verts), gl.STREAM_DRAW)
+ gl.EnableVertexAttribArray(0)
+ gl.EnableVertexAttribArray(1)
+ gl.VertexAttribPointer(0, 2, gl.FLOAT, gl.FALSE, size_of(Vertex), 0)
+ gl.VertexAttribPointer(1, 2, gl.FLOAT, gl.FALSE, size_of(Vertex), 2 * size_of(f32))
+
+ // Set view and texture just once per frame.
+ gl.Uniform1i(ctx.shader.loc[.TEX], 0)
+ gl.Uniform2fv(ctx.shader.loc[.VIEW_SIZE], 1, &ctx.view[0])
+
+ when GL_USE_UNIFORMBUFFER {
+ gl.BindBuffer(gl.UNIFORM_BUFFER, ctx.fragBuf)
+ }
+
+ for i in 0.. (count: int) {
+ for i in 0.. ^Call {
+ append(&ctx.calls, Call {})
+ return &ctx.calls[len(ctx.calls) - 1]
+}
+
+// alloc paths and return the original start position
+__allocPaths :: proc(ctx: ^Context, count: int) -> int {
+ old := len(ctx.paths)
+ resize(&ctx.paths, len(ctx.paths) + count)
+ return old
+}
+
+// alloc verts and return the original start position
+__allocVerts :: proc(ctx: ^Context, count: int) -> int {
+ old := len(ctx.verts)
+ resize(&ctx.verts, len(ctx.verts) + count)
+ return old
+}
+
+// alloc uniforms and return the original start position
+__allocFragUniforms :: proc(ctx: ^Context, count: int) -> int {
+ ret := len(ctx.uniforms)
+ resize(&ctx.uniforms, len(ctx.uniforms) + count * ctx.fragSize)
+ return ret
+}
+
+// get frag uniforms from byte slice offset
+__fragUniformPtr :: proc(ctx: ^Context, offset: int) -> ^FragUniforms {
+ return cast(^FragUniforms) &ctx.uniforms[offset]
+}
+
+///////////////////////////////////////////////////////////
+// CALLBACKS
+///////////////////////////////////////////////////////////
+
+__renderFill :: proc(
+ uptr: rawptr,
+ paint: ^nvg.Paint,
+ compositeOperation: nvg.CompositeOperationState,
+ scissor: ^ScissorT,
+ fringe: f32,
+ bounds: [4]f32,
+ paths: []nvg.Path,
+) {
+ ctx := cast(^Context) uptr
+ call := __allocCall(ctx)
+
+ call.type = .FILL
+ call.triangleCount = 4
+ call.pathOffset = __allocPaths(ctx, len(paths))
+ call.pathCount = len(paths)
+ call.image = paint.image
+ call.blendFunc = __blendCompositeOperation(compositeOperation)
+
+ if len(paths) == 1 && paths[0].convex {
+ call.type = .CONVEX_FILL
+ call.triangleCount = 0
+ }
+
+ // allocate vertices for all the paths
+ maxverts := __maxVertCount(paths) + call.triangleCount
+ offset := __allocVerts(ctx, maxverts)
+
+ for i in 0.. 0 {
+ copy.fillOffset = offset
+ copy.fillCount = len(path.fill)
+ mem.copy(&ctx.verts[offset], &path.fill[0], size_of(Vertex) * len(path.fill))
+ offset += len(path.fill)
+ }
+
+ if len(path.stroke) > 0 {
+ copy.strokeOffset = offset
+ copy.strokeCount = len(path.stroke)
+ mem.copy(&ctx.verts[offset], &path.stroke[0], size_of(Vertex) * len(path.stroke))
+ offset += len(path.stroke)
+ }
+ }
+
+ // setup uniforms for draw calls
+ if call.type == .FILL {
+ // quad
+ call.triangleOffset = offset
+ quad := ctx.verts[call.triangleOffset:call.triangleOffset+4]
+ quad[0] = { bounds[2], bounds[3], 0.5, 1 }
+ quad[1] = { bounds[2], bounds[1], 0.5, 1 }
+ quad[2] = { bounds[0], bounds[3], 0.5, 1 }
+ quad[3] = { bounds[0], bounds[1], 0.5, 1 }
+
+ // simple shader for stencil
+ call.uniformOffset = __allocFragUniforms(ctx, 2)
+ frag := __fragUniformPtr(ctx, call.uniformOffset)
+ frag^ = {}
+ frag.strokeThr = -1
+ frag.type = .SIMPLE
+
+ // fill shader
+ __convertPaint(
+ ctx,
+ __fragUniformPtr(ctx, call.uniformOffset + ctx.fragSize),
+ paint,
+ scissor,
+ fringe,
+ fringe,
+ -1,
+ )
+ } else {
+ call.uniformOffset = __allocFragUniforms(ctx, 1)
+ // fill shader
+ __convertPaint(
+ ctx,
+ __fragUniformPtr(ctx, call.uniformOffset),
+ paint,
+ scissor,
+ fringe,
+ fringe,
+ -1,
+ )
+ }
+}
+
+__renderStroke :: proc(
+ uptr: rawptr,
+ paint: ^Paint,
+ compositeOperation: nvg.CompositeOperationState,
+ scissor: ^ScissorT,
+ fringe: f32,
+ strokeWidth: f32,
+ paths: []nvg.Path,
+) {
+ ctx := cast(^Context) uptr
+ call := __allocCall(ctx)
+
+ call.type = .STROKE
+ call.pathOffset = __allocPaths(ctx, len(paths))
+ call.pathCount = len(paths)
+ call.image = paint.image
+ call.blendFunc = __blendCompositeOperation(compositeOperation)
+
+ // allocate vertices for all the paths
+ maxverts := __maxVertCount(paths)
+ offset := __allocVerts(ctx, maxverts)
+
+ for i in 0.. ^nvg.Context {
+ ctx := new(Context)
+ params: nvg.Params
+ params.renderCreate = __renderCreate
+ params.renderCreateTexture = __renderCreateTexture
+ params.renderDeleteTexture = __renderDeleteTexture
+ params.renderUpdateTexture = __renderUpdateTexture
+ params.renderGetTextureSize = __renderGetTextureSize
+ params.renderViewport = __renderViewport
+ params.renderCancel = __renderCancel
+ params.renderFlush = __renderFlush
+ params.renderFill = __renderFill
+ params.renderStroke = __renderStroke
+ params.renderTriangles = __renderTriangles
+ params.renderDelete = __renderDelete
+ params.userPtr = ctx
+ params.edgeAntiAlias = (.ANTI_ALIAS in flags)
+ ctx.flags = flags
+ return nvg.CreateInternal(params)
+}
+
+Destroy :: proc(ctx: ^nvg.Context) {
+ nvg.DeleteInternal(ctx)
+}
+
+CreateImageFromHandle :: proc(ctx: ^nvg.Context, textureId: u32, w, h: int, imageFlags: ImageFlags) -> int {
+ gctx := cast(^Context) ctx.params.userPtr
+ tex := __allocTexture(gctx)
+ tex.type = .RGBA
+ tex.tex = textureId
+ tex.flags = imageFlags
+ tex.width = w
+ tex.height = h
+ return tex.id
+}
+
+ImageHandle :: proc(ctx: ^nvg.Context, textureId: int) -> u32 {
+ gctx := cast(^Context) ctx.params.userPtr
+ tex := __findTexture(gctx, textureId)
+ return tex.tex
+}
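+
+// Illustrative sketch (added for clarity, not part of the upstream port): wrapping a GL
+// texture that was created elsewhere (e.g. by another render pass) so it can be used as a
+// NanoVG paint. The helper name and parameters are hypothetical.
+@(private="file")
+_example_wrap_external_texture :: proc(ctx: ^nvg.Context, gl_tex: u32, w, h: int) -> nvg.Paint {
+	// NO_DELETE keeps NanoVG from deleting a texture it does not own.
+	image := CreateImageFromHandle(ctx, gl_tex, w, h, {.NO_DELETE})
+	// Tile the whole texture over a w*h rectangle, no rotation, full alpha.
+	return nvg.ImagePattern(0, 0, f32(w), f32(h), 0, image, 1)
+}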
+
+// additional framebuffer helpers
+
+framebuffer :: struct {
+ ctx: ^nvg.Context,
+ fbo: u32,
+ rbo: u32,
+ texture: u32,
+ image: int,
+}
+
+DEFAULT_FBO :: 100_000
+defaultFBO := i32(DEFAULT_FBO)
+
+// helper to bind a framebuffer for rendering; pass nil to restore the previously bound default framebuffer
+BindFramebuffer :: proc(fb: ^framebuffer) {
+ if defaultFBO == DEFAULT_FBO {
+ gl.GetIntegerv(gl.FRAMEBUFFER_BINDING, &defaultFBO)
+ }
+ gl.BindFramebuffer(gl.FRAMEBUFFER, fb != nil ? fb.fbo : u32(defaultFBO))
+}
+
+CreateFramebuffer :: proc(ctx: ^nvg.Context, w, h: int, imageFlags: ImageFlags) -> (fb: framebuffer) {
+ tempFBO: i32
+ tempRBO: i32
+ gl.GetIntegerv(gl.FRAMEBUFFER_BINDING, &tempFBO)
+ gl.GetIntegerv(gl.RENDERBUFFER_BINDING, &tempRBO)
+
+ imageFlags := imageFlags
+ incl(&imageFlags, ImageFlags { .FLIP_Y, .PREMULTIPLIED })
+ fb.image = nvg.CreateImageRGBA(ctx, w, h, imageFlags, nil)
+ fb.texture = ImageHandle(ctx, fb.image)
+ fb.ctx = ctx
+
+ // frame buffer object
+ gl.GenFramebuffers(1, &fb.fbo)
+ gl.BindFramebuffer(gl.FRAMEBUFFER, fb.fbo)
+
+ // render buffer object
+ gl.GenRenderbuffers(1, &fb.rbo)
+ gl.BindRenderbuffer(gl.RENDERBUFFER, fb.rbo)
+ gl.RenderbufferStorage(gl.RENDERBUFFER, gl.STENCIL_INDEX8, i32(w), i32(h))
+
+ // combine all
+ gl.FramebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, fb.texture, 0)
+ gl.FramebufferRenderbuffer(gl.FRAMEBUFFER, gl.STENCIL_ATTACHMENT, gl.RENDERBUFFER, fb.rbo)
+
+ if gl.CheckFramebufferStatus(gl.FRAMEBUFFER) != gl.FRAMEBUFFER_COMPLETE {
+// #ifdef gl.DEPTH24_STENCIL8
+ // If gl.STENCIL_INDEX8 is not supported, try gl.DEPTH24_STENCIL8 as a fallback.
+ // Some graphics cards require a depth buffer along with a stencil.
+ gl.RenderbufferStorage(gl.RENDERBUFFER, gl.DEPTH24_STENCIL8, i32(w), i32(h))
+ gl.FramebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, fb.texture, 0)
+ gl.FramebufferRenderbuffer(gl.FRAMEBUFFER, gl.STENCIL_ATTACHMENT, gl.RENDERBUFFER, fb.rbo)
+
+ if gl.CheckFramebufferStatus(gl.FRAMEBUFFER) != gl.FRAMEBUFFER_COMPLETE {
+ fmt.eprintln("nanovg_gl: framebuffer is incomplete")
+ }
+// #endif // gl.DEPTH24_STENCIL8
+// goto error
+ }
+
+ gl.BindFramebuffer(gl.FRAMEBUFFER, u32(tempFBO))
+ gl.BindRenderbuffer(gl.RENDERBUFFER, u32(tempRBO))
+ return
+}
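+
+// Illustrative sketch (added for clarity, not part of the upstream port): render NanoVG
+// content into an offscreen framebuffer, then restore the previous binding. The helper
+// name is hypothetical; the caller frees the result with DeleteFramebuffer and may use
+// fb.image with nvg.ImagePattern afterwards.
+@(private="file")
+_example_render_to_framebuffer :: proc(ctx: ^nvg.Context, w, h: int) -> framebuffer {
+	fb := CreateFramebuffer(ctx, w, h, {})
+
+	BindFramebuffer(&fb)
+	gl.Viewport(0, 0, i32(w), i32(h))
+	gl.ClearColor(0, 0, 0, 0)
+	gl.Clear(gl.COLOR_BUFFER_BIT | gl.STENCIL_BUFFER_BIT)
+
+	nvg.BeginFrame(ctx, f32(w), f32(h), 1)
+	// ... normal NanoVG draw calls targeting the offscreen image go here ...
+	nvg.EndFrame(ctx)
+
+	BindFramebuffer(nil) // back to the default (previously bound) framebuffer
+	return fb
+}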
+
+DeleteFramebuffer :: proc(fb: ^framebuffer) {
+ if fb == nil {
+ return
+ }
+
+ if fb.fbo != 0 {
+ gl.DeleteFramebuffers(1, &fb.fbo)
+ }
+
+ if fb.rbo != 0 {
+ gl.DeleteRenderbuffers(1, &fb.rbo)
+ }
+
+ if fb.image >= 0 {
+ nvg.DeleteImage(fb.ctx, fb.image)
+ }
+
+ fb.ctx = nil
+ fb.fbo = 0
+ fb.rbo = 0
+ fb.texture = 0
+ fb.image = -1
+}
\ No newline at end of file
diff --git a/vendor/nanovg/gl/vert.glsl b/vendor/nanovg/gl/vert.glsl
new file mode 100644
index 000000000..f937da09a
--- /dev/null
+++ b/vendor/nanovg/gl/vert.glsl
@@ -0,0 +1,19 @@
+#ifdef NANOVG_GL3
+ uniform vec2 viewSize;
+ in vec2 vertex;
+ in vec2 tcoord;
+ out vec2 ftcoord;
+ out vec2 fpos;
+#else
+ uniform vec2 viewSize;
+ attribute vec2 vertex;
+ attribute vec2 tcoord;
+ varying vec2 ftcoord;
+ varying vec2 fpos;
+#endif
+
+void main(void) {
+ ftcoord = tcoord;
+ fpos = vertex;
+ gl_Position = vec4(2.0*vertex.x/viewSize.x - 1.0, 1.0 - 2.0*vertex.y/viewSize.y, 0, 1);
+}
\ No newline at end of file
diff --git a/vendor/nanovg/nanovg.odin b/vendor/nanovg/nanovg.odin
new file mode 100644
index 000000000..8d30a407d
--- /dev/null
+++ b/vendor/nanovg/nanovg.odin
@@ -0,0 +1,3443 @@
+//+build windows, linux, darwin
+package nanovg
+
+// TODO rename structs to old nanovg style!
+// TODO rename enums to old nanovg style!
+
+import "core:mem"
+import "core:math"
+import "core:fmt"
+import "../fontstash"
+import stbi "vendor:stb/image"
+
+AlignVertical :: fontstash.AlignVertical
+AlignHorizontal :: fontstash.AlignHorizontal
+
+INIT_FONTIMAGE_SIZE :: 512
+MAX_FONTIMAGE_SIZE :: 2048
+MAX_FONTIMAGES :: 4
+
+MAX_STATES :: 32
+INIT_COMMANDS_SIZE :: 256
+INIT_POINTS_SIZE :: 128
+INIT_PATH_SIZE :: 16
+INIT_VERTS_SIZE :: 26
+KAPPA :: 0.5522847493
+
+Color :: [4]f32
+Matrix :: [6]f32
+Vertex :: [4]f32 // x,y,u,v
+
+ImageFlag :: enum {
+ GENERATE_MIPMAPS,
+ REPEAT_X,
+ REPEAT_Y,
+ FLIP_Y,
+ PREMULTIPLIED,
+ NEAREST,
+ NO_DELETE,
+}
+ImageFlags :: bit_set[ImageFlag]
+
+Paint :: struct {
+ xform: Matrix,
+ extent: [2]f32,
+ radius: f32,
+ feather: f32,
+ innerColor: Color,
+ outerColor: Color,
+ image: int,
+}
+
+Winding :: enum {
+ CCW = 1,
+ CW,
+}
+
+Solidity :: enum {
+ SOLID = 1, // CCW
+ HOLE, // CW
+}
+
+LineCapType :: enum {
+ BUTT,
+ ROUND,
+ SQUARE,
+ BEVEL,
+ MITER,
+}
+
+BlendFactor :: enum {
+ ZERO,
+ ONE,
+ SRC_COLOR,
+ ONE_MINUS_SRC_COLOR,
+ DST_COLOR,
+ ONE_MINUS_DST_COLOR,
+ SRC_ALPHA,
+ ONE_MINUS_SRC_ALPHA,
+ DST_ALPHA,
+ ONE_MINUS_DST_ALPHA,
+ SRC_ALPHA_SATURATE,
+}
+
+CompositeOperation :: enum {
+ SOURCE_OVER,
+ SOURCE_IN,
+ SOURCE_OUT,
+ ATOP,
+ DESTINATION_OVER,
+ DESTINATION_IN,
+ DESTINATION_OUT,
+ DESTINATION_ATOP,
+ LIGHTER,
+ COPY,
+ XOR,
+}
+
+CompositeOperationState :: struct {
+ srcRGB: BlendFactor,
+ dstRGB: BlendFactor,
+ srcAlpha: BlendFactor,
+ dstAlpha: BlendFactor,
+}
+
+// render data structures
+
+Texture :: enum {
+ Alpha,
+ RGBA,
+}
+
+ScissorT :: struct {
+ xform: Matrix,
+ extent: [2]f32,
+}
+
+Commands :: enum {
+ MOVE_TO,
+ LINE_TO,
+ BEZIER_TO,
+ CLOSE,
+ WINDING,
+}
+
+PointFlag :: enum {
+ CORNER,
+ LEFT,
+ BEVEL,
+ INNER_BEVEL,
+}
+PointFlags :: bit_set[PointFlag]
+
+Point :: struct {
+ x, y: f32,
+ dx, dy: f32,
+ len: f32,
+ dmx, dmy: f32,
+ flags: PointFlags,
+}
+
+PathCache :: struct {
+ points: [dynamic]Point,
+ paths: [dynamic]Path,
+ verts: [dynamic]Vertex,
+ bounds: [4]f32,
+}
+
+Path :: struct {
+ first: int,
+ count: int,
+ closed: bool,
+ nbevel: int,
+ fill: []Vertex,
+ stroke: []Vertex,
+ winding: Winding,
+ convex: bool,
+}
+
+State :: struct {
+ compositeOperation: CompositeOperationState,
+ shapeAntiAlias: bool,
+ fill: Paint,
+ stroke: Paint,
+ strokeWidth: f32,
+ miterLimit: f32,
+ lineJoin: LineCapType,
+ lineCap: LineCapType,
+ alpha: f32,
+ xform: Matrix,
+ scissor: ScissorT,
+
+ // font state
+ fontSize: f32,
+ letterSpacing: f32,
+ lineHeight: f32,
+ fontBlur: f32,
+ alignHorizontal: AlignHorizontal,
+ alignVertical: AlignVertical,
+ fontId: int,
+}
+
+Context :: struct {
+ params: Params,
+ commands: [dynamic]f32,
+ commandx, commandy: f32,
+ states: [MAX_STATES]State,
+ nstates: int,
+ cache: PathCache,
+ tessTol: f32,
+ distTol: f32,
+ fringeWidth: f32,
+ devicePxRatio: f32,
+
+ // font
+ fs: fontstash.FontContext,
+ fontImages: [MAX_FONTIMAGES]int,
+ fontImageIdx: int,
+
+ // stats
+ drawCallCount: int,
+ fillTriCount: int,
+ strokeTriCount: int,
+ textTriCount: int,
+
+ // flush texture
+ textureDirty: bool,
+}
+
+Params :: struct {
+ userPtr: rawptr,
+ edgeAntiAlias: bool,
+
+ // callbacks to fill out
+ renderCreate: proc(uptr: rawptr) -> bool,
+ renderDelete: proc(uptr: rawptr),
+
+ // textures calls
+ renderCreateTexture: proc(
+ uptr: rawptr,
+ type: Texture,
+ w, h: int,
+ imageFlags: ImageFlags,
+ data: []byte,
+ ) -> int,
+ renderDeleteTexture: proc(uptr: rawptr, image: int) -> bool,
+ renderUpdateTexture: proc(
+ uptr: rawptr,
+ image: int,
+ x, y: int,
+ w, h: int,
+ data: []byte,
+ ) -> bool,
+ renderGetTextureSize: proc(uptr: rawptr, image: int, w, h: ^int) -> bool,
+
+ // rendering calls
+ renderViewport: proc(uptr: rawptr, width, height, devicePixelRatio: f32),
+ renderCancel: proc(uptr: rawptr),
+ renderFlush: proc(uptr: rawptr),
+ renderFill: proc(
+ uptr: rawptr,
+ paint: ^Paint,
+ compositeOperation: CompositeOperationState,
+ scissor: ^ScissorT,
+ fringe: f32,
+ bounds: [4]f32,
+ paths: []Path,
+ ),
+ renderStroke: proc(
+ uptr: rawptr,
+ paint: ^Paint,
+ compositeOperation: CompositeOperationState,
+ scissor: ^ScissorT,
+ fringe: f32,
+ strokeWidth: f32,
+ paths: []Path,
+ ),
+ renderTriangles: proc(
+ uptr: rawptr,
+ paint: ^Paint,
+ compositeOperation: CompositeOperationState,
+ scissor: ^ScissorT,
+ verts: []Vertex,
+ fringe: f32,
+ ),
+}
+
+__allocPathCache :: proc(c: ^PathCache) {
+ c.points = make([dynamic]Point, 0, INIT_POINTS_SIZE)
+ c.paths = make([dynamic]Path, 0, INIT_PATH_SIZE)
+ c.verts = make([dynamic]Vertex, 0, INIT_VERTS_SIZE)
+}
+
+__deletePathCache :: proc(c: PathCache) {
+ delete(c.points)
+ delete(c.paths)
+ delete(c.verts)
+}
+
+__setDevicePxRatio :: proc(ctx: ^Context, ratio: f32) {
+ ctx.tessTol = 0.25 / ratio
+ ctx.distTol = 0.01 / ratio
+ ctx.fringeWidth = 1.0 / ratio
+ ctx.devicePxRatio = ratio
+}
+
+__getState :: #force_inline proc(ctx: ^Context) -> ^State #no_bounds_check {
+ return &ctx.states[ctx.nstates-1]
+}
+
+CreateInternal :: proc(params: Params) -> (ctx: ^Context) {
+ ctx = new(Context)
+ ctx.params = params
+ ctx.commands = make([dynamic]f32, 0, INIT_COMMANDS_SIZE)
+ __allocPathCache(&ctx.cache)
+
+ Save(ctx)
+ Reset(ctx)
+ __setDevicePxRatio(ctx, 1)
+
+ assert(ctx.params.renderCreate != nil)
+ if !ctx.params.renderCreate(ctx.params.userPtr) {
+ DeleteInternal(ctx)
+ panic("Nanovg - CreateInternal failed")
+ }
+
+ w := INIT_FONTIMAGE_SIZE
+ h := INIT_FONTIMAGE_SIZE
+ fontstash.Init(&ctx.fs, w, h, .TOPLEFT)
+ assert(ctx.params.renderCreateTexture != nil)
+ ctx.fs.userData = ctx
+
+ // handle to the image needs to be set to the new generated texture
+ ctx.fs.callbackResize = proc(data: rawptr, w, h: int) {
+ ctx := (^Context)(data)
+ ctx.fontImages[0] = ctx.params.renderCreateTexture(ctx.params.userPtr, .Alpha, w, h, {}, ctx.fs.textureData)
+ }
+
+ // texture atlas
+ ctx.fontImages[0] = ctx.params.renderCreateTexture(ctx.params.userPtr, .Alpha, w, h, {}, nil)
+ ctx.fontImageIdx = 0
+
+ return
+}
+
+DeleteInternal :: proc(ctx: ^Context) {
+ __deletePathCache(ctx.cache)
+ fontstash.Destroy(&ctx.fs)
+
+ for image in ctx.fontImages {
+ if image != 0 {
+ DeleteImage(ctx, image)
+ }
+ }
+
+ if ctx.params.renderDelete != nil {
+ ctx.params.renderDelete(ctx.params.userPtr)
+ }
+
+ free(ctx)
+}
+
+/*
+ Begin drawing a new frame.
+ Calls to the nanovg drawing API should be wrapped between BeginFrame() and EndFrame().
+ BeginFrame() defines the size of the window to render to in relation to the currently
+ set viewport (i.e. glViewport on GL backends). The device pixel ratio allows control
+ of the rendering on Hi-DPI devices.
+ For example, GLFW returns two dimensions for an opened window: the window size and the
+ frame buffer size. In that case you would set windowWidth/Height to the window size
+ and devicePixelRatio to frameBufferWidth / windowWidth.
+*/
+BeginFrame :: proc(
+ ctx: ^Context,
+ windowWidth: f32,
+ windowHeight: f32,
+ devicePixelRatio: f32,
+) {
+ ctx.nstates = 0
+ Save(ctx)
+ Reset(ctx)
+ __setDevicePxRatio(ctx, devicePixelRatio)
+
+ assert(ctx.params.renderViewport != nil)
+ ctx.params.renderViewport(ctx.params.userPtr, windowWidth, windowHeight, devicePixelRatio)
+
+ ctx.drawCallCount = 0
+ ctx.fillTriCount = 0
+ ctx.strokeTriCount = 0
+ ctx.textTriCount = 0
+}
+
+@(deferred_out=EndFrame)
+FrameScoped :: proc(
+ ctx: ^Context,
+ windowWidth: f32,
+ windowHeight: f32,
+ devicePixelRatio: f32,
+) -> ^Context {
+ BeginFrame(ctx, windowWidth, windowHeight, devicePixelRatio)
+ return ctx
+}
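+
+// Illustrative sketch (added for clarity, not part of the original file): the expected
+// per-frame flow. win_w/win_h/fb_w are hypothetical values reported by the windowing
+// layer (e.g. GLFW window size and framebuffer width).
+@(private="file")
+_example_frame :: proc(ctx: ^Context, win_w, win_h, fb_w: f32) {
+	// On Hi-DPI displays the device pixel ratio is framebuffer width / window width.
+	BeginFrame(ctx, win_w, win_h, fb_w / win_w)
+	// ... build paths and fill/stroke them here ...
+	EndFrame(ctx)
+
+	// Alternatively, FrameScoped(ctx, win_w, win_h, fb_w / win_w) defers EndFrame
+	// automatically at the end of the enclosing scope.
+}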
+
+// Cancels drawing the current frame.
+CancelFrame :: proc(ctx: ^Context) {
+ assert(ctx.params.renderCancel != nil)
+ ctx.params.renderCancel(ctx.params.userPtr)
+}
+
+// Ends drawing, flushing the remaining render state.
+EndFrame :: proc(ctx: ^Context) {
+ // flush texture only once
+ if ctx.textureDirty {
+ __flushTextTexture(ctx)
+ ctx.textureDirty = false
+ }
+
+ assert(ctx.params.renderFlush != nil)
+ ctx.params.renderFlush(ctx.params.userPtr)
+
+ // delete textures with invalid size
+ if ctx.fontImageIdx != 0 {
+ font_image := ctx.fontImages[ctx.fontImageIdx]
+ ctx.fontImages[ctx.fontImageIdx] = 0
+
+ if font_image == 0 {
+ return
+ }
+
+ iw, ih := ImageSize(ctx, font_image)
+ j: int
+ for i in 0.. Color {
+ return RGBA(r, g, b, 255)
+}
+
+// Returns a color value from red, green, blue and alpha values.
+RGBA :: proc(r, g, b, a: u8) -> (res: Color) {
+ res.r = f32(r) / f32(255)
+ res.g = f32(g) / f32(255)
+ res.b = f32(b) / f32(255)
+ res.a = f32(a) / f32(255)
+ return
+}
+
+// Linearly interpolates from color c0 to c1, and returns resulting color value.
+LerpRGBA :: proc(c0, c1: Color, u: f32) -> (cint: Color) {
+ clamped := clamp(u, 0.0, 1.0)
+ oneminu := 1.0 - clamped
+ for _, i in cint {
+ cint[i] = c0[i] * oneminu + c1[i] * clamped
+ }
+
+ return
+}
+
+// Returns color value specified by hue, saturation and lightness.
+// HSL values are all in range [0..1], alpha will be set to 255.
+HSL :: proc(h, s, l: f32) -> Color {
+ return HSLA(h,s,l,255)
+}
+
+// Returns color value specified by hue, saturation and lightness and alpha.
+// HSL values are all in range [0..1], alpha in range [0..255]
+HSLA :: proc(hue, saturation, lightness: f32, a: u8) -> (col: Color) {
+ hue_get :: proc(h, m1, m2: f32) -> f32 {
+ h := h
+
+ if h < 0 {
+ h += 1
+ }
+
+ if h > 1 {
+ h -= 1
+ }
+
+ if h < 1.0 / 6.0 {
+ return m1 + (m2 - m1) * h * 6.0
+ } else if h < 3.0 / 6.0 {
+ return m2
+ } else if h < 4.0 / 6.0 {
+ return m1 + (m2 - m1) * (2.0 / 3.0 - h) * 6.0
+ }
+
+ return m1
+ }
+
+ h := math.mod(hue, 1.0)
+ if h < 0.0 {
+ h += 1.0
+ }
+ s := clamp(saturation, 0.0, 1.0)
+ l := clamp(lightness, 0.0, 1.0)
+ m2 := l <= 0.5 ? (l * (1 + s)) : (l + s - l * s)
+ m1 := 2 * l - m2
+ col.r = clamp(hue_get(h + 1.0/3.0, m1, m2), 0.0, 1.0)
+ col.g = clamp(hue_get(h, m1, m2), 0.0, 1.0)
+ col.b = clamp(hue_get(h - 1.0/3.0, m1, m2), 0.0, 1.0)
+ col.a = f32(a) / 255.0
+ return
+}
+
+// converts a 0xAARRGGBB hex value to a Color
+ColorHex :: proc(color: u32) -> (res: Color) {
+ color := color
+ res.b = f32(0x000000FF & color) / 255; color >>= 8
+ res.g = f32(0x000000FF & color) / 255; color >>= 8
+ res.r = f32(0x000000FF & color) / 255; color >>= 8
+ res.a = f32(0x000000FF & color) / 255
+ return
+}
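+
+// Illustrative sketch (added for clarity, not part of the original file): the color
+// constructors above all return the same [4]f32 Color, so they can be mixed freely.
+@(private="file")
+_example_colors :: proc() -> Color {
+	opaque_red := RGB(255, 0, 0)          // alpha defaults to fully opaque
+	translucent := RGBA(0, 128, 255, 128) // roughly 50% alpha
+	from_hex := ColorHex(0xFF336699)      // u32 layout is 0xAARRGGBB
+	pastel := HSLA(0.6, 0.5, 0.8, 255)    // hue/saturation/lightness in [0..1]
+	// Blend the byte-based colors, then blend the result with the remaining two.
+	mixed := LerpRGBA(opaque_red, translucent, 0.5)
+	mixed = LerpRGBA(mixed, from_hex, 0.5)
+	return LerpRGBA(mixed, pastel, 0.5)
+}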
+
+///////////////////////////////////////////////////////////
+// TRANSFORMS
+//
+// The following functions can be used to make calculations on 2x3 transformation matrices.
+// A 2x3 matrix is represented as float[6].
+///////////////////////////////////////////////////////////
+
+// Sets the transform to identity matrix.
+TransformIdentity :: proc(t: ^Matrix) {
+ t[0] = 1
+ t[1] = 0
+ t[2] = 0
+ t[3] = 1
+ t[4] = 0
+ t[5] = 0
+}
+
+// Sets the transform to a translation matrix.
+TransformTranslate :: proc(t: ^Matrix, tx, ty: f32) {
+ t[0] = 1
+ t[1] = 0
+ t[2] = 0
+ t[3] = 1
+ t[4] = tx
+ t[5] = ty
+}
+
+// Sets the transform to scale matrix.
+TransformScale :: proc(t: ^Matrix, sx, sy: f32) {
+ t[0] = sx
+ t[1] = 0
+ t[2] = 0
+ t[3] = sy
+ t[4] = 0
+ t[5] = 0
+}
+
+// Sets the transform to rotate matrix. Angle is specified in radians.
+TransformRotate :: proc(t: ^Matrix, a: f32) {
+ cs := math.cos(a)
+ sn := math.sin(a)
+ t[0] = cs
+ t[1] = sn
+ t[2] = -sn
+ t[3] = cs
+ t[4] = 0
+ t[5] = 0
+}
+
+// Sets the transform to skew-x matrix. Angle is specified in radians.
+TransformSkewX :: proc(t: ^Matrix, a: f32) {
+ t[0] = 1
+ t[1] = 0
+ t[2] = math.tan(a)
+ t[3] = 1
+ t[4] = 0
+ t[5] = 0
+}
+
+// Sets the transform to skew-y matrix. Angle is specified in radians.
+TransformSkewY :: proc(t: ^Matrix, a: f32) {
+ t[0] = 1
+ t[1] = math.tan(a)
+ t[2] = 0
+ t[3] = 1
+ t[4] = 0
+ t[5] = 0
+}
+
+// Sets the transform to the result of multiplying two transforms, i.e. A = A*B.
+TransformMultiply :: proc(t: ^Matrix, s: Matrix) {
+ t0 := t[0] * s[0] + t[1] * s[2]
+ t2 := t[2] * s[0] + t[3] * s[2]
+ t4 := t[4] * s[0] + t[5] * s[2] + s[4]
+ t[1] = t[0] * s[1] + t[1] * s[3]
+ t[3] = t[2] * s[1] + t[3] * s[3]
+ t[5] = t[4] * s[1] + t[5] * s[3] + s[5]
+ t[0] = t0
+ t[2] = t2
+ t[4] = t4
+}
+
+// Sets the transform to the result of multiplying two transforms, i.e. A = B*A.
+TransformPremultiply :: proc(t: ^Matrix, s: Matrix) {
+ temp := s
+ TransformMultiply(&temp, t^)
+ t^ = temp
+}
+
+// Sets the destination to inverse of specified transform.
+// Returns true if the inverse could be calculated, else false.
+TransformInverse :: proc(inv: ^Matrix, t: Matrix) -> bool {
+ // TODO could be bad math? due to types
+ det := f64(t[0]) * f64(t[3]) - f64(t[2]) * f64(t[1])
+
+ if det > -1e-6 && det < 1e-6 {
+ TransformIdentity(inv)
+ return false
+ }
+
+ invdet := 1.0 / det
+ inv[0] = f32(f64(t[3]) * invdet)
+ inv[2] = f32(f64(-t[2]) * invdet)
+ inv[4] = f32((f64(t[2]) * f64(t[5]) - f64(t[3]) * f64(t[4])) * invdet)
+ inv[1] = f32(f64(-t[1]) * invdet)
+ inv[3] = f32(f64(t[0]) * invdet)
+ inv[5] = f32((f64(t[1]) * f64(t[4]) - f64(t[0]) * f64(t[5])) * invdet)
+ return true
+}
+
+// Transform a point by given transform.
+TransformPoint :: proc(
+ dx: ^f32,
+ dy: ^f32,
+ t: Matrix,
+ sx: f32,
+ sy: f32,
+) {
+ dx^ = sx * t[0] + sy * t[2] + t[4]
+ dy^ = sx * t[1] + sy * t[3] + t[5]
+}
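+
+// Illustrative sketch (added for clarity, not part of the original file): composing a
+// transform, mapping a point through it, and mapping the result back with the inverse.
+@(private="file")
+_example_transform_round_trip :: proc() -> (x, y: f32) {
+	xform: Matrix
+	TransformTranslate(&xform, 100, 50)
+	rot: Matrix
+	TransformRotate(&rot, DegToRad(90))
+	// Pre-multiplying applies the rotation before the translation (A = B*A).
+	TransformPremultiply(&xform, rot)
+
+	px, py: f32
+	TransformPoint(&px, &py, xform, 10, 0) // (10,0) -> rotated to (0,10) -> translated to (100,60)
+
+	inv: Matrix
+	if TransformInverse(&inv, xform) {
+		TransformPoint(&x, &y, inv, px, py) // maps back to approximately (10, 0)
+	}
+	return
+}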
+
+DegToRad :: proc(deg: f32) -> f32 {
+ return deg / 180.0 * math.PI
+}
+
+RadToDeg :: proc(rad: f32) -> f32 {
+ return rad / math.PI * 180.0
+}
+
+///////////////////////////////////////////////////////////
+// STATE MANAGEMENT
+//
+// NanoVG contains state which represents how paths will be rendered.
+// The state contains transform, fill and stroke styles, text and font styles,
+// and scissor clipping.
+///////////////////////////////////////////////////////////
+
+// Pushes and saves the current render state into a state stack.
+// A matching Restore() must be used to restore the state.
+Save :: proc(ctx: ^Context) {
+ if ctx.nstates >= MAX_STATES {
+ return
+ }
+
+ // copy prior
+ if ctx.nstates > 0 {
+ ctx.states[ctx.nstates] = ctx.states[ctx.nstates-1]
+ }
+
+ ctx.nstates += 1
+}
+
+// Pops and restores current render state.
+Restore :: proc(ctx: ^Context) {
+ if ctx.nstates <= 1 {
+ return
+ }
+
+ ctx.nstates -= 1
+}
+
+// NOTE useful helper
+@(deferred_in=Restore)
+SaveScoped :: #force_inline proc(ctx: ^Context) {
+ Save(ctx)
+}
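+
+// Illustrative sketch (added for clarity, not part of the original file): temporarily
+// overriding render state with an explicit Save/Restore pair, or with SaveScoped which
+// defers the Restore to the end of the enclosing scope.
+@(private="file")
+_example_state_stack :: proc(ctx: ^Context) {
+	Save(ctx)
+	GlobalAlpha(ctx, 0.5)
+	Translate(ctx, 10, 10)
+	// ... draw half-transparent, offset content ...
+	Restore(ctx) // alpha and transform return to their previous values
+
+	{
+		SaveScoped(ctx) // Restore(ctx) runs automatically when this block ends
+		Rotate(ctx, DegToRad(45))
+		// ... draw rotated content ...
+	}
+}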
+
+__setPaintColor :: proc(p: ^Paint, color: Color) {
+ p^ = {}
+ TransformIdentity(&p.xform)
+ p.radius = 0
+ p.feather = 1
+ p.innerColor = color
+ p.outerColor = color
+}
+
+// Resets current render state to default values. Does not affect the render state stack.
+Reset :: proc(ctx: ^Context) {
+ state := __getState(ctx)
+ state^ = {}
+
+ __setPaintColor(&state.fill, RGBA(255, 255, 255, 255))
+ __setPaintColor(&state.stroke, RGBA(0, 0, 0, 255))
+
+ state.compositeOperation = __compositeOperationState(.SOURCE_OVER)
+ state.shapeAntiAlias = true
+ state.strokeWidth = 1
+ state.miterLimit = 10
+ state.lineCap = .BUTT
+ state.lineJoin = .MITER
+ state.alpha = 1
+ TransformIdentity(&state.xform)
+
+ state.scissor.extent[0] = -1
+ state.scissor.extent[1] = -1
+
+ // font settings
+ state.fontSize = 16
+ state.letterSpacing = 0
+ state.lineHeight = 1
+ state.fontBlur = 0
+ state.alignHorizontal = .LEFT
+ state.alignVertical = .BASELINE
+ state.fontId = 0
+}
+
+///////////////////////////////////////////////////////////
+// STATE SETTING
+///////////////////////////////////////////////////////////
+
+// Sets whether to draw antialiasing for Stroke() and Fill(). It's enabled by default.
+ShapeAntiAlias :: proc(ctx: ^Context, enabled: bool) {
+ state := __getState(ctx)
+ state.shapeAntiAlias = enabled
+}
+
+// Sets the stroke width of the stroke style.
+StrokeWidth :: proc(ctx: ^Context, width: f32) {
+ state := __getState(ctx)
+ state.strokeWidth = width
+}
+
+// Sets the miter limit of the stroke style.
+// Miter limit controls when a sharp corner is beveled.
+MiterLimit :: proc(ctx: ^Context, limit: f32) {
+ state := __getState(ctx)
+ state.miterLimit = limit
+}
+
+// Sets how the end of the line (cap) is drawn.
+// Can be one of: .BUTT (default), .ROUND, .SQUARE.
+LineCap :: proc(ctx: ^Context, cap: LineCapType) {
+ state := __getState(ctx)
+ state.lineCap = cap
+}
+
+// Sets how sharp path corners are drawn.
+// Can be one of: .MITER (default), .ROUND, .BEVEL.
+LineJoin :: proc(ctx: ^Context, join: LineCapType) {
+ state := __getState(ctx)
+ state.lineJoin = join
+}
+
+// Sets the transparency applied to all rendered shapes.
+// Already transparent paths will get proportionally more transparent as well.
+GlobalAlpha :: proc(ctx: ^Context, alpha: f32) {
+ state := __getState(ctx)
+ state.alpha = alpha
+}
+
+// Sets current stroke style to a solid color.
+StrokeColor :: proc(ctx: ^Context, color: Color) {
+ state := __getState(ctx)
+ __setPaintColor(&state.stroke, color)
+}
+
+// Sets current stroke style to a paint, which can be one of the gradients or a pattern.
+StrokePaint :: proc(ctx: ^Context, paint: Paint) {
+ state := __getState(ctx)
+ state.stroke = paint
+ TransformMultiply(&state.stroke.xform, state.xform)
+}
+
+// Sets current fill style to a solid color.
+FillColor :: proc(ctx: ^Context, color: Color) {
+ state := __getState(ctx)
+ __setPaintColor(&state.fill, color)
+}
+
+// Sets current fill style to a paint, which can be one of the gradients or a pattern.
+FillPaint :: proc(ctx: ^Context, paint: Paint) {
+ state := __getState(ctx)
+ state.fill = paint
+ TransformMultiply(&state.fill.xform, state.xform)
+}
+
+///////////////////////////////////////////////////////////
+// STATE TRANSFORMS
+//
+// The paths, gradients, patterns and scissor region are transformed by a transformation
+// matrix at the time they are passed to the API.
+// The current transformation matrix is an affine matrix:
+// [sx kx tx]
+// [ky sy ty]
+// [ 0 0 1]
+// Where: sx,sy define scaling, kx,ky skewing, and tx,ty translation.
+// The last row is assumed to be 0,0,1 and is not stored.
+//
+// Apart from ResetTransform(), each transformation function first creates a
+// specific transformation matrix and pre-multiplies the current transformation by it.
+//
+// The current coordinate system (transformation) can be saved and restored using Save() and Restore().
+///////////////////////////////////////////////////////////
+
+Transform :: proc(ctx: ^Context, a, b, c, d, e, f: f32) {
+ state := __getState(ctx)
+ TransformPremultiply(&state.xform, {a, b, c, d, e, f})
+}
+
+// Resets current transform to an identity matrix.
+ResetTransform :: proc(ctx: ^Context) {
+ state := __getState(ctx)
+ TransformIdentity(&state.xform)
+}
+
+// Translates current coordinate system.
+Translate :: proc(ctx: ^Context, x, y: f32) {
+ state := __getState(ctx)
+ temp: Matrix
+ TransformTranslate(&temp, x, y)
+ TransformPremultiply(&state.xform, temp)
+}
+
+// Rotates current coordinate system. Angle is specified in radians.
+Rotate :: proc(ctx: ^Context, angle: f32) {
+ state := __getState(ctx)
+ temp: Matrix
+ TransformRotate(&temp, angle)
+ TransformPremultiply(&state.xform, temp)
+}
+
+// Skews the current coordinate system along X axis. Angle is specified in radians.
+SkewX :: proc(ctx: ^Context, angle: f32) {
+ state := __getState(ctx)
+ temp: Matrix
+ TransformSkewX(&temp, angle)
+ TransformPremultiply(&state.xform, temp)
+}
+
+// Skews the current coordinate system along Y axis. Angle is specified in radians.
+SkewY :: proc(ctx: ^Context, angle: f32) {
+ state := __getState(ctx)
+ temp: Matrix
+ TransformSkewY(&temp, angle)
+ TransformPremultiply(&state.xform, temp)
+}
+
+// Scales the current coordinate system.
+Scale :: proc(ctx: ^Context, x, y: f32) {
+ state := __getState(ctx)
+ temp: Matrix
+ TransformScale(&temp, x, y)
+ TransformPremultiply(&state.xform, temp)
+}
+
+/*
+ Stores the top part (a-f) of the current transformation matrix into the specified buffer.
+ [a c e]
+ [b d f]
+ [0 0 1]
+ There should be space for 6 floats in the return buffer for the values a-f.
+*/
+CurrentTransform :: proc(ctx: ^Context, xform: ^Matrix) {
+ if xform == nil {
+ return
+ }
+ state := __getState(ctx)
+ xform^ = state.xform
+}
+
+///////////////////////////////////////////////////////////
+// IMAGE HANDLING
+//
+// NanoVG allows you to load jpg, png, psd, tga, pic and gif files to be used for rendering.
+// In addition you can upload your own image. The image loading is provided by stb_image.
+// The parameter imageFlags is a combination of flags defined in ImageFlags.
+///////////////////////////////////////////////////////////
+
+// Creates image by loading it from the disk from specified file name.
+// Returns handle to the image.
+CreateImagePath :: proc(ctx: ^Context, filename: cstring, imageFlags: ImageFlags) -> int {
+ stbi.set_unpremultiply_on_load(1)
+ stbi.convert_iphone_png_to_rgb(1)
+ w, h, n: i32
+ img := stbi.load(filename, &w, &h, &n, 4)
+
+ if img == nil {
+ return 0
+ }
+
+ data := img[:int(w) * int(h) * 4] // stbi.load was asked for 4 channels per pixel; n holds the file's original channel count
+ image := CreateImageRGBA(ctx, int(w), int(h), imageFlags, data)
+ stbi.image_free(img)
+ return image
+}
+
+// Creates image by loading it from the specified chunk of memory.
+// Returns handle to the image.
+CreateImageMem :: proc(ctx: ^Context, data: []byte, imageFlags: ImageFlags) -> int {
+ stbi.set_unpremultiply_on_load(1)
+ stbi.convert_iphone_png_to_rgb(1)
+ w, h, n: i32
+ img := stbi.load_from_memory(raw_data(data), i32(len(data)), &w, &h, &n, 4)
+
+ if img == nil {
+ return 0
+ }
+
+ pixel_data := img[:int(w) * int(h) * 4] // stbi.load_from_memory was asked for 4 channels per pixel; n holds the file's original channel count
+ image := CreateImageRGBA(ctx, int(w), int(h), imageFlags, pixel_data)
+ stbi.image_free(img)
+ return image
+}
+
+CreateImage :: proc{CreateImagePath, CreateImageMem}
+
+// Creates image from specified image data.
+// Returns handle to the image.
+CreateImageRGBA :: proc(ctx: ^Context, w, h: int, imageFlags: ImageFlags, data: []byte) -> int {
+ assert(ctx.params.renderCreateTexture != nil)
+ return ctx.params.renderCreateTexture(
+ ctx.params.userPtr,
+ .RGBA,
+ w, h,
+ imageFlags,
+ data,
+ )
+}
+
+// Updates image data specified by image handle.
+UpdateImage :: proc(ctx: ^Context, image: int, data: []byte) {
+ assert(ctx.params.renderGetTextureSize != nil)
+ assert(ctx.params.renderUpdateTexture != nil)
+
+ w, h: int
+ found := ctx.params.renderGetTextureSize(ctx.params.userPtr, image, &w, &h)
+ if found {
+ ctx.params.renderUpdateTexture(ctx.params.userPtr, image, 0, 0, w, h, data)
+ }
+}
+
+// Returns the dimensions of a created image.
+ImageSize :: proc(ctx: ^Context, image: int) -> (w, h: int) {
+ assert(ctx.params.renderGetTextureSize != nil)
+ ctx.params.renderGetTextureSize(ctx.params.userPtr, image, &w, &h)
+ return
+}
+
+// Deletes created image.
+DeleteImage :: proc(ctx: ^Context, image: int) {
+ assert(ctx.params.renderDeleteTexture != nil)
+ ctx.params.renderDeleteTexture(ctx.params.userPtr, image)
+}
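+
+// Illustrative sketch (added for clarity, not part of the original file): loading an
+// image from disk, querying its size and releasing it. "texture.png" is a hypothetical
+// path; CreateImagePath returns 0 on failure.
+@(private="file")
+_example_image_lifecycle :: proc(ctx: ^Context) -> (w, h: int) {
+	image := CreateImagePath(ctx, "texture.png", {.REPEAT_X, .REPEAT_Y})
+	if image == 0 {
+		return // file missing or not decodable
+	}
+	defer DeleteImage(ctx, image)
+
+	w, h = ImageSize(ctx, image)
+	// ... draw with the handle here (e.g. via ImagePattern), then let the deferred delete clean up ...
+	return
+}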
+
+///////////////////////////////////////////////////////////
+// PAINT gradients / image
+//
+// NanoVG supports four types of paints: linear gradient, box gradient, radial gradient and image pattern.
+// These can be used as paints for strokes and fills.
+///////////////////////////////////////////////////////////
+
+/*
+ Creates and returns a linear gradient. Parameters (sx,sy)-(ex,ey) specify the start and end coordinates
+ of the linear gradient, icol specifies the start color and ocol the end color.
+ The gradient is transformed by the current transform when it is passed to FillPaint() or StrokePaint().
+*/
+LinearGradient :: proc(
+ sx, sy: f32,
+ ex, ey: f32,
+ icol: Color,
+ ocol: Color,
+) -> (p: Paint) {
+ LARGE :: f32(1e5)
+
+ // Calculate transform aligned to the line
+ dx := ex - sx
+ dy := ey - sy
+ d := math.sqrt(dx*dx + dy*dy)
+ if d > 0.0001 {
+ dx /= d
+ dy /= d
+ } else {
+ dx = 0
+ dy = 1
+ }
+
+ p.xform[0] = dy
+ p.xform[1] = -dx
+ p.xform[2] = dx
+ p.xform[3] = dy
+ p.xform[4] = sx - dx*LARGE
+ p.xform[5] = sy - dy*LARGE
+
+ p.extent[0] = LARGE
+ p.extent[1] = LARGE + d*0.5
+
+ p.feather = max(1.0, d)
+
+ p.innerColor = icol
+ p.outerColor = ocol
+
+ return
+}
+
+/*
+ Creates and returns a radial gradient. Parameters (cx,cy) specify the center, inr and outr specify
+ the inner and outer radius of the gradient, icol specifies the start color and ocol the end color.
+ The gradient is transformed by the current transform when it is passed to FillPaint() or StrokePaint().
+*/
+RadialGradient :: proc(
+ cx, cy: f32,
+ inr: f32,
+ outr: f32,
+ icol: Color,
+ ocol: Color,
+) -> (p: Paint) {
+ r := (inr+outr)*0.5
+ f := (outr-inr)
+
+ TransformIdentity(&p.xform)
+ p.xform[4] = cx
+ p.xform[5] = cy
+
+ p.extent[0] = r
+ p.extent[1] = r
+
+ p.radius = r
+ p.feather = max(1.0, f)
+
+ p.innerColor = icol
+ p.outerColor = ocol
+
+ return
+}
+
+/*
+ Creates and returns a box gradient. A box gradient is a feathered rounded rectangle; it is useful for rendering
+ drop shadows or highlights for boxes. Parameters (x,y) define the top-left corner of the rectangle,
+ (w,h) define the size of the rectangle, r defines the corner radius, and f the feather. Feather defines how
+ blurry the border of the rectangle is. Parameter icol specifies the inner color and ocol the outer color of the gradient.
+ The gradient is transformed by the current transform when it is passed to FillPaint() or StrokePaint().
+*/
+BoxGradient :: proc(
+ x, y: f32,
+ w, h: f32,
+ r: f32,
+ f: f32,
+ icol: Color,
+ ocol: Color,
+) -> (p: Paint) {
+ TransformIdentity(&p.xform)
+ p.xform[4] = x+w*0.5
+ p.xform[5] = y+h*0.5
+
+ p.extent[0] = w*0.5
+ p.extent[1] = h*0.5
+
+ p.radius = r
+ p.feather = max(1.0, f)
+
+ p.innerColor = icol
+ p.outerColor = ocol
+
+ return
+}
+
+/*
+ Creates and returns an image pattern. Parameters (cx,cy) specify the top-left location of the image pattern,
+ (w,h) the size of one image, angle the rotation around the top-left corner, and image the handle of the image to render.
+ The pattern is transformed by the current transform when it is passed to FillPaint() or StrokePaint().
+*/
+ImagePattern :: proc(
+ cx, cy: f32,
+ w, h: f32,
+ angle: f32,
+ image: int,
+ alpha: f32,
+) -> (p: Paint) {
+ TransformRotate(&p.xform, angle)
+ p.xform[4] = cx
+ p.xform[5] = cy
+
+ p.extent[0] = w
+ p.extent[1] = h
+
+ p.image = image
+ p.innerColor = {1, 1, 1, alpha}
+ p.outerColor = p.innerColor
+
+ return
+}
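+
+// Illustrative sketch (added for clarity, not part of the original file): gradients and
+// image patterns are plain Paint values that only take effect once they are passed to
+// FillPaint or StrokePaint. `image` is assumed to be a previously created image handle.
+@(private="file")
+_example_paints :: proc(ctx: ^Context, image: int) {
+	// Vertical white-to-black gradient over a 100 px tall area.
+	FillPaint(ctx, LinearGradient(0, 0, 0, 100, RGB(255, 255, 255), RGB(0, 0, 0)))
+
+	// Drop-shadow style paint: 200x100 rectangle, 10 px corner radius, 20 px feather.
+	// (Each FillPaint call overwrites the previous fill paint.)
+	FillPaint(ctx, BoxGradient(10, 10, 200, 100, 10, 20, RGBA(0, 0, 0, 128), RGBA(0, 0, 0, 0)))
+
+	// Tile the image over 64x64 cells and use it for strokes.
+	StrokePaint(ctx, ImagePattern(0, 0, 64, 64, 0, image, 1))
+}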
+
+///////////////////////////////////////////////////////////
+// SCISSOR
+//
+// Scissoring allows you to clip the rendering into a rectangle. This is useful for various
+// user interface cases like rendering a text edit or a timeline.
+///////////////////////////////////////////////////////////
+
+// Sets the current scissor rectangle.
+// The scissor rectangle is transformed by the current transform.
+Scissor :: proc(
+ ctx: ^Context,
+ x, y: f32,
+ width, height: f32,
+) {
+ state := __getState(ctx)
+ w := max(width, 0)
+ h := max(height, 0)
+
+ TransformIdentity(&state.scissor.xform)
+ state.scissor.xform[4] = x + w * 0.5
+ state.scissor.xform[5] = y + h * 0.5
+ TransformMultiply(&state.scissor.xform, state.xform)
+
+ state.scissor.extent[0] = w * 0.5
+ state.scissor.extent[1] = h * 0.5
+}
+
+/*
+ Intersects current scissor rectangle with the specified rectangle.
+ The scissor rectangle is transformed by the current transform.
+ Note: in case the rotation of previous scissor rect differs from
+ the current one, the intersection will be done between the specified
+ rectangle and the previous scissor rectangle transformed in the current
+ transform space. The resulting shape is always a rectangle.
+*/
+IntersectScissor :: proc(
+ ctx: ^Context,
+ x, y, w, h: f32,
+) {
+ isect_rects :: proc(
+ dst: ^[4]f32,
+ ax, ay, aw, ah: f32,
+ bx, by, bw, bh: f32,
+ ) {
+ minx := max(ax, bx)
+ miny := max(ay, by)
+ maxx := min(ax + aw, bx + bw)
+ maxy := min(ay + ah, by + bh)
+ dst[0] = minx
+ dst[1] = miny
+ dst[2] = max(0.0, maxx - minx)
+ dst[3] = max(0.0, maxy - miny)
+ }
+
+ state := __getState(ctx)
+
+ // If no previous scissor has been set, set the scissor as current scissor.
+ if state.scissor.extent[0] < 0 {
+ Scissor(ctx, x, y, w, h)
+ return
+ }
+
+ pxform := state.scissor.xform
+ ex := state.scissor.extent[0]
+ ey := state.scissor.extent[1]
+
+ invxorm: Matrix
+ TransformInverse(&invxorm, state.xform)
+ TransformMultiply(&pxform, invxorm)
+ tex := ex * abs(pxform[0]) + ey * abs(pxform[2])
+ tey := ex * abs(pxform[1]) + ey * abs(pxform[3])
+
+ rect: [4]f32
+ isect_rects(&rect, pxform[4] - tex, pxform[5] - tey, tex * 2, tey * 2, x,y,w,h)
+ Scissor(ctx, rect.x, rect.y, rect.z, rect.w)
+}
+
+// Resets and disables scissoring.
+ResetScissor :: proc(ctx: ^Context) {
+ state := __getState(ctx)
+ state.scissor.xform = 0
+ state.scissor.extent[0] = -1
+ state.scissor.extent[1] = -1
+}
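+
+// Illustrative sketch (added for clarity, not part of the original file): clipping a
+// widget to its bounds and further clipping an inner region; the scissor is part of the
+// saved state, so Restore undoes both clips.
+@(private="file")
+_example_clipping :: proc(ctx: ^Context) {
+	Save(ctx)
+	Scissor(ctx, 0, 0, 200, 100)           // clip to the widget rectangle
+	// ... draw the widget frame ...
+	IntersectScissor(ctx, 10, 10, 180, 80) // tighter clip for the inner content
+	// ... draw scrollable inner content ...
+	Restore(ctx)
+}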
+
+///////////////////////////////////////////////////////////
+// Global composite operation
+//
+// The composite operations in NanoVG are modeled after HTML Canvas API, and
+// the blend func is based on OpenGL (see corresponding manuals for more info).
+// The colors in the blending state have premultiplied alpha.
+///////////////////////////////////////////////////////////
+
+// state table instead of if else chains
+OP_STATE_TABLE := [CompositeOperation][2]BlendFactor {
+ .SOURCE_OVER = {.ONE, .ONE_MINUS_SRC_ALPHA},
+ .SOURCE_IN = {.DST_ALPHA, .ZERO},
+ .SOURCE_OUT = {.ONE_MINUS_DST_ALPHA, .ZERO},
+ .ATOP = {.DST_ALPHA, .ONE_MINUS_SRC_ALPHA},
+
+ .DESTINATION_OVER = {.ONE_MINUS_DST_ALPHA, .ONE},
+ .DESTINATION_IN = {.ZERO, .SRC_ALPHA},
+ .DESTINATION_OUT = {.ZERO, .ONE_MINUS_SRC_ALPHA},
+ .DESTINATION_ATOP = {.ONE_MINUS_DST_ALPHA, .SRC_ALPHA},
+
+ .LIGHTER = {.ONE, .ONE},
+ .COPY = {.ONE, .ZERO},
+ .XOR = {.ONE_MINUS_DST_ALPHA, .ONE_MINUS_SRC_ALPHA},
+}
+
+__compositeOperationState :: proc(op: CompositeOperation) -> (res: CompositeOperationState) {
+ factors := OP_STATE_TABLE[op]
+ res.srcRGB = factors.x
+ res.dstRGB = factors.y
+ res.srcAlpha = factors.x
+ res.dstAlpha = factors.y
+ return
+}
+
+// Sets the composite operation. The op parameter should be one of CompositeOperation.
+GlobalCompositeOperation :: proc(ctx: ^Context, op: CompositeOperation) {
+ state := __getState(ctx)
+ state.compositeOperation = __compositeOperationState(op)
+}
+
+// Sets the composite operation with custom pixel arithmetic. The parameters should be values of BlendFactor.
+GlobalCompositeBlendFunc :: proc(ctx: ^Context, sfactor, dfactor: BlendFactor) {
+ GlobalCompositeBlendFuncSeparate(ctx, sfactor, dfactor, sfactor, dfactor)
+}
+
+// Sets the composite operation with custom pixel arithmetic for RGB and alpha components separately. The parameters should be values of BlendFactor.
+GlobalCompositeBlendFuncSeparate :: proc(
+ ctx: ^Context,
+ srcRGB: BlendFactor,
+ dstRGB: BlendFactor,
+ srcAlpha: BlendFactor,
+ dstAlpha: BlendFactor,
+) {
+ state := __getState(ctx)
+ state.compositeOperation = CompositeOperationState{
+ srcRGB,
+ dstRGB,
+ srcAlpha,
+ dstAlpha,
+ }
+}
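+
+// Illustrative sketch (added for clarity, not part of the original file): switching to
+// additive blending for a glow pass and restoring the default afterwards via the state
+// stack (compositeOperation is part of State).
+@(private="file")
+_example_composite :: proc(ctx: ^Context) {
+	Save(ctx)
+	GlobalCompositeOperation(ctx, .LIGHTER) // maps to {ONE, ONE} in OP_STATE_TABLE
+	// ... draw glow geometry ...
+	Restore(ctx)
+
+	// The same effect can be spelled out with an explicit blend func:
+	// GlobalCompositeBlendFunc(ctx, .ONE, .ONE)
+}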
+
+///////////////////////////////////////////////////////////
+// Points / Path handling
+///////////////////////////////////////////////////////////
+
+__cross :: proc(dx0, dy0, dx1, dy1: f32) -> f32 {
+ return dx1*dy0 - dx0*dy1
+}
+
+__ptEquals :: proc(x1, y1, x2, y2, tol: f32) -> bool {
+ dx := x2 - x1
+ dy := y2 - y1
+ return dx * dx + dy * dy < tol * tol
+}
+
+__distPtSeg :: proc(x, y, px, py, qx, qy: f32) -> f32 {
+ pqx := qx - px
+ pqy := qy - py
+ dx := x - px
+ dy := y - py
+ d := pqx * pqx + pqy * pqy
+ t := pqx * dx + pqy * dy
+
+ if d > 0 {
+ t /= d
+ }
+ t = clamp(t, 0, 1)
+
+ dx = px + t * pqx - x
+ dy = py + t * pqy - y
+ return dx * dx + dy * dy
+}
+
+__appendCommands :: proc(ctx: ^Context, values: ..f32) {
+ state := __getState(ctx)
+
+ if Commands(values[0]) != .CLOSE && Commands(values[0]) != .WINDING {
+ ctx.commandx = values[len(values)-2]
+ ctx.commandy = values[len(values)-1]
+ }
+ for i := 0; i < len(values); /**/ {
+ cmd := Commands(values[i])
+
+ switch cmd {
+ case .MOVE_TO, .LINE_TO:
+ TransformPoint(&values[i+1], &values[i+2], state.xform, values[i+1], values[i+2])
+ i += 3
+ case .BEZIER_TO:
+ TransformPoint(&values[i+1], &values[i+2], state.xform, values[i+1], values[i+2])
+ TransformPoint(&values[i+3], &values[i+4], state.xform, values[i+3], values[i+4])
+ TransformPoint(&values[i+5], &values[i+6], state.xform, values[i+5], values[i+6])
+ i += 7
+ case .CLOSE:
+ i += 1
+ case .WINDING:
+ i += 2
+ case:
+ i += 1
+ }
+ }
+
+ // append values
+ append(&ctx.commands, ..values)
+}
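+
+// Illustrative note (added for clarity, not part of the original file): the command stream
+// is a flat []f32 in which each opcode is followed by its payload, so a MoveTo-style
+// wrapper appends three floats and a BezierTo-style wrapper seven, e.g.
+//
+//	__appendCommands(ctx, f32(Commands.MOVE_TO), x, y)
+//	__appendCommands(ctx, f32(Commands.BEZIER_TO), c1x, c1y, c2x, c2y, x, y)
+//
+// Coordinates are mapped through the current state xform as they are appended, so the
+// flattening in __flattenPaths below operates entirely in transformed space.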
+
+__clearPathCache :: proc(ctx: ^Context) {
+ clear(&ctx.cache.points)
+ clear(&ctx.cache.paths)
+}
+
+__lastPath :: proc(ctx: ^Context) -> ^Path {
+ if len(ctx.cache.paths) > 0 {
+ return &ctx.cache.paths[len(ctx.cache.paths)-1]
+ }
+
+ return nil
+}
+
+__addPath :: proc(ctx: ^Context) {
+ append(&ctx.cache.paths, Path{
+ first = len(ctx.cache.points),
+ winding = .CCW,
+ })
+}
+
+__lastPoint :: proc(ctx: ^Context) -> ^Point {
+ if len(ctx.cache.points) > 0 {
+ return &ctx.cache.points[len(ctx.cache.points)-1]
+ }
+
+ return nil
+}
+
+__addPoint :: proc(ctx: ^Context, x, y: f32, flags: PointFlags) {
+ path := __lastPath(ctx)
+
+ if path == nil {
+ return
+ }
+
+ if path.count > 0 && len(ctx.cache.points) > 0 {
+ pt := __lastPoint(ctx)
+
+ if __ptEquals(pt.x, pt.y, x, y, ctx.distTol) {
+ pt.flags |= flags
+ return
+ }
+ }
+
+ append(&ctx.cache.points, Point{
+ x = x,
+ y = y,
+ flags = flags,
+ })
+ path.count += 1
+}
+
+__closePath :: proc(ctx: ^Context) {
+ path := __lastPath(ctx)
+ if path == nil {
+ return
+ }
+ path.closed = true
+}
+
+__pathWinding :: proc(ctx: ^Context, winding: Winding) {
+ path := __lastPath(ctx)
+ if path == nil {
+ return
+ }
+ path.winding = winding
+}
+
+__getAverageScale :: proc(t: []f32) -> f32 {
+ assert(len(t) > 4)
+ sx := math.sqrt(f64(t[0]) * f64(t[0]) + f64(t[2]) * f64(t[2]))
+ sy := math.sqrt(f64(t[1]) * f64(t[1]) + f64(t[3]) * f64(t[3]))
+ return f32((sx + sy) * 0.5)
+ // sx := math.sqrt(t[0] * t[0] + t[2] * t[2])
+ // sy := math.sqrt(t[1] * t[1] + t[3] * t[3])
+ // return (sx + sy) * 0.5
+}
+
+__triarea2 :: proc(ax, ay, bx, by, cx, cy: f32) -> f32 {
+ abx := bx - ax
+ aby := by - ay
+ acx := cx - ax
+ acy := cy - ay
+ return acx * aby - abx * acy
+}
+
+__polyArea :: proc(points: []Point) -> f32 {
+ area := f32(0)
+
+ for i := 2; i < len(points); i += 1 {
+ a := &points[0]
+ b := &points[i-1]
+ c := &points[i]
+ area += __triarea2(a.x, a.y, b.x, b.y, c.x, c.y)
+ }
+
+ return area * 0.5
+}
+
+__polyReverse :: proc(points: []Point) {
+ tmp: Point
+ i := 0
+ j := len(points) - 1
+
+ for i < j {
+ tmp = points[i]
+ points[i] = points[j]
+ points[j] = tmp
+ i += 1
+ j -= 1
+ }
+}
+
+__normalize :: proc(x, y: ^f32) -> f32 {
+ d := math.sqrt(x^ * x^ + y^ * y^)
+ if d > 1e-6 {
+ id := 1.0 / d
+ x^ *= id
+ y^ *= id
+ }
+ return d
+}
+
+__tesselateBezier :: proc(
+ ctx: ^Context,
+ x1, y1: f32,
+ x2, y2: f32,
+ x3, y3: f32,
+ x4, y4: f32,
+ level: int,
+ flags: PointFlags,
+) {
+ if level > 10 {
+ return
+ }
+
+ x12 := (x1 + x2) * 0.5
+ y12 := (y1 + y2) * 0.5
+ x23 := (x2 + x3) * 0.5
+ y23 := (y2 + y3) * 0.5
+ x34 := (x3 + x4) * 0.5
+ y34 := (y3 + y4) * 0.5
+ x123 := (x12 + x23) * 0.5
+ y123 := (y12 + y23) * 0.5
+
+ dx := x4 - x1
+ dy := y4 - y1
+ d2 := abs(((x2 - x4) * dy - (y2 - y4) * dx))
+ d3 := abs(((x3 - x4) * dy - (y3 - y4) * dx))
+
+ if (d2 + d3)*(d2 + d3) < ctx.tessTol * (dx*dx + dy*dy) {
+ __addPoint(ctx, x4, y4, flags)
+ return
+ }
+
+ x234 := (x23 + x34) * 0.5
+ y234 := (y23 + y34) * 0.5
+ x1234 := (x123 + x234) * 0.5
+ y1234 := (y123 + y234) * 0.5
+
+ __tesselateBezier(ctx, x1,y1, x12,y12, x123,y123, x1234,y1234, level+1, {})
+ __tesselateBezier(ctx, x1234,y1234, x234,y234, x34,y34, x4,y4, level+1, flags)
+}
+
+__flattenPaths :: proc(ctx: ^Context) {
+ cache := &ctx.cache
+
+ if len(cache.paths) > 0 {
+ return
+ }
+
+ // flatten
+ i := 0
+ for i < len(ctx.commands) {
+ cmd := Commands(ctx.commands[i])
+
+ switch cmd {
+ case .MOVE_TO:
+ __addPath(ctx)
+ p := ctx.commands[i + 1:]
+ __addPoint(ctx, p[0], p[1], {.CORNER})
+ i += 3
+
+ case .LINE_TO:
+ p := ctx.commands[i + 1:]
+ __addPoint(ctx, p[0], p[1], {.CORNER})
+ i += 3
+
+ case .BEZIER_TO:
+ if last := __lastPoint(ctx); last != nil {
+ cp1 := ctx.commands[i + 1:]
+ cp2 := ctx.commands[i + 3:]
+ p := ctx.commands[i + 5:]
+ __tesselateBezier(ctx, last.x,last.y, cp1[0],cp1[1], cp2[0],cp2[1], p[0],p[1], 0, {.CORNER})
+ }
+
+ i += 7
+
+ case .CLOSE:
+ __closePath(ctx)
+ i += 1
+
+ case .WINDING:
+ __pathWinding(ctx, Winding(ctx.commands[i + 1]))
+ i += 2
+
+ case: i += 1
+ }
+ }
+
+ cache.bounds[0] = 1e6
+ cache.bounds[1] = 1e6
+ cache.bounds[2] = -1e6
+ cache.bounds[3] = -1e6
+
+ // Calculate the direction and length of line segments.
+ for &path in cache.paths {
+ pts := cache.points[path.first:]
+
+ // If the first and last points are the same, remove the last, mark as closed path.
+ p0 := &pts[path.count-1]
+ p1 := &pts[0]
+ if __ptEquals(p0.x,p0.y, p1.x,p1.y, ctx.distTol) {
+ path.count -= 1
+ p0 = &pts[path.count-1]
+ path.closed = true
+ }
+
+ // enforce winding
+ if path.count > 2 {
+ area := __polyArea(pts[:path.count])
+
+ if path.winding == .CCW && area < 0 {
+ __polyReverse(pts[:path.count])
+ }
+
+ if path.winding == .CW && area > 0 {
+ __polyReverse(pts[:path.count])
+ }
+ }
+
+ for _ in 0.. f32 {
+ da := math.acos(r / (r + tol)) * 2
+ return max(2, math.ceil(arc / da))
+}
+
+__chooseBevel :: proc(
+ bevel: bool,
+ p0: ^Point,
+ p1: ^Point,
+ w: f32,
+ x0, y0, x1, y1: ^f32,
+) {
+ if bevel {
+ x0^ = p1.x + p0.dy * w
+ y0^ = p1.y - p0.dx * w
+ x1^ = p1.x + p1.dy * w
+ y1^ = p1.y - p1.dx * w
+ } else {
+ x0^ = p1.x + p1.dmx * w
+ y0^ = p1.y + p1.dmy * w
+ x1^ = p1.x + p1.dmx * w
+ y1^ = p1.y + p1.dmy * w
+ }
+}
+
+///////////////////////////////////////////////////////////
+// Vertex Setting
+///////////////////////////////////////////////////////////
+
+// set vertex & increase slice position (decreases length)
+__vset :: proc(dst: ^[]Vertex, x, y, u, v: f32, loc := #caller_location) {
+ dst[0] = {x, y, u, v}
+ dst^ = dst[1:]
+}
+
+__roundJoin :: proc(
+ dst: ^[]Vertex,
+ p0, p1: ^Point,
+ lw, rw: f32,
+ lu,ru: f32,
+ ncap: int,
+) {
+ dlx0, dly0 := p0.dy, -p0.dx
+ dlx1, dly1 := p1.dy, -p1.dx
+
+ if .LEFT in p1.flags {
+ lx0,ly0,lx1,ly1: f32
+ __chooseBevel(.INNER_BEVEL in p1.flags, p0, p1, lw, &lx0,&ly0, &lx1,&ly1)
+ a0 := math.atan2(-dly0, -dlx0)
+ a1 := math.atan2(-dly1, -dlx1)
+
+ if a1 > a0 {
+ a1 -= math.PI * 2
+ }
+
+ __vset(dst, lx0, ly0, lu, 1)
+ __vset(dst, p1.x - dlx0 * rw, p1.y - dly0 * rw, ru, 1)
+
+ temp := int(math.ceil((a0 - a1) / math.PI * f32(ncap)))
+ n := clamp(temp, 2, ncap)
+
+ for i := 0; i < n; i += 1 {
+ u := f32(i) / f32(n - 1)
+ a := a0 + u * (a1 - a0)
+ rx := p1.x + math.cos(a) * rw
+ ry := p1.y + math.sin(a) * rw
+ __vset(dst, p1.x, p1.y, 0.5, 1)
+ __vset(dst, rx, ry, ru, 1)
+ }
+
+ __vset(dst, lx1, ly1, lu, 1)
+ __vset(dst, p1.x - dlx1*rw, p1.y - dly1*rw, ru, 1)
+ } else {
+ rx0,ry0,rx1,ry1: f32
+ __chooseBevel(.INNER_BEVEL in p1.flags, p0, p1, -rw, &rx0, &ry0, &rx1, &ry1)
+ a0 := math.atan2(dly0, dlx0)
+ a1 := math.atan2(dly1, dlx1)
+ if a1 < a0 {
+ a1 += math.PI * 2
+ }
+
+ __vset(dst, p1.x + dlx0*rw, p1.y + dly0*rw, lu, 1)
+ __vset(dst, rx0, ry0, ru, 1)
+
+ temp := int(math.ceil((a1 - a0) / math.PI * f32(ncap)))
+ n := clamp(temp, 2, ncap)
+
+ for i := 0; i < n; i += 1 {
+ u := f32(i) / f32(n - 1)
+ a := a0 + u*(a1-a0)
+ lx := p1.x + math.cos(a) * lw
+ ly := p1.y + math.sin(a) * lw
+ __vset(dst, lx, ly, lu, 1)
+ __vset(dst, p1.x, p1.y, 0.5, 1)
+ }
+
+ __vset(dst, p1.x + dlx1*rw, p1.y + dly1*rw, lu, 1)
+ __vset(dst, rx1, ry1, ru, 1)
+ }
+}
+
+__bevelJoin :: proc(
+ dst: ^[]Vertex,
+ p0, p1: ^Point,
+ lw, rw: f32,
+ lu, ru: f32,
+) {
+ dlx0,dly0 := p0.dy, -p0.dx
+ dlx1, dly1 := p1.dy, -p1.dx
+
+ rx0, ry0, rx1, ry1: f32
+ lx0, ly0, lx1, ly1: f32
+
+ if .LEFT in p1.flags {
+ __chooseBevel(.INNER_BEVEL in p1.flags, p0, p1, lw, &lx0,&ly0, &lx1,&ly1)
+
+ __vset(dst, lx0, ly0, lu, 1)
+ __vset(dst, p1.x - dlx0*rw, p1.y - dly0*rw, ru, 1)
+
+ if .BEVEL in p1.flags {
+ __vset(dst, lx0, ly0, lu, 1)
+ __vset(dst, p1.x - dlx0*rw, p1.y - dly0*rw, ru, 1)
+
+ __vset(dst, lx1, ly1, lu, 1)
+ __vset(dst, p1.x - dlx1*rw, p1.y - dly1*rw, ru, 1)
+ } else {
+ rx0 = p1.x - p1.dmx * rw
+ ry0 = p1.y - p1.dmy * rw
+
+ __vset(dst, p1.x, p1.y, 0.5, 1)
+ __vset(dst, p1.x - dlx0*rw, p1.y - dly0*rw, ru, 1)
+
+ __vset(dst, rx0, ry0, ru, 1)
+ __vset(dst, rx0, ry0, ru, 1)
+
+ __vset(dst, p1.x, p1.y, 0.5, 1)
+ __vset(dst, p1.x - dlx1*rw, p1.y - dly1*rw, ru, 1)
+ }
+
+ __vset(dst, lx1, ly1, lu, 1)
+ __vset(dst, p1.x - dlx1*rw, p1.y - dly1*rw, ru, 1)
+ } else {
+ __chooseBevel(.INNER_BEVEL in p1.flags, p0, p1, -rw, &rx0,&ry0, &rx1,&ry1)
+
+ __vset(dst, p1.x + dlx0*lw, p1.y + dly0*lw, lu, 1)
+ __vset(dst, rx0, ry0, ru, 1)
+
+ if .BEVEL in p1.flags {
+ __vset(dst, p1.x + dlx0*lw, p1.y + dly0*lw, lu, 1)
+ __vset(dst, rx0, ry0, ru, 1)
+
+ __vset(dst, p1.x + dlx1*lw, p1.y + dly1*lw, lu, 1)
+ __vset(dst, rx1, ry1, ru, 1)
+ } else {
+ lx0 = p1.x + p1.dmx * lw
+ ly0 = p1.y + p1.dmy * lw
+
+ __vset(dst, p1.x + dlx0*lw, p1.y + dly0*lw, lu, 1)
+ __vset(dst, p1.x, p1.y, 0.5, 1)
+
+ __vset(dst, lx0, ly0, lu, 1)
+ __vset(dst, lx0, ly0, lu, 1)
+
+ __vset(dst, p1.x + dlx1*lw, p1.y + dly1*lw, lu, 1)
+ __vset(dst, p1.x, p1.y, 0.5, 1)
+ }
+
+ __vset(dst, p1.x + dlx1*lw, p1.y + dly1*lw, lu, 1)
+ __vset(dst, rx1, ry1, ru, 1)
+ }
+}
+
+__buttCapStart :: proc(
+ dst: ^[]Vertex,
+ p: ^Point,
+ dx, dy: f32,
+ w: f32,
+ d: f32,
+ aa: f32,
+ u0: f32,
+ u1: f32,
+) {
+ px := p.x - dx * d
+ py := p.y - dy * d
+ dlx := dy
+ dly := -dx
+ __vset(dst, px + dlx*w - dx*aa, py + dly*w - dy*aa, u0,0)
+ __vset(dst, px - dlx*w - dx*aa, py - dly*w - dy*aa, u1,0)
+ __vset(dst, px + dlx*w, py + dly*w, u0,1)
+ __vset(dst, px - dlx*w, py - dly*w, u1,1)
+}
+
+__buttCapEnd :: proc(
+ dst: ^[]Vertex,
+ p: ^Point,
+ dx, dy: f32,
+ w: f32,
+ d: f32,
+ aa: f32,
+ u0: f32,
+ u1: f32,
+) {
+ px := p.x + dx * d
+ py := p.y + dy * d
+ dlx := dy
+ dly := -dx
+ __vset(dst, px + dlx*w, py + dly*w, u0,1)
+ __vset(dst, px - dlx*w, py - dly*w, u1,1)
+ __vset(dst, px + dlx*w + dx*aa, py + dly*w + dy*aa, u0,0)
+ __vset(dst, px - dlx*w + dx*aa, py - dly*w + dy*aa, u1,0)
+}
+
+__roundCapStart :: proc(
+ dst: ^[]Vertex,
+ p: ^Point,
+ dx, dy: f32,
+ w: f32,
+ ncap: int,
+ u0: f32,
+ u1: f32,
+) {
+ px := p.x
+ py := p.y
+ dlx := dy
+ dly := -dx
+
+ for i in 0.. 0 {
+ iw = 1.0 / w
+ }
+
+ // Calculate which joins need extra vertices, and gather the total vertex count.
+ for &path in cache.paths {
+ pts := cache.points[path.first:]
+ p0 := &pts[path.count-1]
+ p1 := &pts[0]
+ nleft := 0
+ path.nbevel = 0
+
+ for _ in 0.. 0.000001 {
+ scale := 1.0 / dmr2
+ if scale > 600.0 {
+ scale = 600.0
+ }
+ p1.dmx *= scale
+ p1.dmy *= scale
+ }
+
+ // Clear flags, but keep the corner.
+ p1.flags = {.CORNER} if .CORNER in p1.flags else nil
+
+ // Keep track of left turns.
+ __cross = p1.dx * p0.dy - p0.dx * p1.dy
+ if __cross > 0.0 {
+ nleft += 1
+ p1.flags += {.LEFT}
+ }
+
+ // Calculate if we should use bevel or miter for inner join.
+ limit = max(1.01, min(p0.len, p1.len) * iw)
+ if (dmr2 * limit * limit) < 1.0 {
+ p1.flags += {.INNER_BEVEL}
+ }
+
+ // Check to see if the corner needs to be beveled.
+ if .CORNER in p1.flags {
+ if (dmr2 * miterLimit*miterLimit) < 1.0 || lineJoin == .BEVEL || lineJoin == .ROUND {
+ p1.flags += {.BEVEL}
+ }
+ }
+
+ if (.BEVEL in p1.flags) || (.INNER_BEVEL in p1.flags) {
+ path.nbevel += 1
+ }
+
+ p0 = p1
+ p1 = mem.ptr_offset(p1, 1)
+ }
+
+ path.convex = nleft == path.count
+ }
+}
+
+// TODO could be done better? or not need dynamic
+__allocTempVerts :: proc(ctx: ^Context, nverts: int) -> []Vertex {
+ resize(&ctx.cache.verts, nverts)
+ return ctx.cache.verts[:]
+}
+
+__expandStroke :: proc(
+ ctx: ^Context,
+ w: f32,
+ fringe: f32,
+ lineCap: LineCapType,
+ lineJoin: LineCapType,
+ miterLimit: f32,
+) -> bool {
+ cache := &ctx.cache
+ aa := fringe
+ u0 := f32(0.0)
+ u1 := f32(1.0)
+ ncap := __curveDivs(w, math.PI, ctx.tessTol) // Calculate divisions per half circle.
+
+ w := w
+ w += aa * 0.5
+
+ // Disable the gradient used for antialiasing when antialiasing is not used.
+ if aa == 0.0 {
+ u0 = 0.5
+ u1 = 0.5
+ }
+
+ __calculateJoins(ctx, w, lineJoin, miterLimit)
+
+ // Calculate max vertex usage.
+ cverts := 0
+ for path in cache.paths {
+ loop := path.closed
+
+ // TODO check if f32 calculation necessary?
+ if lineJoin == .ROUND {
+ cverts += (path.count + path.nbevel * int(ncap + 2) + 1) * 2 // plus one for loop
+ } else {
+ cverts += (path.count + path.nbevel*5 + 1) * 2 // plus one for loop
+ }
+
+ if !loop {
+ // space for caps
+ if lineCap == .ROUND {
+ cverts += int(ncap*2 + 2)*2
+ } else {
+ cverts += (3 + 3)*2
+ }
+ }
+ }
+
+ verts := __allocTempVerts(ctx, cverts)
+ dst_index: int
+
+ for &path in cache.paths {
+ pts := cache.points[path.first:]
+ p0, p1: ^Point
+ start, end: int
+ dx, dy: f32
+
+		// nil the fill
+ path.fill = nil
+
+ // Calculate fringe or stroke
+ loop := path.closed
+ dst := verts[dst_index:]
+ dst_start_length := len(dst)
+
+ if loop {
+ // Looping
+ p0 = &pts[path.count-1]
+ p1 = &pts[0]
+ start = 0
+ end = path.count
+ } else {
+ // Add cap
+ p0 = &pts[0]
+ p1 = &pts[1]
+ start = 1
+ end = path.count - 1
+ }
+
+ if !loop {
+ // Add cap
+ dx = p1.x - p0.x
+ dy = p1.y - p0.y
+ __normalize(&dx, &dy)
+
+ if lineCap == .BUTT {
+ __buttCapStart(&dst, p0, dx, dy, w, -aa*0.5, aa, u0, u1)
+ } else if lineCap == .BUTT || lineCap == .SQUARE {
+ __buttCapStart(&dst, p0, dx, dy, w, w-aa, aa, u0, u1)
+ } else if lineCap == .ROUND {
+ __roundCapStart(&dst, p0, dx, dy, w, int(ncap), u0, u1)
+ }
+ }
+
+	}
+
+	return true
+}
+
+__expandFill :: proc(
+	ctx: ^Context,
+	w: f32,
+	lineJoin: LineCapType,
+	miterLimit: f32,
+) -> bool {
+ cache := &ctx.cache
+ aa := ctx.fringeWidth
+ fringe := w > 0.0
+ __calculateJoins(ctx, w, lineJoin, miterLimit)
+
+ // Calculate max vertex usage.
+ cverts := 0
+ for path in cache.paths {
+ cverts += path.count + path.nbevel + 1
+
+ if fringe {
+ cverts += (path.count + path.nbevel*5 + 1) * 2 // plus one for loop
+ }
+ }
+
+ convex := len(cache.paths) == 1 && cache.paths[0].convex
+ verts := __allocTempVerts(ctx, cverts)
+ dst_index: int
+
+ for &path in cache.paths {
+ pts := cache.points[path.first:]
+ p0, p1: ^Point
+ rw, lw, woff: f32
+ ru, lu: f32
+
+ // Calculate shape vertices.
+ woff = 0.5*aa
+ dst := verts[dst_index:]
+ dst_start_length := len(dst)
+
+ if fringe {
+ // Looping
+ p0 = &pts[path.count-1]
+ p1 = &pts[0]
+
+		}
+	}
+
+	return true
+}
+
+__cmdf :: proc(cmd: Commands) -> f32 {
+ return f32(cmd)
+}
+
+// Clears the current path and sub-paths.
+BeginPath :: proc(ctx: ^Context) {
+ clear(&ctx.commands)
+ __clearPathCache(ctx)
+}
+
+@(deferred_in=Fill)
+FillScoped :: proc(ctx: ^Context) {
+ BeginPath(ctx)
+}
+
+@(deferred_in=Stroke)
+StrokeScoped :: proc(ctx: ^Context) {
+ BeginPath(ctx)
+}
+
+@(deferred_in=Stroke)
+FillStrokeScoped :: proc(ctx: ^Context) {
+ BeginPath(ctx)
+}
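+
+// The *Scoped helpers rely on @(deferred_in): the deferred Fill/Stroke call runs
+// with the same ctx argument when the caller's scope ends. A minimal usage sketch,
+// with hypothetical coordinates:
+//
+//	{
+//		FillScoped(ctx) // BeginPath now, Fill(ctx) when this scope exits
+//		Rect(ctx, 10, 10, 100, 50)
+//	}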
+
+// Starts new sub-path with specified point as first point.
+MoveTo :: proc(ctx: ^Context, x, y: f32) {
+ __appendCommands(ctx, __cmdf(.MOVE_TO), x, y)
+}
+
+// Adds line segment from the last point in the path to the specified point.
+LineTo :: proc(ctx: ^Context, x, y: f32) {
+ __appendCommands(ctx, __cmdf(.LINE_TO), x, y)
+}
+
+// Adds cubic bezier segment from last point in the path via two control points to the specified point.
+BezierTo :: proc(
+ ctx: ^Context,
+ c1x, c1y: f32,
+ c2x, c2y: f32,
+ x, y: f32,
+) {
+ __appendCommands(ctx, __cmdf(.BEZIER_TO), c1x, c1y, c2x, c2y, x, y)
+}
+
+// Adds quadratic bezier segment from last point in the path via a control point to the specified point.
+QuadTo :: proc(ctx: ^Context, cx, cy, x, y: f32) {
+ x0 := ctx.commandx
+ y0 := ctx.commandy
+ __appendCommands(ctx,
+ __cmdf(.BEZIER_TO),
+		x0 + 2.0 / 3.0 * (cx - x0),
+		y0 + 2.0 / 3.0 * (cy - y0),
+		x + 2.0 / 3.0 * (cx - x),
+		y + 2.0 / 3.0 * (cy - y),
+ x,
+ y,
+ )
+}
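+
+// QuadTo uses exact degree elevation: a quadratic Bezier with control point c equals
+// a cubic whose control points are c1 = p0 + 2/3*(c - p0) and c2 = p1 + 2/3*(c - p1).
+// A minimal sketch with hypothetical coordinates:
+//
+//	MoveTo(ctx, 0, 0)
+//	QuadTo(ctx, 50, 100, 100, 0)
+//	// equivalent to BezierTo(ctx, 33.333, 66.667, 66.667, 66.667, 100, 0)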
+
+// Adds an arc segment at the corner defined by the last path point, and two specified points.
+ArcTo :: proc(
+ ctx: ^Context,
+ x1, y1: f32,
+ x2, y2: f32,
+ radius: f32,
+) {
+ if len(ctx.commands) == 0 {
+ return
+ }
+
+ x0 := ctx.commandx
+ y0 := ctx.commandy
+ // Handle degenerate cases.
+ if __ptEquals(x0,y0, x1,y1, ctx.distTol) ||
+ __ptEquals(x1,y1, x2,y2, ctx.distTol) ||
+ __distPtSeg(x1,y1, x0,y0, x2,y2) < ctx.distTol*ctx.distTol ||
+ radius < ctx.distTol {
+ LineTo(ctx, x1, y1)
+ return
+ }
+
+ // Calculate tangential circle to lines (x0,y0)-(x1,y1) and (x1,y1)-(x2,y2).
+ dx0 := x0-x1
+ dy0 := y0-y1
+ dx1 := x2-x1
+ dy1 := y2-y1
+ __normalize(&dx0,&dy0)
+ __normalize(&dx1,&dy1)
+ a := math.acos(dx0*dx1 + dy0*dy1)
+ d := radius / math.tan(a / 2.0)
+
+ if d > 10000 {
+ LineTo(ctx, x1, y1)
+ return
+ }
+
+ a0, a1, cx, cy: f32
+ direction: Winding
+
+ if __cross(dx0,dy0, dx1,dy1) > 0.0 {
+ cx = x1 + dx0*d + dy0*radius
+ cy = y1 + dy0*d + -dx0*radius
+ a0 = math.atan2(dx0, -dy0)
+ a1 = math.atan2(-dx1, dy1)
+ direction = .CW
+ } else {
+ cx = x1 + dx0*d + -dy0*radius
+ cy = y1 + dy0*d + dx0*radius
+ a0 = math.atan2(-dx0, dy0)
+ a1 = math.atan2(dx1, -dy1)
+ direction = .CCW
+ }
+
+ Arc(ctx, cx, cy, radius, a0, a1, direction)
+}
+
+// Creates a new circle-arc-shaped sub-path. The arc center is at cx,cy, the arc radius is r,
+// and the arc is drawn from angle a0 to a1, swept in direction dir (.CCW or .CW).
+// Angles are specified in radians.
+Arc :: proc(ctx: ^Context, cx, cy, r, a0, a1: f32, dir: Winding) {
+ move: Commands = .LINE_TO if len(ctx.commands) > 0 else .MOVE_TO
+
+ // Clamp angles
+ da := a1 - a0
+ if dir == .CW {
+ if abs(da) >= math.PI*2 {
+ da = math.PI*2
+ } else {
+ for da < 0.0 {
+ da += math.PI*2
+ }
+ }
+ } else {
+ if abs(da) >= math.PI*2 {
+ da = -math.PI*2
+ } else {
+ for da > 0.0 {
+ da -= math.PI*2
+ }
+ }
+ }
+
+ // Split arc into max 90 degree segments.
+ ndivs := max(1, min((int)(abs(da) / (math.PI*0.5) + 0.5), 5))
+ hda := (da / f32(ndivs)) / 2.0
+ kappa := abs(4.0 / 3.0 * (1.0 - math.cos(hda)) / math.sin(hda))
+
+ if dir == .CCW {
+ kappa = -kappa
+ }
+
+ values: [3 + 5 * 7 + 100]f32
+ nvals := 0
+
+ px, py, ptanx, ptany: f32
+ for i in 0..=ndivs {
+ a := a0 + da * f32(i) / f32(ndivs)
+ dx := math.cos(a)
+ dy := math.sin(a)
+ x := cx + dx*r
+ y := cy + dy*r
+ tanx := -dy*r*kappa
+ tany := dx*r*kappa
+
+ if i == 0 {
+ values[nvals] = __cmdf(move); nvals += 1
+ values[nvals] = x; nvals += 1
+ values[nvals] = y; nvals += 1
+ } else {
+ values[nvals] = __cmdf(.BEZIER_TO); nvals += 1
+ values[nvals] = px + ptanx; nvals += 1
+ values[nvals] = py + ptany; nvals += 1
+ values[nvals] = x-tanx; nvals += 1
+ values[nvals] = y-tany; nvals += 1
+ values[nvals] = x; nvals += 1
+ values[nvals] = y; nvals += 1
+ }
+ px = x
+ py = y
+ ptanx = tanx
+ ptany = tany
+ }
+
+	// append the generated arc commands to the context
+ __appendCommands(ctx, ..values[:nvals])
+}
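+
+// Arc splits the sweep into at most 90-degree cubic segments; kappa is the standard
+// 4/3 * (1 - cos(h)) / sin(h) tangent length per half-segment. A minimal sketch
+// drawing a stroked half circle, with hypothetical coordinates:
+//
+//	BeginPath(ctx)
+//	Arc(ctx, 100, 100, 50, 0, math.PI, .CW)
+//	Stroke(ctx)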
+
+// Closes current sub-path with a line segment.
+ClosePath :: proc(ctx: ^Context) {
+ __appendCommands(ctx, __cmdf(.CLOSE))
+}
+
+// Sets the current sub-path winding, see Winding and Solidity.
+PathWinding :: proc(ctx: ^Context, direction: Winding) {
+ __appendCommands(ctx, __cmdf(.WINDING), f32(direction))
+}
+
+// Same as PathWinding but takes a Solidity enum instead.
+PathSolidity :: proc(ctx: ^Context, solidity: Solidity) {
+ __appendCommands(ctx, __cmdf(.WINDING), f32(solidity))
+}
+
+// Creates new rectangle shaped sub-path.
+Rect :: proc(ctx: ^Context, x, y, w, h: f32) {
+ __appendCommands(ctx,
+ __cmdf(.MOVE_TO), x, y,
+ __cmdf(.LINE_TO), x, y + h,
+ __cmdf(.LINE_TO), x + w, y + h,
+ __cmdf(.LINE_TO), x + w, y,
+ __cmdf(.CLOSE),
+ )
+}
+
+// Creates new rounded rectangle shaped sub-path.
+RoundedRect :: proc(ctx: ^Context, x, y, w, h, radius: f32) {
+ RoundedRectVarying(ctx, x, y, w, h, radius, radius, radius, radius)
+}
+
+// Creates new rounded rectangle shaped sub-path with varying radii for each corner.
+RoundedRectVarying :: proc(
+ ctx: ^Context,
+ x, y: f32,
+ w, h: f32,
+ radius_top_left: f32,
+ radius_top_right: f32,
+ radius_bottom_right: f32,
+ radius_bottom_left: f32,
+) {
+ if radius_top_left < 0.1 && radius_top_right < 0.1 && radius_bottom_right < 0.1 && radius_bottom_left < 0.1 {
+ Rect(ctx, x, y, w, h)
+ } else {
+ halfw := abs(w) * 0.5
+ halfh := abs(h) * 0.5
+ rxBL := min(radius_bottom_left, halfw) * math.sign(w)
+ ryBL := min(radius_bottom_left, halfh) * math.sign(h)
+ rxBR := min(radius_bottom_right, halfw) * math.sign(w)
+ ryBR := min(radius_bottom_right, halfh) * math.sign(h)
+ rxTR := min(radius_top_right, halfw) * math.sign(w)
+ ryTR := min(radius_top_right, halfh) * math.sign(h)
+ rxTL := min(radius_top_left, halfw) * math.sign(w)
+ ryTL := min(radius_top_left, halfh) * math.sign(h)
+ __appendCommands(ctx,
+ __cmdf(.MOVE_TO), x, y + ryTL,
+ __cmdf(.LINE_TO), x, y + h - ryBL,
+ __cmdf(.BEZIER_TO), x, y + h - ryBL*(1 - KAPPA), x + rxBL*(1 - KAPPA), y + h, x + rxBL, y + h,
+ __cmdf(.LINE_TO), x + w - rxBR, y + h,
+ __cmdf(.BEZIER_TO), x + w - rxBR*(1 - KAPPA), y + h, x + w, y + h - ryBR*(1 - KAPPA), x + w, y + h - ryBR,
+ __cmdf(.LINE_TO), x + w, y + ryTR,
+ __cmdf(.BEZIER_TO), x + w, y + ryTR*(1 - KAPPA), x + w - rxTR*(1 - KAPPA), y, x + w - rxTR, y,
+ __cmdf(.LINE_TO), x + rxTL, y,
+ __cmdf(.BEZIER_TO), x + rxTL*(1 - KAPPA), y, x, y + ryTL*(1 - KAPPA), x, y + ryTL,
+ __cmdf(.CLOSE),
+ )
+ }
+}
+
+// Creates new ellipse shaped sub-path.
+Ellipse :: proc(ctx: ^Context, cx, cy, rx, ry: f32) {
+ __appendCommands(ctx,
+ __cmdf(.MOVE_TO), cx-rx, cy,
+ __cmdf(.BEZIER_TO), cx-rx, cy+ry*KAPPA, cx-rx*KAPPA, cy+ry, cx, cy+ry,
+ __cmdf(.BEZIER_TO), cx+rx*KAPPA, cy+ry, cx+rx, cy+ry*KAPPA, cx+rx, cy,
+ __cmdf(.BEZIER_TO), cx+rx, cy-ry*KAPPA, cx+rx*KAPPA, cy-ry, cx, cy-ry,
+ __cmdf(.BEZIER_TO), cx-rx*KAPPA, cy-ry, cx-rx, cy-ry*KAPPA, cx-rx, cy,
+ __cmdf(.CLOSE),
+ )
+}
+
+// Creates new circle shaped sub-path.
+Circle :: #force_inline proc(ctx: ^Context, cx, cy: f32, radius: f32) {
+ Ellipse(ctx, cx, cy, radius, radius)
+}
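+
+// Sub-paths accumulate until the next BeginPath; with the usual NanoVG winding
+// convention (solid = CCW, hole = CW) a reversed sub-path punches a hole.
+// A minimal sketch with hypothetical coordinates:
+//
+//	BeginPath(ctx)
+//	Rect(ctx, 20, 20, 160, 120)
+//	Circle(ctx, 100, 80, 30)
+//	PathWinding(ctx, .CW) // mark the circle as a hole
+//	Fill(ctx)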
+
+// Fills the current path with current fill style.
+Fill :: proc(ctx: ^Context) {
+ state := __getState(ctx)
+ fill_paint := state.fill
+
+ __flattenPaths(ctx)
+
+ if ctx.params.edgeAntiAlias && state.shapeAntiAlias {
+ __expandFill(ctx, ctx.fringeWidth, .MITER, 2.4)
+ } else {
+ __expandFill(ctx, 0, .MITER, 2.4)
+ }
+
+ // apply global alpha
+ fill_paint.innerColor.a *= state.alpha
+ fill_paint.outerColor.a *= state.alpha
+
+ assert(ctx.params.renderFill != nil)
+ ctx.params.renderFill(
+ ctx.params.userPtr,
+ &fill_paint,
+ state.compositeOperation,
+ &state.scissor,
+ ctx.fringeWidth,
+ ctx.cache.bounds,
+ ctx.cache.paths[:],
+ )
+
+ for path in ctx.cache.paths {
+ ctx.fillTriCount += len(path.fill) - 2
+ ctx.fillTriCount += len(path.stroke) - 2
+ ctx.drawCallCount += 2
+ }
+}
+
+// Strokes the current path with the current stroke style.
+Stroke :: proc(ctx: ^Context) {
+ state := __getState(ctx)
+ scale := __getAverageScale(state.xform[:])
+ strokeWidth := clamp(state.strokeWidth * scale, 0, 200)
+ stroke_paint := state.stroke
+
+ if strokeWidth < ctx.fringeWidth {
+ // If the stroke width is less than pixel size, use alpha to emulate coverage.
+ // Since coverage is area, scale by alpha*alpha.
+ alpha := clamp(strokeWidth / ctx.fringeWidth, 0, 1)
+ stroke_paint.innerColor.a *= alpha * alpha
+ stroke_paint.outerColor.a *= alpha * alpha
+ strokeWidth = ctx.fringeWidth
+ }
+
+ // apply global alpha
+ stroke_paint.innerColor.a *= state.alpha
+ stroke_paint.outerColor.a *= state.alpha
+
+ __flattenPaths(ctx)
+
+ if ctx.params.edgeAntiAlias && state.shapeAntiAlias {
+ __expandStroke(ctx, strokeWidth * 0.5, ctx.fringeWidth, state.lineCap, state.lineJoin, state.miterLimit)
+ } else {
+ __expandStroke(ctx, strokeWidth * 0.5, 0, state.lineCap, state.lineJoin, state.miterLimit)
+ }
+
+ assert(ctx.params.renderStroke != nil)
+ ctx.params.renderStroke(
+ ctx.params.userPtr,
+ &stroke_paint,
+ state.compositeOperation,
+ &state.scissor,
+ ctx.fringeWidth,
+ strokeWidth,
+ ctx.cache.paths[:],
+ )
+
+ for path in ctx.cache.paths {
+ ctx.strokeTriCount += len(path.stroke) - 2
+ ctx.drawCallCount += 1
+ }
+}
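+
+// A minimal stroke sketch with hypothetical coordinates (stroke width, color and
+// cap/join state setters live elsewhere in this package):
+//
+//	BeginPath(ctx)
+//	MoveTo(ctx, 10, 10)
+//	LineTo(ctx, 200, 80)
+//	Stroke(ctx)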
+
+DebugDumpPathCache :: proc(ctx: ^Context) {
+ fmt.printf("~~~~~~~~~~~~~Dumping %d cached paths\n", len(ctx.cache.paths))
+
+ for path, i in ctx.cache.paths {
+ fmt.printf(" - Path %d\n", i)
+
+ if len(path.fill) != 0 {
+ fmt.printf(" - fill: %d\n", len(path.fill))
+
+ for v in path.fill {
+ fmt.printf("%f\t%f\n", v.x, v.y)
+ }
+ }
+
+ if len(path.stroke) != 0 {
+ fmt.printf(" - stroke: %d\n", len(path.stroke))
+
+ for v in path.stroke {
+ fmt.printf("%f\t%f\n", v.x, v.y)
+ }
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////
+// NanoVG allows you to load .ttf files and use the font to render text.
+//
+// The appearance of the text can be defined by setting the current text style
+// and by specifying the fill color. Common text and font settings such as
+// font size, letter spacing and text align are supported. Font blur allows you
+// to create simple text effects such as drop shadows.
+//
+// At render time the font face can be set based on the font handles or name.
+//
+// Font measure functions return values in local space; the calculations are
+// carried out in the same resolution as the final rendering. This is done because
+// the text glyph positions are snapped to the nearest pixel for sharp rendering.
+//
+// Local space means that values are not rotated or scaled as per the current
+// transformation. For example if you set font size to 12, which would mean that
+// line height is 16, then regardless of the current scaling and rotation, the
+// returned line height is always 16. Some measures may vary slightly because of
+// the aforementioned pixel snapping, depending on the scaling.
+//
+// While this may sound a little odd, the setup allows you to always render the
+// same way regardless of scaling, i.e. the following works regardless of scaling:
+//
+//	txt := "Text me up."
+//	bounds: [4]f32
+//	TextBounds(ctx, x, y, txt, &bounds)
+//	BeginPath(ctx)
+//	RoundedRect(ctx, bounds[0], bounds[1], bounds[2]-bounds[0], bounds[3]-bounds[1], 4)
+//	Fill(ctx)
+//
+// Note: currently only solid color fill is supported for text.
+///////////////////////////////////////////////////////////
+
+// Creates a font by loading it from disk with the specified file name.
+// Returns a handle to the font.
+CreateFont :: proc(ctx: ^Context, name, filename: string) -> int {
+ return fontstash.AddFontPath(&ctx.fs, name, filename)
+}
+
+// Creates a font by loading it from the specified memory chunk.
+// Returns a handle to the font.
+CreateFontMem :: proc(ctx: ^Context, name: string, slice: []byte, free_loaded_data: bool) -> int {
+ return fontstash.AddFontMem(&ctx.fs, name, slice, free_loaded_data)
+}
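+
+// A minimal font setup sketch; the font name and file path are hypothetical:
+//
+//	font := CreateFont(ctx, "sans", "fonts/Roboto-Regular.ttf")
+//	FontFaceId(ctx, font)
+//	FontSize(ctx, 18)
+//	Text(ctx, 10, 30, "hello")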
+
+// Finds a loaded font with the specified name and returns a handle to it, or -1 if the font is not found.
+FindFont :: proc(ctx: ^Context, name: string) -> int {
+ if name == "" {
+ return -1
+ }
+
+ return fontstash.GetFontByName(&ctx.fs, name)
+}
+
+// Adds a fallback font by handle.
+AddFallbackFontId :: proc(ctx: ^Context, base_font, fallback_font: int) -> bool {
+ if base_font == -1 || fallback_font == -1 {
+ return false
+ }
+
+ return fontstash.AddFallbackFont(&ctx.fs, base_font, fallback_font)
+}
+
+// Adds a fallback font by name.
+AddFallbackFont :: proc(ctx: ^Context, base_font: string, fallback_font: string) -> bool {
+ return AddFallbackFontId(
+ ctx,
+ FindFont(ctx, base_font),
+ FindFont(ctx, fallback_font),
+ )
+}
+
+// Resets fallback fonts by handle.
+ResetFallbackFontsId :: proc(ctx: ^Context, base_font: int) {
+ fontstash.ResetFallbackFont(&ctx.fs, base_font)
+}
+
+// Resets fallback fonts by name.
+ResetFallbackFonts :: proc(ctx: ^Context, base_font: string) {
+ fontstash.ResetFallbackFont(&ctx.fs, FindFont(ctx, base_font))
+}
+
+// Sets the font size of current text style.
+FontSize :: proc(ctx: ^Context, size: f32) {
+ state := __getState(ctx)
+ state.fontSize = size
+}
+
+// Sets the blur of current text style.
+FontBlur :: proc(ctx: ^Context, blur: f32) {
+ state := __getState(ctx)
+ state.fontBlur = blur
+}
+
+// Sets the letter spacing of current text style.
+TextLetterSpacing :: proc(ctx: ^Context, spacing: f32) {
+ state := __getState(ctx)
+ state.letterSpacing = spacing
+}
+
+// Sets the proportional line height of current text style. The line height is specified as multiple of font size.
+TextLineHeight :: proc(ctx: ^Context, lineHeight: f32) {
+ state := __getState(ctx)
+ state.lineHeight = lineHeight
+}
+
+// Sets the horizontal text align of current text style
+TextAlignHorizontal :: proc(ctx: ^Context, align: AlignHorizontal) {
+ state := __getState(ctx)
+ state.alignHorizontal = align
+}
+
+// Sets the vertical text align of current text style
+TextAlignVertical :: proc(ctx: ^Context, align: AlignVertical) {
+ state := __getState(ctx)
+ state.alignVertical = align
+}
+
+// Sets the text align of current text style, see NVGalign for options.
+TextAlign :: proc(ctx: ^Context, ah: AlignHorizontal, av: AlignVertical) {
+ state := __getState(ctx)
+ state.alignHorizontal = ah
+ state.alignVertical = av
+}
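+
+// Alignment affects both drawing and measuring. A minimal sketch, assuming the
+// fontstash AlignVertical values include .MIDDLE, with hypothetical coordinates:
+//
+//	TextAlign(ctx, .CENTER, .MIDDLE)
+//	Text(ctx, 200, 100, "centered label")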
+
+// Sets the font face of the current text style based on the specified font handle.
+FontFaceId :: proc(ctx: ^Context, font: int) {
+ state := __getState(ctx)
+ state.fontId = font
+}
+
+// Sets the font face of the current text style based on the specified font name.
+FontFace :: proc(ctx: ^Context, font: string) {
+ state := __getState(ctx)
+ state.fontId = fontstash.GetFontByName(&ctx.fs, font)
+}
+
+__quantize :: proc(a, d: f32) -> f32 {
+ return f32(int(a / d + 0.5)) * d
+}
+
+__getFontScale :: proc(state: ^State) -> f32 {
+ return min(__quantize(__getAverageScale(state.xform[:]), 0.01), 4.0)
+}
+
+__flushTextTexture :: proc(ctx: ^Context) {
+ dirty: [4]f32
+ assert(ctx.params.renderUpdateTexture != nil)
+
+ if fontstash.ValidateTexture(&ctx.fs, &dirty) {
+ font_image := ctx.fontImages[ctx.fontImageIdx]
+
+ // Update texture
+ if font_image != 0 {
+ data := ctx.fs.textureData
+ x := dirty[0]
+ y := dirty[1]
+ w := dirty[2] - dirty[0]
+ h := dirty[3] - dirty[1]
+ ctx.params.renderUpdateTexture(ctx.params.userPtr, font_image, int(x), int(y), int(w), int(h), data)
+ }
+ }
+}
+
+__allocTextAtlas :: proc(ctx: ^Context) -> bool {
+ __flushTextTexture(ctx)
+
+ if ctx.fontImageIdx >= MAX_FONTIMAGES - 1 {
+ return false
+ }
+
+	// if the next fontImage already has a texture
+ iw, ih: int
+ if ctx.fontImages[ctx.fontImageIdx+1] != 0 {
+ iw, ih = ImageSize(ctx, ctx.fontImages[ctx.fontImageIdx+1])
+ } else { // calculate the new font image size and create it.
+ iw, ih = ImageSize(ctx, ctx.fontImages[ctx.fontImageIdx])
+
+ if iw > ih {
+ ih *= 2
+ } else {
+ iw *= 2
+ }
+
+ if iw > MAX_FONTIMAGE_SIZE || ih > MAX_FONTIMAGE_SIZE {
+ iw = MAX_FONTIMAGE_SIZE
+ ih = MAX_FONTIMAGE_SIZE
+ }
+
+ ctx.fontImages[ctx.fontImageIdx + 1] = ctx.params.renderCreateTexture(ctx.params.userPtr, .Alpha, iw, ih, {}, nil)
+ }
+
+ ctx.fontImageIdx += 1
+ fontstash.ResetAtlas(&ctx.fs, iw, ih)
+
+ return true
+}
+
+__renderText :: proc(ctx: ^Context, verts: []Vertex) {
+	// nothing to render for an empty vertex slice
+ if len(verts) == 0 {
+ return
+ }
+
+ state := __getState(ctx)
+ paint := state.fill
+
+ // Render triangles.
+ paint.image = ctx.fontImages[ctx.fontImageIdx]
+
+ // Apply global alpha
+ paint.innerColor.a *= state.alpha
+ paint.outerColor.a *= state.alpha
+
+ ctx.params.renderTriangles(ctx.params.userPtr, &paint, state.compositeOperation, &state.scissor, verts, ctx.fringeWidth)
+
+ ctx.drawCallCount += 1
+ ctx.textTriCount += len(verts) / 3
+}
+
+__isTransformFlipped :: proc(xform: []f32) -> bool {
+ det := xform[0] * xform[3] - xform[2] * xform[1]
+ return det < 0
+}
+
+// draw a single codepoint, useful for icons
+TextIcon :: proc(ctx: ^Context, xpos, ypos: f32, codepoint: rune) -> f32 {
+ state := __getState(ctx)
+ scale := __getFontScale(state) * ctx.devicePxRatio
+ invscale := f32(1.0) / scale
+ is_flipped := __isTransformFlipped(state.xform[:])
+
+ if state.fontId == -1 {
+ return xpos
+ }
+
+ fs := &ctx.fs
+ fontstash.SetSize(fs, state.fontSize * scale)
+ fontstash.SetSpacing(fs, state.letterSpacing * scale)
+ fontstash.SetBlur(fs, state.fontBlur * scale)
+ fontstash.SetAlignHorizontal(fs, state.alignHorizontal)
+ fontstash.SetAlignVertical(fs, state.alignVertical)
+ fontstash.SetFont(fs, state.fontId)
+
+ // fontstash internals
+ fstate := fontstash.__getState(fs)
+ font := fontstash.__getFont(fs, state.fontId)
+ isize := i16(fstate.size * 10)
+ iblur := i16(fstate.blur)
+ glyph, _ := fontstash.__getGlyph(fs, font, codepoint, isize, iblur)
+ fscale := fontstash.__getPixelHeightScale(font, f32(isize) / 10)
+
+ // transform x / y
+ x := xpos * scale
+ y := ypos * scale
+ switch fstate.ah {
+ case .LEFT: {}
+
+ case .CENTER:
+ width := fontstash.CodepointWidth(font, codepoint, fscale)
+ x = math.round(x - width * 0.5)
+
+ case .RIGHT:
+ width := fontstash.CodepointWidth(font, codepoint, fscale)
+ x -= width
+ }
+
+ // align vertically
+ y = math.round(y + fontstash.__getVerticalAlign(fs, font, fstate.av, isize))
+ nextx := f32(x)
+ nexty := f32(y)
+
+ if glyph != nil {
+ q: fontstash.Quad
+ fontstash.__getQuad(fs, font, -1, glyph, fscale, fstate.spacing, &nextx, &nexty, &q)
+
+ if is_flipped {
+ q.y0, q.y1 = q.y1, q.y0
+ q.t0, q.t1 = q.t1, q.t0
+ }
+
+ // single glyph only
+ verts := __allocTempVerts(ctx, 6)
+ c: [4 * 2]f32
+
+ // Transform corners.
+ TransformPoint(&c[0], &c[1], state.xform, q.x0 * invscale, q.y0 * invscale)
+ TransformPoint(&c[2], &c[3], state.xform, q.x1 * invscale, q.y0 * invscale)
+ TransformPoint(&c[4], &c[5], state.xform, q.x1 * invscale, q.y1 * invscale)
+ TransformPoint(&c[6], &c[7], state.xform, q.x0 * invscale, q.y1 * invscale)
+
+ // Create triangles
+ verts[0] = {c[0], c[1], q.s0, q.t0}
+ verts[1] = {c[4], c[5], q.s1, q.t1}
+ verts[2] = {c[2], c[3], q.s1, q.t0}
+ verts[3] = {c[0], c[1], q.s0, q.t0}
+ verts[4] = {c[6], c[7], q.s0, q.t1}
+ verts[5] = {c[4], c[5], q.s1, q.t1}
+
+ ctx.textureDirty = true
+ __renderText(ctx, verts[:])
+ }
+
+ return nextx / scale
+}
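+
+// A minimal icon sketch; the icon font name and codepoint are hypothetical:
+//
+//	FontFace(ctx, "icons")
+//	FontSize(ctx, 24)
+//	next_x := TextIcon(ctx, 10, 30, rune(0xE800))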
+
+// Draws the text string at the specified location and returns the horizontal text advance.
+Text :: proc(ctx: ^Context, x, y: f32, text: string) -> f32 {
+ state := __getState(ctx)
+ scale := __getFontScale(state) * ctx.devicePxRatio
+ invscale := f32(1.0) / scale
+ is_flipped := __isTransformFlipped(state.xform[:])
+
+ if state.fontId == -1 {
+ return x
+ }
+
+ fs := &ctx.fs
+ fontstash.SetSize(fs, state.fontSize * scale)
+ fontstash.SetSpacing(fs, state.letterSpacing * scale)
+ fontstash.SetBlur(fs, state.fontBlur * scale)
+ fontstash.SetAlignHorizontal(fs, state.alignHorizontal)
+ fontstash.SetAlignVertical(fs, state.alignVertical)
+ fontstash.SetFont(fs, state.fontId)
+
+ cverts := max(2, len(text)) * 6 // conservative estimate.
+ verts := __allocTempVerts(ctx, cverts)
+ nverts: int
+
+ iter := fontstash.TextIterInit(fs, x * scale, y * scale, text)
+ prev_iter := iter
+ q: fontstash.Quad
+ for fontstash.TextIterNext(&ctx.fs, &iter, &q) {
+ c: [4 * 2]f32
+
+ if iter.previousGlyphIndex == -1 { // can not retrieve glyph?
+ if nverts != 0 {
+ __renderText(ctx, verts[:])
+ nverts = 0
+ }
+
+ if !__allocTextAtlas(ctx) {
+ break // no memory :(
+ }
+
+ iter = prev_iter
+ fontstash.TextIterNext(fs, &iter, &q) // try again
+
+ if iter.previousGlyphIndex == -1 {
+ // still can not find glyph?
+ break
+ }
+ }
+
+ prev_iter = iter
+ if is_flipped {
+ q.y0, q.y1 = q.y1, q.y0
+ q.t0, q.t1 = q.t1, q.t0
+ }
+
+ // Transform corners.
+ TransformPoint(&c[0], &c[1], state.xform, q.x0 * invscale, q.y0 * invscale)
+ TransformPoint(&c[2], &c[3], state.xform, q.x1 * invscale, q.y0 * invscale)
+ TransformPoint(&c[4], &c[5], state.xform, q.x1 * invscale, q.y1 * invscale)
+ TransformPoint(&c[6], &c[7], state.xform, q.x0 * invscale, q.y1 * invscale)
+
+ // Create triangles
+ if nverts + 6 <= cverts {
+ verts[nverts+0] = {c[0], c[1], q.s0, q.t0}
+ verts[nverts+1] = {c[4], c[5], q.s1, q.t1}
+ verts[nverts+2] = {c[2], c[3], q.s1, q.t0}
+ verts[nverts+3] = {c[0], c[1], q.s0, q.t0}
+ verts[nverts+4] = {c[6], c[7], q.s0, q.t1}
+ verts[nverts+5] = {c[4], c[5], q.s1, q.t1}
+ nverts += 6
+ }
+ }
+
+ ctx.textureDirty = true
+ __renderText(ctx, verts[:nverts])
+
+ return iter.nextx / scale
+}
+
+// Returns the vertical metrics based on the current text style.
+// Measured values are returned in local coordinate space.
+TextMetrics :: proc(ctx: ^Context) -> (ascender, descender, lineHeight: f32) {
+ state := __getState(ctx)
+ scale := __getFontScale(state) * ctx.devicePxRatio
+ invscale := f32(1.0) / scale
+
+ if state.fontId == -1 {
+ return
+ }
+
+ fs := &ctx.fs
+ fontstash.SetSize(fs, state.fontSize*scale)
+ fontstash.SetSpacing(fs, state.letterSpacing*scale)
+ fontstash.SetBlur(fs, state.fontBlur*scale)
+ fontstash.SetAlignHorizontal(fs, state.alignHorizontal)
+ fontstash.SetAlignVertical(fs, state.alignVertical)
+ fontstash.SetFont(fs, state.fontId)
+
+ ascender, descender, lineHeight = fontstash.VerticalMetrics(fs)
+ ascender *= invscale
+ descender *= invscale
+ lineHeight *= invscale
+ return
+}
+
+// Measures the specified text string. Parameter bounds should be a pointer to [4]f32
+// if the bounding box of the text should be returned. The bounds values are [xmin, ymin, xmax, ymax].
+// Returns the horizontal advance of the measured text (i.e. where the next character should be drawn).
+// Measured values are returned in local coordinate space.
+TextBounds :: proc(
+ ctx: ^Context,
+ x, y: f32,
+ input: string,
+ bounds: ^[4]f32 = nil,
+) -> (advance: f32) {
+ state := __getState(ctx)
+ scale := __getFontScale(state) * ctx.devicePxRatio
+ invscale := f32(1.0) / scale
+
+ if state.fontId == -1 {
+ return 0
+ }
+
+ fs := &ctx.fs
+ fontstash.SetSize(fs, state.fontSize*scale)
+ fontstash.SetSpacing(fs, state.letterSpacing*scale)
+ fontstash.SetBlur(fs, state.fontBlur*scale)
+ fontstash.SetAlignHorizontal(fs, state.alignHorizontal)
+ fontstash.SetAlignVertical(fs, state.alignVertical)
+ fontstash.SetFont(fs, state.fontId)
+
+ width := fontstash.TextBounds(fs, input, x * scale, y * scale, bounds)
+
+ if bounds != nil {
+ // Use line bounds for height.
+ one, two := fontstash.LineBounds(fs, y * scale)
+
+ bounds[1] = one
+ bounds[3] = two
+ bounds[0] *= invscale
+ bounds[1] *= invscale
+ bounds[2] *= invscale
+ bounds[3] *= invscale
+ }
+
+ return width * invscale
+}
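+
+// A minimal measuring sketch with hypothetical coordinates:
+//
+//	bounds: [4]f32
+//	advance := TextBounds(ctx, 10, 30, "hello", &bounds)
+//	// bounds now holds {xmin, ymin, xmax, ymax}; advance is the next pen x position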
+
+// text row with relative byte offsets into a string
+Text_Row :: struct {
+ start: int,
+ end: int,
+ next: int,
+ width: f32,
+ minx, maxx: f32,
+}
+
+Codepoint_Type :: enum {
+ Space,
+ Newline,
+ Char,
+ CJK,
+}
+
+// Draws a multi-line text string at the specified location, wrapped at the specified width.
+// White space is stripped at the beginning of the rows, and the text is split at word boundaries or when new-line characters are encountered.
+// Words longer than the max width are split at the nearest character (i.e. no hyphenation).
+TextBox :: proc(
+ ctx: ^Context,
+ x, y: f32,
+ break_row_width: f32,
+ input: string,
+) {
+ state := __getState(ctx)
+ rows: [2]Text_Row
+
+ if state.fontId == -1 {
+ return
+ }
+
+ _, _, lineHeight := TextMetrics(ctx)
+ old_align := state.alignHorizontal
+ defer state.alignHorizontal = old_align
+ state.alignHorizontal = .LEFT
+ rows_mod := rows[:]
+
+ y := y
+ input := input
+ for nrows, input_last in TextBreakLines(ctx, &input, break_row_width, &rows_mod) {
+ for row in rows[:nrows] {
+ Text(ctx, x, y, input_last[row.start:row.end])
+ y += lineHeight * state.lineHeight
+ }
+ }
+}
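+
+// A minimal word-wrapping sketch; the break width of 200 (local space) is hypothetical:
+//
+//	TextBox(ctx, 10, 30, 200, "a longer paragraph that wraps at the given width")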
+
+// NOTE: TextBreakLines works with byte indexes relative to the string now, instead of pointers.
+// Breaks the specified text into lines.
+// White space is stripped at the beginning of the rows, and the text is split at word boundaries or when new-line characters are encountered.
+// Words longer than the max width are split at the nearest character (i.e. no hyphenation).
+TextBreakLines :: proc(
+ ctx: ^Context,
+ text: ^string,
+ break_row_width: f32,
+ rows: ^[]Text_Row,
+) -> (nrows: int, last: string, ok: bool) {
+ state := __getState(ctx)
+ scale := __getFontScale(state) * ctx.devicePxRatio
+ invscale := 1.0 / scale
+
+ row_start_x, row_width, row_min_x, row_max_x: f32
+ max_rows := len(rows)
+
+ row_start: int = -1
+ row_end: int = -1
+ word_start: int = -1
+ break_end: int = -1
+ word_start_x, word_min_x: f32
+
+ break_width, break_max_x: f32
+ type := Codepoint_Type.Space
+ ptype := Codepoint_Type.Space
+ pcodepoint: rune
+
+ if max_rows == 0 || state.fontId == -1 || len(text) == 0 {
+ return
+ }
+
+ fs := &ctx.fs
+ fontstash.SetSize(fs, state.fontSize * scale)
+ fontstash.SetSpacing(fs, state.letterSpacing * scale)
+ fontstash.SetBlur(fs, state.fontBlur * scale)
+ fontstash.SetAlignHorizontal(fs, state.alignHorizontal)
+ fontstash.SetAlignVertical(fs, state.alignVertical)
+ fontstash.SetFont(fs, state.fontId)
+
+ break_x := break_row_width * scale
+ iter := fontstash.TextIterInit(fs, 0, 0, text^)
+ prev_iter := iter
+
+ q: fontstash.Quad
+ stopped_early: bool
+
+ for fontstash.TextIterNext(fs, &iter, &q) {
+ if iter.previousGlyphIndex < 0 && __allocTextAtlas(ctx) { // can not retrieve glyph?
+ iter = prev_iter
+ fontstash.TextIterNext(fs, &iter, &q) // try again
+ }
+ prev_iter = iter
+
+ switch iter.codepoint {
+ case '\t', '\v', '\f', ' ', 0x00a0:
+ // NBSP
+ type = .Space
+
+ case '\n':
+ type = .Space if pcodepoint == 13 else .Newline
+
+ case '\r':
+ type = .Space if pcodepoint == 10 else .Newline
+
+ case 0x0085:
+ // NEL
+ type = .Newline
+
+ case:
+ switch iter.codepoint {
+ case 0x4E00..=0x9FFF,
+ 0x3000..=0x30FF,
+ 0xFF00..=0xFFEF,
+ 0x1100..=0x11FF,
+ 0x3130..=0x318F,
+ 0xAC00..=0xD7AF:
+ type = .CJK
+ case:
+ type = .Char
+ }
+ }
+
+ if type == .Newline {
+ // Always handle new lines.
+ rows[nrows].start = row_start if row_start != -1 else iter.str
+ rows[nrows].end = row_end if row_end != -1 else iter.str
+ rows[nrows].width = row_width * invscale
+ rows[nrows].minx = row_min_x * invscale
+ rows[nrows].maxx = row_max_x * invscale
+ rows[nrows].next = iter.next
+ nrows += 1
+
+ if nrows >= max_rows {
+ stopped_early = true
+ break
+ }
+
+ // Set nil break point
+ break_end = row_start
+ break_width = 0.0
+ break_max_x = 0.0
+ // Indicate to skip the white space at the beginning of the row.
+ row_start = -1
+ row_end = -1
+ row_width = 0
+ row_min_x = 0
+ row_max_x = 0
+ } else {
+ if row_start == -1 {
+ // Skip white space until the beginning of the line
+ if type == .Char || type == .CJK {
+ // The current char is the row so far
+ row_start_x = iter.x
+ row_start = iter.str
+ row_end = iter.next
+ row_width = iter.nextx - row_start_x
+ row_min_x = q.x0 - row_start_x
+ row_max_x = q.x1 - row_start_x
+ word_start = iter.str
+ word_start_x = iter.x
+ word_min_x = q.x0 - row_start_x
+ // Set nil break point
+ break_end = row_start
+ break_width = 0.0
+ break_max_x = 0.0
+ }
+ } else {
+ next_width := iter.nextx - row_start_x
+
+ // track last non-white space character
+ if type == .Char || type == .CJK {
+ row_end = iter.next
+ row_width = iter.nextx - row_start_x
+ row_max_x = q.x1 - row_start_x
+ }
+ // track last end of a word
+ if ((ptype == .Char || ptype == .CJK) && type == .Space) || type == .CJK {
+ break_end = iter.str
+ break_width = row_width
+ break_max_x = row_max_x
+ }
+ // track last beginning of a word
+ if ((ptype == .Space && (type == .Char || type == .CJK)) || type == .CJK) {
+ word_start = iter.str
+ word_start_x = iter.x
+ word_min_x = q.x0
+ }
+
+ // Break to new line when a character is beyond break width.
+ if (type == .Char || type == .CJK) && next_width > break_x {
+ // The run length is too long, need to break to new line.
+ if break_end == row_start {
+ // The current word is longer than the row length, just break it from here.
+ rows[nrows].start = row_start
+ rows[nrows].end = iter.str
+ rows[nrows].width = row_width * invscale
+ rows[nrows].minx = row_min_x * invscale
+ rows[nrows].maxx = row_max_x * invscale
+ rows[nrows].next = iter.str
+ nrows += 1
+
+ if nrows >= max_rows {
+ stopped_early = true
+ break
+ }
+
+ row_start_x = iter.x
+ row_start = iter.str
+ row_end = iter.next
+ row_width = iter.nextx - row_start_x
+ row_min_x = q.x0 - row_start_x
+ row_max_x = q.x1 - row_start_x
+ word_start = iter.str
+ word_start_x = iter.x
+ word_min_x = q.x0 - row_start_x
+ } else {
+ // Break the line from the end of the last word, and start new line from the beginning of the new.
+ rows[nrows].start = row_start
+ rows[nrows].end = break_end
+ rows[nrows].width = break_width * invscale
+ rows[nrows].minx = row_min_x * invscale
+ rows[nrows].maxx = break_max_x * invscale
+ rows[nrows].next = word_start
+ nrows += 1
+ if nrows >= max_rows {
+ stopped_early = true
+ break
+ }
+ // Update row
+ row_start_x = word_start_x
+ row_start = word_start
+ row_end = iter.next
+ row_width = iter.nextx - row_start_x
+ row_min_x = word_min_x - row_start_x
+ row_max_x = q.x1 - row_start_x
+ }
+ // Set nil break point
+ break_end = row_start
+ break_width = 0.0
+ break_max_x = 0.0
+ }
+ }
+ }
+
+ pcodepoint = iter.codepoint
+ ptype = type
+ }
+
+ // Break the line from the end of the last word, and start new line from the beginning of the new.
+ if !stopped_early && row_start != -1 {
+ rows[nrows].start = row_start
+ rows[nrows].end = row_end
+ rows[nrows].width = row_width * invscale
+ rows[nrows].minx = row_min_x * invscale
+ rows[nrows].maxx = row_max_x * invscale
+ rows[nrows].next = iter.end
+ nrows += 1
+ }
+
+	// NOTE: a bit hacky, row.start / row.end are byte offsets into the string returned in `last`
+ last = text^
+ // advance early
+ next := rows[nrows-1].next
+ text^ = text[next:]
+	// the caller's for loop terminates once ok is false
+ ok = nrows != 0
+
+ return
+}
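+
+// TextBreakLines is written as an Odin range-for iterator: it consumes `text` and
+// reports ok=false once nothing is left, as TextBox above demonstrates. A minimal
+// sketch with a hypothetical row buffer and break width:
+//
+//	rows: [4]Text_Row
+//	rows_slice := rows[:]
+//	remaining := "a long paragraph of input text to wrap"
+//	for nrows, chunk in TextBreakLines(ctx, &remaining, 300, &rows_slice) {
+//		for row in rows[:nrows] {
+//			line := chunk[row.start:row.end]
+//			_ = line // render or measure the line here
+//		}
+//	}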
+
+// Measures the specified multi-line text string. Parameter bounds should be a pointer to [4]f32
+// if the bounding box of the text should be returned. The bounds values are [xmin, ymin, xmax, ymax].
+// Measured values are returned in local coordinate space.
+TextBoxBounds :: proc(
+ ctx: ^Context,
+ x, y: f32,
+ breakRowWidth: f32,
+ input: string,
+ bounds: ^[4]f32,
+) {
+ state := __getState(ctx)
+ rows: [2]Text_Row
+ scale := __getFontScale(state) * ctx.devicePxRatio
+ invscale := f32(1.0) / scale
+
+ if state.fontId == -1 {
+ if bounds != nil {
+ bounds^ = {}
+ }
+
+ return
+ }
+
+ // alignment
+ halign := state.alignHorizontal
+ old_align := state.alignHorizontal
+ defer state.alignHorizontal = old_align
+ state.alignHorizontal = .LEFT
+
+ _, _, lineh := TextMetrics(ctx)
+ minx, maxx := x, x
+ miny, maxy := y, y
+
+ fs := &ctx.fs
+ fontstash.SetSize(fs, state.fontSize * scale)
+ fontstash.SetSpacing(fs, state.letterSpacing * scale)
+ fontstash.SetBlur(fs, state.fontBlur * scale)
+ fontstash.SetAlignHorizontal(fs, state.alignHorizontal)
+ fontstash.SetAlignVertical(fs, state.alignVertical)
+ fontstash.SetFont(fs, state.fontId)
+ rminy, rmaxy := fontstash.LineBounds(fs, 0)
+ rminy *= invscale
+ rmaxy *= invscale
+
+ input := input
+ rows_mod := rows[:]
+ y := y
+
+ for nrows, input_last in TextBreakLines(ctx, &input, breakRowWidth, &rows_mod) {
+ for row in rows[:nrows] {
+ rminx, rmaxx, dx: f32
+
+ // Horizontal bounds
+ switch halign {
+ case .LEFT: dx = 0
+ case .CENTER: dx = breakRowWidth*0.5 - row.width*0.5
+ case .RIGHT: dx = breakRowWidth - row.width
+ }
+
+ rminx = x + row.minx + dx
+ rmaxx = x + row.maxx + dx
+ minx = min(minx, rminx)
+ maxx = max(maxx, rmaxx)
+ // Vertical bounds.
+ miny = min(miny, y + rminy)
+ maxy = max(maxy, y + rmaxy)
+
+ y += lineh * state.lineHeight
+ }
+ }
+
+ if bounds != nil {
+ bounds^ = {minx, miny, maxx, maxy}
+ }
+}
+
+Glyph_Position :: struct {
+ str: int,
+ x: f32,
+ minx, maxx: f32,
+}
+
+// Calculates the glyph x positions of the specified text.
+// Measured values are returned in local coordinate space.
+TextGlyphPositions :: proc(
+ ctx: ^Context,
+ x, y: f32,
+ text: string,
+ positions: ^[]Glyph_Position,
+) -> int {
+ state := __getState(ctx)
+ scale := __getFontScale(state) * ctx.devicePxRatio
+
+ if state.fontId == -1 || len(text) == 0 {
+ return 0
+ }
+
+ fs := &ctx.fs
+ fontstash.SetSize(fs, state.fontSize*scale)
+ fontstash.SetSpacing(fs, state.letterSpacing*scale)
+ fontstash.SetBlur(fs, state.fontBlur*scale)
+ fontstash.SetAlignHorizontal(fs, state.alignHorizontal)
+ fontstash.SetAlignVertical(fs, state.alignVertical)
+ fontstash.SetFont(fs, state.fontId)
+
+ iter := fontstash.TextIterInit(fs, 0, 0, text)
+ prev_iter := iter
+ q: fontstash.Quad
+ npos: int
+ for fontstash.TextIterNext(fs, &iter, &q) {
+ if iter.previousGlyphIndex < 0 && __allocTextAtlas(ctx) { // can not retrieve glyph?
+ iter = prev_iter
+ fontstash.TextIterNext(fs, &iter, &q) // try again
+ }
+
+ prev_iter = iter
+ positions[npos].str = iter.str
+ positions[npos].x = iter.x + x
+ positions[npos].minx = min(iter.x, q.x0) + x
+ positions[npos].maxx = max(iter.nextx, q.x1) + x
+ npos += 1
+
+ if npos >= len(positions) {
+ break
+ }
+ }
+
+ return npos
+}
\ No newline at end of file
diff --git a/vendor/raylib/raylib.odin b/vendor/raylib/raylib.odin
index 2bedf77c4..4236839f0 100644
--- a/vendor/raylib/raylib.odin
+++ b/vendor/raylib/raylib.odin
@@ -1133,7 +1133,7 @@ foreign lib {
SetGesturesEnabled :: proc(flags: Gestures) --- // Enable a set of gestures using flags
IsGestureDetected :: proc(gesture: Gesture) -> bool --- // Check if a gesture have been detected
- GetGestureDetected :: proc() -> Gesture --- // Get latest detected gesture
+ GetGestureDetected :: proc() -> Gestures --- // Get latest detected gesture
GetGestureHoldDuration :: proc() -> f32 --- // Get gesture hold time in milliseconds
GetGestureDragVector :: proc() -> Vector2 --- // Get gesture drag vector
GetGestureDragAngle :: proc() -> f32 --- // Get gesture drag angle
diff --git a/vendor/vulkan/_gen/create_vulkan_odin_wrapper.py b/vendor/vulkan/_gen/create_vulkan_odin_wrapper.py
index 26bfc0a82..c3497aa8f 100644
--- a/vendor/vulkan/_gen/create_vulkan_odin_wrapper.py
+++ b/vendor/vulkan/_gen/create_vulkan_odin_wrapper.py
@@ -429,7 +429,93 @@ def parse_enums(f):
f.write("{} :: distinct bit_set[{}; Flags]\n".format(flag.ljust(max_len), flag_name))
f.write("{} :: enum u32 {{}}\n".format(flag_name.ljust(max_len)))
+def parse_fake_enums(f):
+ data = re.findall(r"static const Vk(\w+FlagBits2) VK_(\w+?) = (\w+);", src, re.S)
+
+ data.sort(key=lambda x: x[0])
+ fake_enums = {}
+
+ for type_name, name, value in data:
+ if type_name in fake_enums:
+ fake_enums[type_name].append((name,value))
+ else:
+ fake_enums[type_name] = [(name, value)]
+
+ for name in fake_enums.keys():
+ flags_name = name.replace("FlagBits", "Flags")
+ enum_name = name.replace("FlagBits", "Flag")
+ f.write("{} :: distinct bit_set[{}; Flags64]\n".format(flags_name, enum_name))
+ f.write("{} :: enum Flags64 {{\n".format(name.replace("FlagBits", "Flag")))
+
+ prefix = to_snake_case(name).upper()
+ suffix = None
+ for ext in ext_suffixes:
+ prefix_new = remove_suffix(prefix, "_"+ext)
+ assert suffix is None
+ if prefix_new != prefix:
+ suffix = "_"+ext
+ prefix = prefix_new
+ break
+
+
+ prefix = prefix.replace("_FLAG_BITS2", "_2")
+ prefix += "_"
+
+ ff = []
+
+ groups = []
+ flags = {}
+
+ names_and_values = fake_enums[name]
+
+ for name, value in names_and_values:
+ value = value.replace("ULL", "")
+ n = fix_enum_name(name, prefix, suffix, True)
+ try:
+ v = fix_enum_value(value, prefix, suffix, True)
+ except FlagError as e:
+ v = int(str(e))
+ groups.append((n, v))
+ continue
+ except IgnoreFlagError as e:
+ groups.append((n, 0))
+ continue
+
+ if n == v:
+ continue
+ try:
+ flags[int(v)] = n
+ except ValueError as e:
+ pass
+
+ if v == "NONE":
+ continue
+
+ ff.append((n, v))
+
+ max_flag_value = max([int(v) for n, v in ff if is_int(v)] + [0])
+ max_group_value = max([int(v) for n, v in groups if is_int(v)] + [0])
+ if max_flag_value < max_group_value:
-GetShaderiv :: proc "c" (shader: Shader, pname: Enum) -> (p: i32) {
- @(default_calling_convention="c")
+GetShaderiv :: proc "contextless" (shader: Shader, pname: Enum) -> (p: i32) {
foreign webgl {
@(link_name="GetShaderiv")
- _GetShaderiv :: proc "c" (shader: Shader, pname: Enum, p: ^i32) ---
+ _GetShaderiv :: proc "contextless" (shader: Shader, pname: Enum, p: ^i32) ---
}
_GetShaderiv(shader, pname, &p)
return
}
-GetProgramInfoLog :: proc "c" (program: Program, buf: []byte) -> string {
- @(default_calling_convention="c")
+GetProgramInfoLog :: proc "contextless" (program: Program, buf: []byte) -> string {
foreign webgl {
@(link_name="GetProgramInfoLog")
- _GetProgramInfoLog :: proc "c" (program: Program, buf: []byte, length: ^int) ---
+ _GetProgramInfoLog :: proc "contextless" (program: Program, buf: []byte, length: ^int) ---
}
length: int
@@ -230,11 +228,10 @@ GetProgramInfoLog :: proc "c" (program: Program, buf: []byte) -> string {
return string(buf[:length])
}
-GetShaderInfoLog :: proc "c" (shader: Shader, buf: []byte) -> string {
- @(default_calling_convention="c")
+GetShaderInfoLog :: proc "contextless" (shader: Shader, buf: []byte) -> string {
foreign webgl {
@(link_name="GetShaderInfoLog")
- _GetShaderInfoLog :: proc "c" (shader: Shader, buf: []byte, length: ^int) ---
+ _GetShaderInfoLog :: proc "contextless" (shader: Shader, buf: []byte, length: ^int) ---
}
length: int
@@ -244,27 +241,27 @@ GetShaderInfoLog :: proc "c" (shader: Shader, buf: []byte) -> string {
-BufferDataSlice :: proc "c" (target: Enum, slice: $S/[]$E, usage: Enum) {
+BufferDataSlice :: proc "contextless" (target: Enum, slice: $S/[]$E, usage: Enum) {
BufferData(target, len(slice)*size_of(E), raw_data(slice), usage)
}
-BufferSubDataSlice :: proc "c" (target: Enum, offset: uintptr, slice: $S/[]$E) {
+BufferSubDataSlice :: proc "contextless" (target: Enum, offset: uintptr, slice: $S/[]$E) {
BufferSubData(target, offset, len(slice)*size_of(E), raw_data(slice), usage)
}
-CompressedTexImage2DSlice :: proc "c" (target: Enum, level: i32, internalformat: Enum, width, height: i32, border: i32, slice: $S/[]$E) {
+CompressedTexImage2DSlice :: proc "contextless" (target: Enum, level: i32, internalformat: Enum, width, height: i32, border: i32, slice: $S/[]$E) {
CompressedTexImage2DSlice(target, level, internalformat, width, height, border, len(slice)*size_of(E), raw_data(slice))
}
-CompressedTexSubImage2DSlice :: proc "c" (target: Enum, level: i32, xoffset, yoffset, width, height: i32, format: Enum, slice: $S/[]$E) {
+CompressedTexSubImage2DSlice :: proc "contextless" (target: Enum, level: i32, xoffset, yoffset, width, height: i32, format: Enum, slice: $S/[]$E) {
CompressedTexSubImage2DSlice(target, level, level, xoffset, yoffset, width, height, format, len(slice)*size_of(E), raw_data(slice))
}
-ReadPixelsSlice :: proc "c" (x, y, width, height: i32, format: Enum, type: Enum, slice: $S/[]$E) {
+ReadPixelsSlice :: proc "contextless" (x, y, width, height: i32, format: Enum, type: Enum, slice: $S/[]$E) {
ReadnPixels(x, y, width, height, format, type, len(slice)*size_of(E), raw_data(slice))
}
-TexImage2DSlice :: proc "c" (target: Enum, level: i32, internalformat: Enum, width, height: i32, border: i32, format, type: Enum, slice: $S/[]$E) {
+TexImage2DSlice :: proc "contextless" (target: Enum, level: i32, internalformat: Enum, width, height: i32, border: i32, format, type: Enum, slice: $S/[]$E) {
TexImage2D(target, level, internalformat, width, height, border, format, type, len(slice)*size_of(E), raw_data(slice))
}
-TexSubImage2DSlice :: proc "c" (target: Enum, level: i32, xoffset, yoffset, width, height: i32, format, type: Enum, slice: $S/[]$E) {
+TexSubImage2DSlice :: proc "contextless" (target: Enum, level: i32, xoffset, yoffset, width, height: i32, format, type: Enum, slice: $S/[]$E) {
TexSubImage2D(target, level, xoffset, yoffset, width, height, format, type, len(slice)*size_of(E), raw_data(slice))
}
\ No newline at end of file
diff --git a/vendor/wasm/WebGL/webgl2.odin b/vendor/wasm/WebGL/webgl2.odin
index a1d55b703..d360bc396 100644
--- a/vendor/wasm/WebGL/webgl2.odin
+++ b/vendor/wasm/WebGL/webgl2.odin
@@ -10,13 +10,13 @@ Sync :: distinct u32
TransformFeedback :: distinct u32
VertexArrayObject :: distinct u32
-IsWebGL2Supported :: proc "c" () -> bool {
+IsWebGL2Supported :: proc "contextless" () -> bool {
major, minor: i32
GetWebGLVersion(&major, &minor)
return major >= 2
}
-@(default_calling_convention="c")
+@(default_calling_convention="contextless")
foreign webgl2 {
/* Buffer objects */
CopyBufferSubData :: proc(readTarget, writeTarget: Enum, readOffset, writeOffset: int, size: int) ---
@@ -110,7 +110,7 @@ foreign webgl2 {
GetActiveUniformBlockName :: proc(program: Program, uniformBlockIndex: i32, buf: []byte) -> string {
foreign webgl2 {
- _GetActiveUniformBlockName :: proc(program: Program, uniformBlockIndex: i32, buf: []byte, length: ^int) ---
+ _GetActiveUniformBlockName :: proc "contextless" (program: Program, uniformBlockIndex: i32, buf: []byte, length: ^int) ---
}
n: int
_GetActiveUniformBlockName(program, uniformBlockIndex, buf, &n)
@@ -118,65 +118,65 @@ GetActiveUniformBlockName :: proc(program: Program, uniformBlockIndex: i32, buf:
}
-Uniform1uiv :: proc "c" (location: i32, v: u32) {
+Uniform1uiv :: proc "contextless" (location: i32, v: u32) {
Uniform1ui(location, v)
}
-Uniform2uiv :: proc "c" (location: i32, v: glm.uvec2) {
+Uniform2uiv :: proc "contextless" (location: i32, v: glm.uvec2) {
Uniform2ui(location, v.x, v.y)
}
-Uniform3uiv :: proc "c" (location: i32, v: glm.uvec3) {
+Uniform3uiv :: proc "contextless" (location: i32, v: glm.uvec3) {
Uniform3ui(location, v.x, v.y, v.z)
}
-Uniform4uiv :: proc "c" (location: i32, v: glm.uvec4) {
+Uniform4uiv :: proc "contextless" (location: i32, v: glm.uvec4) {
Uniform4ui(location, v.x, v.y, v.z, v.w)
}
-UniformMatrix3x2fv :: proc "c" (location: i32, m: glm.mat3x2) {
+UniformMatrix3x2fv :: proc "contextless" (location: i32, m: glm.mat3x2) {
foreign webgl2 {
- _UniformMatrix3x2fv :: proc "c" (location: i32, addr: [^]f32) ---
+ _UniformMatrix3x2fv :: proc "contextless" (location: i32, addr: [^]f32) ---
}
array := matrix_flatten(m)
_UniformMatrix3x2fv(location, &array[0])
}
-UniformMatrix4x2fv :: proc "c" (location: i32, m: glm.mat4x2) {
+UniformMatrix4x2fv :: proc "contextless" (location: i32, m: glm.mat4x2) {
foreign webgl2 {
- _UniformMatrix4x2fv :: proc "c" (location: i32, addr: [^]f32) ---
+ _UniformMatrix4x2fv :: proc "contextless" (location: i32, addr: [^]f32) ---
}
array := matrix_flatten(m)
_UniformMatrix4x2fv(location, &array[0])
}
-UniformMatrix2x3fv :: proc "c" (location: i32, m: glm.mat2x3) {
+UniformMatrix2x3fv :: proc "contextless" (location: i32, m: glm.mat2x3) {
foreign webgl2 {
- _UniformMatrix2x3fv :: proc "c" (location: i32, addr: [^]f32) ---
+ _UniformMatrix2x3fv :: proc "contextless" (location: i32, addr: [^]f32) ---
}
array := matrix_flatten(m)
_UniformMatrix2x3fv(location, &array[0])
}
-UniformMatrix4x3fv :: proc "c" (location: i32, m: glm.mat4x3) {
+UniformMatrix4x3fv :: proc "contextless" (location: i32, m: glm.mat4x3) {
foreign webgl2 {
- _UniformMatrix4x3fv :: proc "c" (location: i32, addr: [^]f32) ---
+ _UniformMatrix4x3fv :: proc "contextless" (location: i32, addr: [^]f32) ---
}
array := matrix_flatten(m)
_UniformMatrix4x3fv(location, &array[0])
}
-UniformMatrix2x4fv :: proc "c" (location: i32, m: glm.mat2x4) {
+UniformMatrix2x4fv :: proc "contextless" (location: i32, m: glm.mat2x4) {
foreign webgl2 {
- _UniformMatrix2x4fv :: proc "c" (location: i32, addr: [^]f32) ---
+ _UniformMatrix2x4fv :: proc "contextless" (location: i32, addr: [^]f32) ---
}
array := matrix_flatten(m)
_UniformMatrix2x4fv(location, &array[0])
}
-UniformMatrix3x4fv :: proc "c" (location: i32, m: glm.mat3x4) {
+UniformMatrix3x4fv :: proc "contextless" (location: i32, m: glm.mat3x4) {
foreign webgl2 {
- _UniformMatrix3x4fv :: proc "c" (location: i32, addr: [^]f32) ---
+ _UniformMatrix3x4fv :: proc "contextless" (location: i32, addr: [^]f32) ---
}
array := matrix_flatten(m)
_UniformMatrix3x4fv(location, &array[0])
}
-VertexAttribI4iv :: proc "c" (index: i32, v: glm.ivec4) {
+VertexAttribI4iv :: proc "contextless" (index: i32, v: glm.ivec4) {
VertexAttribI4i(index, v.x, v.y, v.z, v.w)
}
-VertexAttribI4uiv :: proc "c" (index: i32, v: glm.uvec4) {
+VertexAttribI4uiv :: proc "contextless" (index: i32, v: glm.uvec4) {
VertexAttribI4ui(index, v.x, v.y, v.z, v.w)
}