mirror of https://github.com/odin-lang/Odin.git
Refactor the test runner
Changes:
- Support multi-threaded testing.
- Support `set_fail_timeout` on all platforms.
- Display an animated progress report.
- Set up all tests with a context logger.
- Give all tests their own separate custom allocators.
- Support tracking test memory usage.
- Display a summary of the failed tests at the end.
- Let users select only specific tests to run.
- Support copying failed tests to the clipboard to run again.
- Support catching SIGINT (CTRL-C) to cancel early.
- Record the context in cleanup procs.
- Write all log messages to STDERR for easy redirection.
- Possibly more I've forgotten.

New Options:
- `-define:test_threads=N`: Specify the thread count.
- `-define:test_thread_memory=B`: Specify the initial memory block size, in bytes, given to each thread.
- `-define:test_track_memory=true`: Track the memory usage of individual tests.
- `-define:test_fancy=false`: Disable the animated progress report.
- `-define:test_select=package.test_name,...`: Run only selected tests.
- `-define:test_clipboard=true`: Copy the names of failed tests to the clipboard.
- `-define:test_progress_width=24`: Change the width of the animated progress bars.
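For example, the defines combine with an ordinary `odin test` invocation (the package paths and test names here are illustrative, not from the commit):

    odin test core/strings -define:test_threads=4 -define:test_track_memory=true
    odin test . -define:test_select=mypkg.test_parser,mypkg.test_lexer -define:test_fancy=false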
core/testing/events.odin (new file)
@@ -0,0 +1,48 @@
//+private
package testing

import "base:runtime"
import "core:sync/chan"
import "core:time"

Test_State :: enum {
	Ready,
	Running,
	Successful,
	Failed,
}

Update_Channel :: chan.Chan(Channel_Event)
Update_Channel_Sender :: chan.Chan(Channel_Event, .Send)

Task_Channel :: struct {
	channel: Update_Channel,
	test_index: int,
}

Event_New_Test :: struct {
	test_index: int,
}

Event_State_Change :: struct {
	new_state: Test_State,
}

Event_Set_Fail_Timeout :: struct {
	at_time: time.Time,
	location: runtime.Source_Code_Location,
}

Event_Log_Message :: struct {
	level: runtime.Logger_Level,
	text: string,
	time: time.Time,
	formatted_text: string,
}

Channel_Event :: union {
	Event_New_Test,
	Event_State_Change,
	Event_Set_Fail_Timeout,
	Event_Log_Message,
}
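As a point of reference, here is a minimal hypothetical sketch (not part of the commit) of the send/receive cycle these types exist for, using only procedures that also appear elsewhere in this diff:

example_channel_roundtrip :: proc() {
	// One buffered channel per worker thread, as the runner does below.
	ch, err := chan.create_buffered(Update_Channel, 16, context.allocator)
	assert(err == nil)
	defer chan.destroy(&ch)

	// The test side holds a send-only view of the channel.
	sender := chan.as_send(ch)
	chan.send(sender, Event_State_Change{new_state = .Running})

	// The runner side drains pending events without blocking.
	for ev in chan.try_recv(ch) {
		#partial switch event in ev {
		case Event_State_Change:
			_ = event.new_state // React to the state change here.
		}
	}
}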
core/testing/logging.odin (new file)
@@ -0,0 +1,71 @@
//+private
package testing

import "base:runtime"
import "core:fmt"
import pkg_log "core:log"
import "core:strings"
import "core:sync/chan"
import "core:time"

Default_Test_Logger_Opts :: runtime.Logger_Options {
	.Level,
	.Terminal_Color,
	.Short_File_Path,
	.Line,
	.Procedure,
	.Date, .Time,
}

Log_Message :: struct {
	level: runtime.Logger_Level,
	text: string,
	time: time.Time,
	// `text` may be allocated differently, depending on where a log message
	// originates from.
	allocator: runtime.Allocator,
}

test_logger_proc :: proc(logger_data: rawptr, level: runtime.Logger_Level, text: string, options: runtime.Logger_Options, location := #caller_location) {
	t := cast(^T)logger_data

	if level >= .Error {
		t.error_count += 1
	}

	cloned_text, clone_error := strings.clone(text, t._log_allocator)
	assert(clone_error == nil, "Error while cloning string in test thread logger proc.")

	now := time.now()

	chan.send(t.channel, Event_Log_Message {
		level = level,
		text = cloned_text,
		time = now,
		formatted_text = format_log_text(level, text, options, location, now, t._log_allocator),
	})
}

runner_logger_proc :: proc(logger_data: rawptr, level: runtime.Logger_Level, text: string, options: runtime.Logger_Options, location := #caller_location) {
	log_messages := cast(^[dynamic]Log_Message)logger_data

	now := time.now()

	append(log_messages, Log_Message {
		level = level,
		text = format_log_text(level, text, options, location, now),
		time = now,
		allocator = context.allocator,
	})
}

format_log_text :: proc(level: runtime.Logger_Level, text: string, options: runtime.Logger_Options, location: runtime.Source_Code_Location, at_time: time.Time, allocator := context.allocator) -> string {
	backing: [1024]byte
	buf := strings.builder_from_bytes(backing[:])

	pkg_log.do_level_header(options, &buf, level)
	pkg_log.do_time_header(options, &buf, at_time)
	pkg_log.do_location_header(options, &buf, location)

	return fmt.aprintf("%s%s", strings.to_string(buf), text, allocator = allocator)
}
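To show where these procedures sit in practice, here is a hypothetical user test (not part of the commit): the runner installs a `context.logger` backed by `test_logger_proc` in each test thread, so plain `core:log` calls are cloned, timestamped, and forwarded over the update channel, then printed to STDERR in batches.

package example_tests

import "core:log"
import "core:testing"

@(test)
logging_example :: proc(t: ^testing.T) {
	// Routed through test_logger_proc to the runner thread.
	log.info("some progress note")
	// Level .Debug is only enabled in -debug builds; the runner sets
	// lowest_level accordingly.
	log.debug("extra detail")
}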
core/testing/reporting.odin (new file)
@@ -0,0 +1,312 @@
//+private
package testing

import "base:runtime"
import "core:encoding/ansi"
import "core:fmt"
import "core:io"
import "core:mem"
import "core:path/filepath"
import "core:strings"

// Definitions of colors for use in the test runner.
SGR_RESET   :: ansi.CSI + ansi.RESET           + ansi.SGR
SGR_READY   :: ansi.CSI + ansi.FG_BRIGHT_BLACK + ansi.SGR
SGR_RUNNING :: ansi.CSI + ansi.FG_YELLOW       + ansi.SGR
SGR_SUCCESS :: ansi.CSI + ansi.FG_GREEN        + ansi.SGR
SGR_FAILED  :: ansi.CSI + ansi.FG_RED          + ansi.SGR

// More than enough bytes to cover long package names, long test names, dozens
// of ANSI codes, et cetera.
LINE_BUFFER_SIZE :: (PROGRESS_WIDTH * 8 + 256) * runtime.Byte

PROGRESS_COLUMN_SPACING :: 2

Package_Run :: struct {
	name: string,
	header: string,

	frame_ready: bool,

	redraw_buffer: [LINE_BUFFER_SIZE]byte,
	redraw_string: string,

	last_change_state: Test_State,
	last_change_name: string,

	tests: []Internal_Test,
	test_states: []Test_State,
}

Report :: struct {
	packages: []Package_Run,
	packages_by_name: map[string]^Package_Run,

	pkg_column_len: int,
	test_column_len: int,

	all_tests: []Internal_Test,
	all_test_states: []Test_State,
}

// Organize all tests by package and sort out test state data.
make_report :: proc(internal_tests: []Internal_Test) -> (report: Report, error: runtime.Allocator_Error) {
	assert(len(internal_tests) > 0, "make_report called with no tests")

	packages: [dynamic]Package_Run

	report.all_tests = internal_tests
	report.all_test_states = make([]Test_State, len(internal_tests)) or_return

	// First, figure out what belongs where.
	#no_bounds_check cur_pkg := internal_tests[0].pkg
	pkg_start: int

	// This loop assumes the tests are sorted by package already.
	for it, index in internal_tests {
		if cur_pkg != it.pkg {
			#no_bounds_check {
				append(&packages, Package_Run {
					name = cur_pkg,
					tests = report.all_tests[pkg_start:index],
					test_states = report.all_test_states[pkg_start:index],
				}) or_return
			}

			pkg_start = index
			report.pkg_column_len = max(report.pkg_column_len, len(cur_pkg))
			cur_pkg = it.pkg
		}
		report.test_column_len = max(report.test_column_len, len(it.name))
	}

	// Handle the last package.
	#no_bounds_check {
		append(&packages, Package_Run {
			name = cur_pkg,
			header = cur_pkg,
			tests = report.all_tests[pkg_start:],
			test_states = report.all_test_states[pkg_start:],
		}) or_return
	}

	report.pkg_column_len = PROGRESS_COLUMN_SPACING + max(report.pkg_column_len, len(cur_pkg))

	shrink(&packages) or_return

	for &pkg in packages {
		pkg.header = fmt.aprintf("%- *[1]s[", pkg.name, report.pkg_column_len)
		assert(len(pkg.header) > 0, "Error allocating package header string.")

		// This is safe because the array is done resizing, and it has the same
		// lifetime as the map.
		report.packages_by_name[pkg.name] = &pkg
	}

	// It's okay to discard the dynamic array's allocator information here,
	// because its capacity has been shrunk to its length, it was allocated by
	// the caller's context allocator, and it will be deallocated by the same.
	//
	// `delete_slice` is equivalent to `delete_dynamic_array` in this case.
	report.packages = packages[:]

	return
}

destroy_report :: proc(report: ^Report) {
	for pkg in report.packages {
		delete(pkg.header)
	}

	delete(report.packages)
	delete(report.packages_by_name)
	delete(report.all_test_states)
}

redraw_package :: proc(w: io.Writer, pkg: ^Package_Run) {
	if pkg.frame_ready {
		io.write_string(w, pkg.redraw_string)
		return
	}

	// Write the output line here so we can cache it.
	line_builder := strings.builder_from_bytes(pkg.redraw_buffer[:])
	line_writer := strings.to_writer(&line_builder)

	highest_run_index: int
	failed_count: int
	done_count: int
	#no_bounds_check for i := 0; i < len(pkg.test_states); i += 1 {
		switch pkg.test_states[i] {
		case .Ready:
			continue
		case .Running:
			highest_run_index = max(highest_run_index, i)
		case .Successful:
			done_count += 1
		case .Failed:
			failed_count += 1
			done_count += 1
		}
	}

	start := max(0, highest_run_index - (PROGRESS_WIDTH - 1))
	end := min(start + PROGRESS_WIDTH, len(pkg.test_states))

	// This variable is to keep track of the last ANSI code emitted, in
	// order to avoid repeating the same code over in a sequence.
	//
	// This should help reduce screen flicker.
	last_state := Test_State(-1)

	io.write_string(line_writer, pkg.header)

	#no_bounds_check for state in pkg.test_states[start:end] {
		switch state {
		case .Ready:
			if last_state != state {
				io.write_string(line_writer, SGR_READY)
				last_state = state
			}
		case .Running:
			if last_state != state {
				io.write_string(line_writer, SGR_RUNNING)
				last_state = state
			}
		case .Successful:
			if last_state != state {
				io.write_string(line_writer, SGR_SUCCESS)
				last_state = state
			}
		case .Failed:
			if last_state != state {
				io.write_string(line_writer, SGR_FAILED)
				last_state = state
			}
		}
		io.write_byte(line_writer, '|')
	}

	for _ in 0 ..< PROGRESS_WIDTH - (end - start) {
		io.write_byte(line_writer, ' ')
	}

	io.write_string(line_writer, SGR_RESET + "] ")

	ticker: string
	if done_count == len(pkg.test_states) {
		ticker = "[package done]"
		if failed_count > 0 {
			ticker = fmt.tprintf("%s (" + SGR_FAILED + "%i" + SGR_RESET + " failed)", ticker, failed_count)
		}
	} else {
		if len(pkg.last_change_name) == 0 {
			#no_bounds_check pkg.last_change_name = pkg.tests[0].name
		}

		switch pkg.last_change_state {
		case .Ready:
			ticker = fmt.tprintf(SGR_READY + "%s" + SGR_RESET, pkg.last_change_name)
		case .Running:
			ticker = fmt.tprintf(SGR_RUNNING + "%s" + SGR_RESET, pkg.last_change_name)
		case .Failed:
			ticker = fmt.tprintf(SGR_FAILED + "%s" + SGR_RESET, pkg.last_change_name)
		case .Successful:
			ticker = fmt.tprintf(SGR_SUCCESS + "%s" + SGR_RESET, pkg.last_change_name)
		}
	}

	if done_count == len(pkg.test_states) {
		fmt.wprintfln(line_writer, " % 4i :: %s",
			len(pkg.test_states),
			ticker,
		)
	} else {
		fmt.wprintfln(line_writer, "% 4i/% 4i :: %s",
			done_count,
			len(pkg.test_states),
			ticker,
		)
	}

	pkg.redraw_string = strings.to_string(line_builder)
	pkg.frame_ready = true
	io.write_string(w, pkg.redraw_string)
}

redraw_report :: proc(w: io.Writer, report: Report) {
	// If we print a line longer than the user's terminal can handle, it may
	// wrap around, shifting the progress report out of alignment.
	//
	// There are ways to get the current terminal width, and that would be the
	// ideal way to handle this, but it would require system-specific code such
	// as setting STDIN to be non-blocking in order to read the response from
	// the ANSI DSR escape code, or reading environment variables.
	//
	// The DECAWM escape codes control whether or not the terminal will wrap
	// long lines or overwrite the last visible character.
	// This should be fine for now.
	//
	// Note that we only do this for the animated summary; log messages are
	// still perfectly fine to wrap, as they're printed in their own batch,
	// whereas the animation depends on each package being only on one line.
	//
	// Of course, if you resize your terminal while it's printing, things can
	// still break...
	fmt.wprint(w, ansi.CSI + ansi.DECAWM_OFF)
	for &pkg in report.packages {
		redraw_package(w, &pkg)
	}
	fmt.wprint(w, ansi.CSI + ansi.DECAWM_ON)
}

needs_to_redraw :: proc(report: Report) -> bool {
	for pkg in report.packages {
		if !pkg.frame_ready {
			return true
		}
	}

	return false
}

draw_status_bar :: proc(w: io.Writer, threads_string: string, total_done_count, total_test_count: int) {
	if total_done_count != total_test_count {
		fmt.wprintfln(w,
			"%s % 4i/% 4i :: total",
			threads_string,
			total_done_count,
			total_test_count)
	}
}

write_memory_report :: proc(w: io.Writer, tracker: ^mem.Tracking_Allocator, pkg, name: string) {
	fmt.wprintf(w,
		"<% 10M/% 10M> <% 10M> (% 5i/% 5i) :: %s.%s",
		tracker.current_memory_allocated,
		tracker.total_memory_allocated,
		tracker.peak_memory_allocated,
		tracker.total_free_count,
		tracker.total_allocation_count,
		pkg,
		name)

	for ptr, entry in tracker.allocation_map {
		fmt.wprintf(w,
			"\n +++ leak % 10M @ %p [%s:%i:%s()]",
			entry.size,
			ptr,
			filepath.base(entry.location.file_path),
			entry.location.line,
			entry.location.procedure)
	}

	for entry in tracker.bad_free_array {
		fmt.wprintf(w,
			"\n +++ bad free @ %p [%s:%i:%s()]",
			entry.memory,
			filepath.base(entry.location.file_path),
			entry.location.line,
			entry.location.procedure)
	}
}
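For reference, with `-define:test_track_memory=true` each finished test yields one report line, plus one line per leak or bad free. Illustrative output with made-up values; the exact rendering of the `%M` byte-size verb may differ:

    <       0 B/  1.00 KiB> <  1.00 KiB> (    4/    4) :: mypkg.test_parser
     +++ leak       24 B @ 0x7F3A09C01230 [parser.odin:42:parse()]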
core/testing/runner.odin (modified)
@@ -1,73 +1,675 @@
//+private
package testing

import "base:intrinsics"
import "base:runtime"
import "core:bytes"
import "core:encoding/ansi"
import "core:encoding/base64"
import "core:fmt"
import "core:io"
import pkg_log "core:log"
import "core:mem"
import "core:os"
import "core:slice"
import "core:strings"
import "core:sync/chan"
import "core:thread"
import "core:time"

// Keep `-vet` happy.
base64_encode :: base64.encode
_ :: pkg_log
_ :: strings

// Specify how many threads to use when running tests.
TEST_THREADS : int : #config(test_threads, 0)
// Track the memory used by each test.
TRACKING_MEMORY : bool : #config(test_track_memory, false)
// Specify how much memory each thread allocator starts with.
PER_THREAD_MEMORY : int : #config(test_thread_memory, mem.ROLLBACK_STACK_DEFAULT_BLOCK_SIZE)
// Select a specific set of tests to run by name.
TEST_SELECT : string : #config(test_select, "")
// Show the fancy animated progress report.
FANCY_OUTPUT : bool : #config(test_fancy, true)
// Copy failed tests to the clipboard when done.
USE_CLIPBOARD : bool : #config(test_clipboard, false)
// How many test results to show at a time per package.
PROGRESS_WIDTH : int : #config(test_progress_width, 24)

reset_t :: proc(t: ^T) {
	clear(&t.cleanups)
	t.error_count = 0
}

end_t :: proc(t: ^T) {
	for i := len(t.cleanups)-1; i >= 0; i -= 1 {
		#no_bounds_check c := t.cleanups[i]
		context = c.ctx
		c.procedure(c.user_data)
	}

	delete(t.cleanups)
	t.cleanups = {}
}

Task_Data :: struct {
	it: Internal_Test,
	t: T,
	allocator_index: int,
}

Task_Timeout :: struct {
	test_index: int,
	at_time: time.Time,
	location: runtime.Source_Code_Location,
}

run_test_task :: proc(task: thread.Task) {
	data := cast(^Task_Data)(task.data)

	chan.send(data.t.channel, Event_New_Test {
		test_index = task.user_index,
	})

	chan.send(data.t.channel, Event_State_Change {
		new_state = .Running,
	})

	context.logger = {
		procedure = test_logger_proc,
		data = &data.t,
		lowest_level = .Debug if ODIN_DEBUG else .Info,
		options = Default_Test_Logger_Opts,
	}

	free_all(context.temp_allocator)

	run_internal_test(&data.t, data.it)

	end_t(&data.t)

	new_state : Test_State = .Failed if failed(&data.t) else .Successful

	chan.send(data.t.channel, Event_State_Change {
		new_state = new_state,
	})
}

runner :: proc(internal_tests: []Internal_Test) -> bool {
	BATCH_BUFFER_SIZE :: 32 * mem.Kilobyte
	POOL_BLOCK_SIZE :: 16 * mem.Kilobyte
	CLIPBOARD_BUFFER_SIZE :: 16 * mem.Kilobyte

	BUFFERED_EVENTS_PER_CHANNEL :: 16
	RESERVED_LOG_MESSAGES :: 64
	RESERVED_TEST_FAILURES :: 64

	ERROR_STRING_TIMEOUT : string : "Test timed out."
	ERROR_STRING_UNKNOWN : string : "Test failed for unknown reasons."
	OSC_WINDOW_TITLE : string : ansi.OSC + ansi.WINDOW_TITLE + ";Odin test runner (%i/%i)" + ansi.ST

	safe_delete_string :: proc(s: string, allocator := context.allocator) {
		// Guard against bad frees on static strings.
		switch raw_data(s) {
		case raw_data(ERROR_STRING_TIMEOUT), raw_data(ERROR_STRING_UNKNOWN):
			return
		case:
			delete(s, allocator)
		}
	}

	stdout := io.to_writer(os.stream_from_handle(os.stdout))
	stderr := io.to_writer(os.stream_from_handle(os.stderr))

	// -- Prepare test data.

	alloc_error: mem.Allocator_Error

	when TEST_SELECT != "" {
		select_internal_tests: [dynamic]Internal_Test
		defer delete(select_internal_tests)

		{
			index_list := TEST_SELECT
			for selector in strings.split_iterator(&index_list, ",") {
				// Temp allocator is fine since we just need to identify which test it's referring to.
				split_selector := strings.split(selector, ".", context.temp_allocator)

				found := false
				switch len(split_selector) {
				case 1:
					// Only the test name?
					#no_bounds_check name := split_selector[0]
					find_test_by_name: for it in internal_tests {
						if it.name == name {
							found = true
							_, alloc_error = append(&select_internal_tests, it)
							fmt.assertf(alloc_error == nil, "Error appending to select internal tests: %v", alloc_error)
							break find_test_by_name
						}
					}
				case 2:
					#no_bounds_check pkg := split_selector[0]
					#no_bounds_check name := split_selector[1]
					find_test_by_pkg_and_name: for it in internal_tests {
						if it.pkg == pkg && it.name == name {
							found = true
							_, alloc_error = append(&select_internal_tests, it)
							fmt.assertf(alloc_error == nil, "Error appending to select internal tests: %v", alloc_error)
							break find_test_by_pkg_and_name
						}
					}
				}
				if !found {
					fmt.wprintfln(stderr, "No test found for the name: %q", selector)
				}
			}
		}

		// Intentional shadow with user-specified tests.
		internal_tests := select_internal_tests[:]
	}

	total_failure_count := 0
	total_success_count := 0
	total_done_count := 0
	total_test_count := len(internal_tests)

	when !FANCY_OUTPUT {
		// This is strictly for updating the window title when the progress
		// report is disabled. We're otherwise able to depend on the call to
		// `needs_to_redraw`.
		last_done_count := -1
	}

	if total_test_count == 0 {
		// Exit early.
		fmt.wprintln(stdout, "No tests to run.")
		return true
	}

	for it in internal_tests {
		// NOTE(Feoramund): The old test runner skipped over tests with nil
		// procedures, but I couldn't find any case where they occurred.
		// This assert stands to prevent any oversight on my part.
		fmt.assertf(it.p != nil, "Test %s.%s has <nil> procedure.", it.pkg, it.name)
	}

	slice.stable_sort_by(internal_tests, proc(a, b: Internal_Test) -> bool {
		if a.pkg == b.pkg {
			return a.name < b.name
		} else {
			return a.pkg < b.pkg
		}
	})

	// -- Set thread count.

	when TEST_THREADS == 0 {
		thread_count := os.processor_core_count()
	} else {
		thread_count := max(1, TEST_THREADS)
	}

	thread_count = min(thread_count, total_test_count)

	// -- Allocate.

	pool_stack: mem.Rollback_Stack
	alloc_error = mem.rollback_stack_init(&pool_stack, POOL_BLOCK_SIZE)
	fmt.assertf(alloc_error == nil, "Error allocating memory for thread pool: %v", alloc_error)
	defer mem.rollback_stack_destroy(&pool_stack)

	pool: thread.Pool
	thread.pool_init(&pool, mem.rollback_stack_allocator(&pool_stack), thread_count)
	defer thread.pool_destroy(&pool)

	task_channels: []Task_Channel = ---
	task_channels, alloc_error = make([]Task_Channel, thread_count)
	fmt.assertf(alloc_error == nil, "Error allocating memory for update channels: %v", alloc_error)
	defer delete(task_channels)

	for &task_channel, index in task_channels {
		task_channel.channel, alloc_error = chan.create_buffered(Update_Channel, BUFFERED_EVENTS_PER_CHANNEL, context.allocator)
		fmt.assertf(alloc_error == nil, "Error allocating memory for update channel #%i: %v", index, alloc_error)
	}
	defer for &task_channel in task_channels {
		chan.destroy(&task_channel.channel)
	}

	// This buffer is used to batch writes to STDOUT or STDERR, to help reduce
	// screen flickering.
	batch_buffer: bytes.Buffer
	bytes.buffer_init_allocator(&batch_buffer, 0, BATCH_BUFFER_SIZE)
	batch_writer := io.to_writer(bytes.buffer_to_stream(&batch_buffer))
	defer bytes.buffer_destroy(&batch_buffer)

	report: Report = ---
	report, alloc_error = make_report(internal_tests)
	fmt.assertf(alloc_error == nil, "Error allocating memory for test report: %v", alloc_error)
	defer destroy_report(&report)

	when FANCY_OUTPUT {
		// We cannot make use of the ANSI save/restore cursor codes, because they
		// work by absolute screen coordinates. This will cause unnecessary
		// scrollback if we print at the bottom of someone's terminal.
		ansi_redraw_string := fmt.aprintf(
			// ANSI for "go up N lines then erase the screen from the cursor forward."
			ansi.CSI + "%i" + ansi.CPL + ansi.CSI + ansi.ED +
			// We'll combine this with the window title format string, since it
			// can be printed at the same time.
			"%s",
			// 1 extra line for the status bar.
			1 + len(report.packages), OSC_WINDOW_TITLE)
		assert(len(ansi_redraw_string) > 0, "Error allocating ANSI redraw string.")
		defer delete(ansi_redraw_string)

		thread_count_status_string: string = ---
		{
			PADDING :: PROGRESS_COLUMN_SPACING + PROGRESS_WIDTH

			unpadded := fmt.tprintf("%i thread%s", thread_count, "" if thread_count == 1 else "s")
			thread_count_status_string = fmt.aprintf("%- *[1]s", unpadded, report.pkg_column_len + PADDING)
			assert(len(thread_count_status_string) > 0, "Error allocating thread count status string.")
		}
		defer delete(thread_count_status_string)
	}

	task_data_slots: []Task_Data = ---
	task_data_slots, alloc_error = make([]Task_Data, thread_count)
	fmt.assertf(alloc_error == nil, "Error allocating memory for task data slots: %v", alloc_error)
	defer delete(task_data_slots)

	safe_heap: mem.Mutex_Allocator
	mem.mutex_allocator_init(&safe_heap, context.allocator)
	safe_heap_allocator := mem.mutex_allocator(&safe_heap)

	// Tests rotate through these allocators as they finish.
	task_allocators: []mem.Rollback_Stack = ---
	task_allocators, alloc_error = make([]mem.Rollback_Stack, thread_count)
	fmt.assertf(alloc_error == nil, "Error allocating memory for task allocators: %v", alloc_error)
	defer delete(task_allocators)

	when TRACKING_MEMORY {
		task_memory_trackers: []mem.Tracking_Allocator = ---
		task_memory_trackers, alloc_error = make([]mem.Tracking_Allocator, thread_count)
		fmt.assertf(alloc_error == nil, "Error allocating memory for memory trackers: %v", alloc_error)
		defer delete(task_memory_trackers)
	}

	#no_bounds_check for i in 0 ..< thread_count {
		alloc_error = mem.rollback_stack_init(&task_allocators[i], PER_THREAD_MEMORY, block_allocator = safe_heap_allocator)
		fmt.assertf(alloc_error == nil, "Error allocating memory for task allocator #%i: %v", i, alloc_error)
		when TRACKING_MEMORY {
			mem.tracking_allocator_init(&task_memory_trackers[i], mem.rollback_stack_allocator(&task_allocators[i]))
		}
	}

	defer #no_bounds_check for i in 0 ..< thread_count {
		when TRACKING_MEMORY {
			mem.tracking_allocator_destroy(&task_memory_trackers[i])
		}
		mem.rollback_stack_destroy(&task_allocators[i])
	}

	task_timeouts: [dynamic]Task_Timeout = ---
	task_timeouts, alloc_error = make([dynamic]Task_Timeout, 0, thread_count)
	fmt.assertf(alloc_error == nil, "Error allocating memory for task timeouts: %v", alloc_error)
	defer delete(task_timeouts)

	failed_test_reason_map: map[int]string = ---
	failed_test_reason_map, alloc_error = make(map[int]string, RESERVED_TEST_FAILURES)
	fmt.assertf(alloc_error == nil, "Error allocating memory for failed test reasons: %v", alloc_error)
	defer delete(failed_test_reason_map)

	log_messages: [dynamic]Log_Message = ---
	log_messages, alloc_error = make([dynamic]Log_Message, 0, RESERVED_LOG_MESSAGES)
	fmt.assertf(alloc_error == nil, "Error allocating memory for log message queue: %v", alloc_error)
	defer delete(log_messages)

	sorted_failed_test_reasons: [dynamic]int = ---
	sorted_failed_test_reasons, alloc_error = make([dynamic]int, 0, RESERVED_TEST_FAILURES)
	fmt.assertf(alloc_error == nil, "Error allocating memory for sorted failed test reasons: %v", alloc_error)
	defer delete(sorted_failed_test_reasons)

	when USE_CLIPBOARD {
		clipboard_buffer: bytes.Buffer
		bytes.buffer_init_allocator(&clipboard_buffer, 0, CLIPBOARD_BUFFER_SIZE)
		defer bytes.buffer_destroy(&clipboard_buffer)
	}

	// -- Setup initial tasks.

	// NOTE(Feoramund): This is the allocator that will be used by threads to
	// persist log messages past their lifetimes. It has its own variable name
	// in the event it needs to be changed from `safe_heap_allocator` without
	// digging through the source to divine everywhere it is used for that.
	shared_log_allocator := safe_heap_allocator

	context.allocator = safe_heap_allocator

	context.logger = {
		procedure = runner_logger_proc,
		data = &log_messages,
		lowest_level = .Debug if ODIN_DEBUG else .Info,
		options = Default_Test_Logger_Opts - {.Short_File_Path, .Line, .Procedure},
	}

	run_index: int

	setup_tasks: for &data, task_index in task_data_slots {
		setup_next_test: for run_index < total_test_count {
			#no_bounds_check it := internal_tests[run_index]
			defer run_index += 1

			data.it = it
			#no_bounds_check data.t.channel = chan.as_send(task_channels[task_index].channel)
			data.t._log_allocator = shared_log_allocator
			data.allocator_index = task_index

			#no_bounds_check when TRACKING_MEMORY {
				task_allocator := mem.tracking_allocator(&task_memory_trackers[task_index])
			} else {
				task_allocator := mem.rollback_stack_allocator(&task_allocators[task_index])
			}

			thread.pool_add_task(&pool, task_allocator, run_test_task, &data, run_index)

			continue setup_tasks
		}
	}

	// -- Run tests.

	setup_signal_handler()

	fmt.wprint(stdout, ansi.CSI + ansi.DECTCEM_HIDE)

	when FANCY_OUTPUT {
		redraw_report(stdout, report)
		draw_status_bar(stdout, thread_count_status_string, total_done_count, total_test_count)
	}

	when TRACKING_MEMORY {
		pkg_log.info("Memory tracking is enabled. Tests will log their memory usage when complete.")
		pkg_log.info("< Final Mem/ Total Mem> < Peak Mem> (#Free/Alloc) :: [package.test_name]")
	}

	start_time := time.now()

	thread.pool_start(&pool)
	main_loop: for !thread.pool_is_empty(&pool) {

		cycle_pool: for task in thread.pool_pop_done(&pool) {
			data := cast(^Task_Data)(task.data)

			when TRACKING_MEMORY {
				#no_bounds_check tracker := &task_memory_trackers[data.allocator_index]

				write_memory_report(batch_writer, tracker, data.it.pkg, data.it.name)

				pkg_log.info(bytes.buffer_to_string(&batch_buffer))
				bytes.buffer_reset(&batch_buffer)

				mem.tracking_allocator_reset(tracker)
			}

			free_all(task.allocator)

			if run_index < total_test_count {
				#no_bounds_check it := internal_tests[run_index]
				defer run_index += 1

				data.it = it
				data.t.error_count = 0

				thread.pool_add_task(&pool, task.allocator, run_test_task, data, run_index)
			}
		}

		handle_events: for &task_channel in task_channels {
			for ev in chan.try_recv(task_channel.channel) {
				switch event in ev {
				case Event_New_Test:
					task_channel.test_index = event.test_index

				case Event_State_Change:
					#no_bounds_check report.all_test_states[task_channel.test_index] = event.new_state

					#no_bounds_check it := internal_tests[task_channel.test_index]
					#no_bounds_check pkg := report.packages_by_name[it.pkg]

					#partial switch event.new_state {
					case .Failed:
						if task_channel.test_index not_in failed_test_reason_map {
							failed_test_reason_map[task_channel.test_index] = ERROR_STRING_UNKNOWN
						}
						total_failure_count += 1
						total_done_count += 1
					case .Successful:
						total_success_count += 1
						total_done_count += 1
					}

					when ODIN_DEBUG {
						pkg_log.debugf("Test #%i %s.%s changed state to %v.", task_channel.test_index, it.pkg, it.name, event.new_state)
					}

					pkg.last_change_state = event.new_state
					pkg.last_change_name = it.name
					pkg.frame_ready = false

				case Event_Set_Fail_Timeout:
					_, alloc_error = append(&task_timeouts, Task_Timeout {
						test_index = task_channel.test_index,
						at_time = event.at_time,
						location = event.location,
					})
					fmt.assertf(alloc_error == nil, "Error appending to task timeouts: %v", alloc_error)

				case Event_Log_Message:
					_, alloc_error = append(&log_messages, Log_Message {
						level = event.level,
						text = event.formatted_text,
						time = event.time,
						allocator = shared_log_allocator,
					})
					fmt.assertf(alloc_error == nil, "Error appending to log messages: %v", alloc_error)

					if event.level >= .Error {
						// Save the message for the final summary.
						if old_error, ok := failed_test_reason_map[task_channel.test_index]; ok {
							safe_delete_string(old_error, shared_log_allocator)
						}
						failed_test_reason_map[task_channel.test_index] = event.text
					} else {
						delete(event.text, shared_log_allocator)
					}
				}
			}
		}

		check_timeouts: for i := len(task_timeouts) - 1; i >= 0; i -= 1 {
			#no_bounds_check timeout := &task_timeouts[i]

			if time.since(timeout.at_time) < 0 {
				continue check_timeouts
			}

			defer unordered_remove(&task_timeouts, i)

			#no_bounds_check if report.all_test_states[timeout.test_index] > .Running {
				continue check_timeouts
			}

			if !thread.pool_stop_task(&pool, timeout.test_index) {
				// The task may have stopped a split second after we started
				// checking, but we haven't handled the new state yet.
				continue check_timeouts
			}

			#no_bounds_check report.all_test_states[timeout.test_index] = .Failed
			#no_bounds_check it := internal_tests[timeout.test_index]
			#no_bounds_check pkg := report.packages_by_name[it.pkg]
			pkg.frame_ready = false

			if old_error, ok := failed_test_reason_map[timeout.test_index]; ok {
				safe_delete_string(old_error, shared_log_allocator)
			}
			failed_test_reason_map[timeout.test_index] = ERROR_STRING_TIMEOUT
			total_failure_count += 1
			total_done_count += 1

			now := time.now()
			_, alloc_error = append(&log_messages, Log_Message {
				level = .Error,
				text = format_log_text(.Error, ERROR_STRING_TIMEOUT, Default_Test_Logger_Opts, timeout.location, now),
				time = now,
				allocator = context.allocator,
			})
			fmt.assertf(alloc_error == nil, "Error appending to log messages: %v", alloc_error)

			find_task_data: for &data in task_data_slots {
				if data.it.pkg == it.pkg && data.it.name == it.name {
					end_t(&data.t)
					break find_task_data
				}
			}
		}

		if should_abort() {
			fmt.wprintln(stderr, "\nCaught interrupt signal. Stopping all tests.")
			thread.pool_shutdown(&pool)
			break main_loop
		}

		// -- Redraw.

		when FANCY_OUTPUT {
			if len(log_messages) == 0 && !needs_to_redraw(report) {
				continue main_loop
			}

			fmt.wprintf(stdout, ansi_redraw_string, total_done_count, total_test_count)
		} else {
			if total_done_count != last_done_count {
				fmt.wprintf(stdout, OSC_WINDOW_TITLE, total_done_count, total_test_count)
				last_done_count = total_done_count
			}

			if len(log_messages) == 0 {
				continue main_loop
			}
		}

		// Because each thread has its own messenger channel, log messages
		// arrive in chunks that are in-order, but when they're merged with the
		// logs from other threads, they become out-of-order.
		slice.stable_sort_by(log_messages[:], proc(a, b: Log_Message) -> bool {
			return time.diff(a.time, b.time) > 0
		})

		for message in log_messages {
			fmt.wprintln(batch_writer, message.text)
			delete(message.text, message.allocator)
		}

		fmt.wprint(stderr, bytes.buffer_to_string(&batch_buffer))
		clear(&log_messages)
		bytes.buffer_reset(&batch_buffer)

		when FANCY_OUTPUT {
			redraw_report(batch_writer, report)
			draw_status_bar(batch_writer, thread_count_status_string, total_done_count, total_test_count)
			fmt.wprint(stdout, bytes.buffer_to_string(&batch_buffer))
			bytes.buffer_reset(&batch_buffer)
		}
	}

	// -- All tests are complete, or the runner has been interrupted.

	thread.pool_join(&pool)

	finished_in := time.since(start_time)

	fmt.wprintf(batch_writer,
		"\nFinished %i test%s in %v.",
		total_done_count,
		"" if total_done_count == 1 else "s",
		finished_in)

	if total_done_count != total_test_count {
		not_run_count := total_test_count - total_done_count
		fmt.wprintf(batch_writer,
			" " + SGR_READY + "%i" + SGR_RESET + " %s left undone.",
			not_run_count,
			"test was" if not_run_count == 1 else "tests were")
	}

	if total_success_count == total_test_count {
		fmt.wprintfln(batch_writer,
			" %s " + SGR_SUCCESS + "successful." + SGR_RESET,
			"The test was" if total_test_count == 1 else "All tests were")
	} else if total_failure_count > 0 {
		if total_failure_count == total_test_count {
			fmt.wprintfln(batch_writer,
				" %s " + SGR_FAILED + "failed." + SGR_RESET,
				"The test" if total_test_count == 1 else "All tests")
		} else {
			fmt.wprintfln(batch_writer,
				" " + SGR_FAILED + "%i" + SGR_RESET + " test%s failed.",
				total_failure_count,
				"" if total_failure_count == 1 else "s")
		}

		for test_index in failed_test_reason_map {
			_, alloc_error = append(&sorted_failed_test_reasons, test_index)
			fmt.assertf(alloc_error == nil, "Error appending to sorted failed test reasons: %v", alloc_error)
		}

		slice.sort(sorted_failed_test_reasons[:])

		for test_index in sorted_failed_test_reasons {
			#no_bounds_check last_error := failed_test_reason_map[test_index]
			#no_bounds_check it := internal_tests[test_index]
			pkg_and_name := fmt.tprintf("%s.%s", it.pkg, it.name)
			fmt.wprintfln(batch_writer, " - %- *[1]s\t%s",
				pkg_and_name,
				report.pkg_column_len + report.test_column_len,
				last_error)
			safe_delete_string(last_error, shared_log_allocator)
		}

		if total_success_count > 0 {
			when USE_CLIPBOARD {
				clipboard_writer := io.to_writer(bytes.buffer_to_stream(&clipboard_buffer))
				fmt.wprint(clipboard_writer, "-define:test_select=")
				for test_index in sorted_failed_test_reasons {
					#no_bounds_check it := internal_tests[test_index]
					fmt.wprintf(clipboard_writer, "%s.%s,", it.pkg, it.name)
				}

				encoded_names := base64_encode(bytes.buffer_to_bytes(&clipboard_buffer), allocator = context.temp_allocator)

				fmt.wprintf(batch_writer,
					ansi.OSC + ansi.CLIPBOARD + ";c;%s" + ansi.ST +
					"\nThe name%s of the failed test%s been copied to your clipboard.",
					encoded_names,
					"" if total_failure_count == 1 else "s",
					" has" if total_failure_count == 1 else "s have")
			} else {
				fmt.wprintf(batch_writer, "\nTo run only the failed test%s, use:\n\t-define:test_select=",
					"" if total_failure_count == 1 else "s")
				for test_index in sorted_failed_test_reasons {
					#no_bounds_check it := internal_tests[test_index]
					fmt.wprintf(batch_writer, "%s.%s,", it.pkg, it.name)
				}
			}

			fmt.wprintln(batch_writer)
		}
	}

	fmt.wprint(batch_writer, ansi.CSI + ansi.DECTCEM_SHOW)

	fmt.wprintln(stderr, bytes.buffer_to_string(&batch_buffer))

	return total_success_count == total_test_count
}
core/testing/signal_handler.odin (new file)
@@ -0,0 +1,19 @@
//+private
//+build windows, linux, darwin, freebsd, openbsd, netbsd, haiku
package testing

import "base:intrinsics"
import "core:c/libc"

@(private="file")
abort_flag: libc.sig_atomic_t

setup_signal_handler :: proc() {
	libc.signal(libc.SIGINT, proc "c" (sig: libc.int) {
		intrinsics.atomic_add(&abort_flag, 1)
	})
}

should_abort :: proc() -> bool {
	return intrinsics.atomic_load(&abort_flag) > 0
}
core/testing/signal_handler_other.odin (new file)
@@ -0,0 +1,11 @@
//+private
//+build js, wasi, freestanding
package testing

setup_signal_handler :: proc() {
	// Do nothing.
}

should_abort :: proc() -> bool {
	return false
}
core/testing/t.odin (modified)
@@ -1,10 +1,11 @@
package testing

import "base:intrinsics"
import "base:runtime"
import pkg_log "core:log"
import "core:reflect"
import "core:sync/chan"
import "core:time"

_ :: reflect // alias reflect to nothing to force visibility for -vet

@@ -22,44 +23,45 @@ Internal_Test :: struct {
Internal_Cleanup :: struct {
	procedure: proc(rawptr),
	user_data: rawptr,
	ctx: runtime.Context,
}

T :: struct {
	error_count: int,

	channel: Update_Channel_Sender,

	cleanups: [dynamic]Internal_Cleanup,

	// This allocator is shared between the test runner and its threads for
	// cloning log strings, so they can outlive the lifetime of individual
	// tests during channel transmission.
	_log_allocator: runtime.Allocator,

	_fail_now: proc() -> !,
}

@(deprecated="prefer `log.error`")
error :: proc(t: ^T, args: ..any, loc := #caller_location) {
	pkg_log.error(..args, location = loc)
}

@(deprecated="prefer `log.errorf`")
errorf :: proc(t: ^T, format: string, args: ..any, loc := #caller_location) {
	pkg_log.errorf(format, ..args, location = loc)
}

fail :: proc(t: ^T, loc := #caller_location) {
	pkg_log.error("FAIL", location=loc)
}

fail_now :: proc(t: ^T, msg := "", loc := #caller_location) {
	if msg != "" {
		pkg_log.error("FAIL:", msg, location=loc)
	} else {
		pkg_log.error("FAIL", location=loc)
	}
	if t._fail_now != nil {
		t._fail_now()
	}
}

@@ -69,32 +71,34 @@ failed :: proc(t: ^T) -> bool {
	return t.error_count != 0
}

@(deprecated="prefer `log.info`")
log :: proc(t: ^T, args: ..any, loc := #caller_location) {
	pkg_log.info(..args, location = loc)
}

@(deprecated="prefer `log.infof`")
logf :: proc(t: ^T, format: string, args: ..any, loc := #caller_location) {
	pkg_log.infof(format, ..args, location = loc)
}

// cleanup registers a procedure and user_data, which will be called when the test, and all its subtests, complete.
// Cleanup procedures will be called in LIFO (last added, first called) order.
// Each procedure will use a copy of the context at the time of registering.
cleanup :: proc(t: ^T, procedure: proc(rawptr), user_data: rawptr) {
	append(&t.cleanups, Internal_Cleanup{procedure, user_data, context})
}
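A hypothetical usage sketch (not part of the commit): the registered procedure runs after the test completes, in LIFO order, under a copy of the context captured at registration, so the matching allocator is in effect even if the test fails early or times out.

package example_tests

import "core:testing"

@(test)
cleanup_example :: proc(t: ^testing.T) {
	scratch := new(int)
	scratch^ = 42
	testing.cleanup(t, proc(raw: rawptr) {
		// Runs with a copy of the registering context, so this free
		// goes back to the same allocator that served `new`.
		free(cast(^int)raw)
	}, scratch)
	// ... test body; the cleanup runs even if an error occurs here.
}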

expect :: proc(t: ^T, ok: bool, msg: string = "", loc := #caller_location) -> bool {
	if !ok {
		pkg_log.error(msg, location=loc)
	}
	return ok
}

expectf :: proc(t: ^T, ok: bool, format: string, args: ..any, loc := #caller_location) -> bool {
	if !ok {
		pkg_log.errorf(format, ..args, location=loc)
	}
	return ok
}

@@ -102,12 +106,15 @@ expectf :: proc(t: ^T, ok: bool, format: string, args: ..any, loc := #caller_loc
expect_value :: proc(t: ^T, value, expected: $T, loc := #caller_location) -> bool where intrinsics.type_is_comparable(T) {
	ok := value == expected || reflect.is_nil(value) && reflect.is_nil(expected)
	if !ok {
		pkg_log.errorf("expected %v, got %v", expected, value, location=loc)
	}
	return ok
}

set_fail_timeout :: proc(t: ^T, duration: time.Duration, loc := #caller_location) {
	chan.send(t.channel, Event_Set_Fail_Timeout {
		at_time = time.time_add(time.now(), duration),
		location = loc,
	})
}
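And a matching sketch for the timeout API (hypothetical test; the duration is arbitrary): the event travels over the update channel to the runner, which stops the task and records a timeout failure once the deadline passes, on any platform.

package example_tests

import "core:testing"
import "core:time"

@(test)
timeout_example :: proc(t: ^testing.T) {
	// Ask the runner to fail this test if it is still going in 10 seconds.
	testing.set_fail_timeout(t, 10 * time.Second)
	// ... potentially long-running work ...
}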