Merge pull request #1792 from jasonKercher/os2_linux

Os2 linux
This commit is contained in:
gingerBill
2022-05-22 17:18:28 +01:00
committed by GitHub
10 changed files with 2073 additions and 3 deletions

View File

@@ -0,0 +1,28 @@
//+private
package os2
// Looks up environment variable `key`. Unimplemented stub on Linux:
// always returns "", false.
_get_env :: proc(key: string, allocator := context.allocator) -> (value: string, found: bool) {
	//TODO
	return
}
// Sets environment variable `key` to `value`. Unimplemented stub:
// always reports failure (false).
_set_env :: proc(key, value: string) -> bool {
	//TODO
	return false
}
// Removes environment variable `key`. Unimplemented stub:
// always reports failure (false).
_unset_env :: proc(key: string) -> bool {
	//TODO
	return false
}
// Removes all environment variables. Unimplemented stub: no-op.
_clear_env :: proc() {
	//TODO
}
// Returns a copy of all "key=value" environment entries.
// Unimplemented stub: always returns nil.
_environ :: proc(allocator := context.allocator) -> []string {
	//TODO
	return nil
}

View File

@@ -0,0 +1,145 @@
//+private
package os2
import "core:sys/unix"
// Linux errno values; see errno(3) and <asm-generic/errno.h>.
EPERM :: 1 /* Operation not permitted */
ENOENT :: 2 /* No such file or directory */
ESRCH :: 3 /* No such process */
EINTR :: 4 /* Interrupted system call */
EIO :: 5 /* I/O error */
ENXIO :: 6 /* No such device or address */
EBADF :: 9 /* Bad file number */
EAGAIN :: 11 /* Try again */
ENOMEM :: 12 /* Out of memory */
EACCES :: 13 /* Permission denied */
EFAULT :: 14 /* Bad address */
EEXIST :: 17 /* File exists */
ENODEV :: 19 /* No such device */
ENOTDIR :: 20 /* Not a directory */
EISDIR :: 21 /* Is a directory */
EINVAL :: 22 /* Invalid argument */
ENFILE :: 23 /* File table overflow */
EMFILE :: 24 /* Too many open files */
ETXTBSY :: 26 /* Text file busy */
EFBIG :: 27 /* File too large */
ENOSPC :: 28 /* No space left on device */
ESPIPE :: 29 /* Illegal seek */
EROFS :: 30 /* Read-only file system */
EPIPE :: 32 /* Broken pipe */
ERANGE :: 34 /* Result too large */
EDEADLK :: 35 /* Resource deadlock would occur */
ENAMETOOLONG :: 36 /* File name too long */
ENOLCK :: 37 /* No record locks available */
ENOSYS :: 38 /* Invalid system call number */
ENOTEMPTY :: 39 /* Directory not empty */
ELOOP :: 40 /* Too many symbolic links encountered */
EWOULDBLOCK :: EAGAIN /* Operation would block */
ENOMSG :: 42 /* No message of desired type */
EIDRM :: 43 /* Identifier removed */
ECHRNG :: 44 /* Channel number out of range */
EL2NSYNC :: 45 /* Level 2 not synchronized */
EL3HLT :: 46 /* Level 3 halted */
EL3RST :: 47 /* Level 3 reset */
ELNRNG :: 48 /* Link number out of range */
EUNATCH :: 49 /* Protocol driver not attached */
ENOCSI :: 50 /* No CSI structure available */
EL2HLT :: 51 /* Level 2 halted */
EBADE :: 52 /* Invalid exchange */
EBADR :: 53 /* Invalid request descriptor */
EXFULL :: 54 /* Exchange full */
ENOANO :: 55 /* No anode */
EBADRQC :: 56 /* Invalid request code */
EBADSLT :: 57 /* Invalid slot */
EDEADLOCK :: EDEADLK
EBFONT :: 59 /* Bad font file format */
ENOSTR :: 60 /* Device not a stream */
ENODATA :: 61 /* No data available */
ETIME :: 62 /* Timer expired */
ENOSR :: 63 /* Out of streams resources */
ENONET :: 64 /* Machine is not on the network */
ENOPKG :: 65 /* Package not installed */
EREMOTE :: 66 /* Object is remote */
ENOLINK :: 67 /* Link has been severed */
EADV :: 68 /* Advertise error */
ESRMNT :: 69 /* Srmount error */
ECOMM :: 70 /* Communication error on send */
EPROTO :: 71 /* Protocol error */
EMULTIHOP :: 72 /* Multihop attempted */
EDOTDOT :: 73 /* RFS specific error */
EBADMSG :: 74 /* Not a data message */
EOVERFLOW :: 75 /* Value too large for defined data type */
ENOTUNIQ :: 76 /* Name not unique on network */
EBADFD :: 77 /* File descriptor in bad state */
EREMCHG :: 78 /* Remote address changed */
ELIBACC :: 79 /* Can not access a needed shared library */
ELIBBAD :: 80 /* Accessing a corrupted shared library */
ELIBSCN :: 81 /* .lib section in a.out corrupted */
ELIBMAX :: 82 /* Attempting to link in too many shared libraries */
ELIBEXEC :: 83 /* Cannot exec a shared library directly */
EILSEQ :: 84 /* Illegal byte sequence */
ERESTART :: 85 /* Interrupted system call should be restarted */
ESTRPIPE :: 86 /* Streams pipe error */
EUSERS :: 87 /* Too many users */
ENOTSOCK :: 88 /* Socket operation on non-socket */
EDESTADDRREQ :: 89 /* Destination address required */
EMSGSIZE :: 90 /* Message too long */
EPROTOTYPE :: 91 /* Protocol wrong type for socket */
ENOPROTOOPT :: 92 /* Protocol not available */
EPROTONOSUPPORT:: 93 /* Protocol not supported */
ESOCKTNOSUPPORT:: 94 /* Socket type not supported */
EOPNOTSUPP :: 95 /* Operation not supported on transport endpoint */
EPFNOSUPPORT :: 96 /* Protocol family not supported */
EAFNOSUPPORT :: 97 /* Address family not supported by protocol */
EADDRINUSE :: 98 /* Address already in use */
EADDRNOTAVAIL :: 99 /* Cannot assign requested address */
ENETDOWN :: 100 /* Network is down */
ENETUNREACH :: 101 /* Network is unreachable */
ENETRESET :: 102 /* Network dropped connection because of reset */
ECONNABORTED :: 103 /* Software caused connection abort */
ECONNRESET :: 104 /* Connection reset by peer */
ENOBUFS :: 105 /* No buffer space available */
EISCONN :: 106 /* Transport endpoint is already connected */
ENOTCONN :: 107 /* Transport endpoint is not connected */
ESHUTDOWN :: 108 /* Cannot send after transport endpoint shutdown */
ETOOMANYREFS :: 109 /* Too many references: cannot splice */
ETIMEDOUT :: 110 /* Connection timed out */
ECONNREFUSED :: 111 /* Connection refused */
EHOSTDOWN :: 112 /* Host is down */
EHOSTUNREACH :: 113 /* No route to host */
EALREADY :: 114 /* Operation already in progress */
EINPROGRESS :: 115 /* Operation now in progress */
ESTALE :: 116 /* Stale file handle */
EUCLEAN :: 117 /* Structure needs cleaning */
ENOTNAM :: 118 /* Not a XENIX named type file */
ENAVAIL :: 119 /* No XENIX semaphores available */
EISNAM :: 120 /* Is a named type file */
EREMOTEIO :: 121 /* Remote I/O error */
EDQUOT :: 122 /* Quota exceeded */
ENOMEDIUM :: 123 /* No medium found */
EMEDIUMTYPE :: 124 /* Wrong medium type */
ECANCELED :: 125 /* Operation Canceled */
ENOKEY :: 126 /* Required key not available */
EKEYEXPIRED :: 127 /* Key has expired */
EKEYREVOKED :: 128 /* Key has been revoked */
EKEYREJECTED :: 129 /* Key was rejected by service */
EOWNERDEAD :: 130 /* Owner died */
ENOTRECOVERABLE:: 131 /* State not recoverable */
ERFKILL :: 132 /* Operation not possible due to RF-kill */
EHWPOISON :: 133 /* Memory page has hardware error */
// Translates a raw syscall return value into the package-level Error
// by extracting the encoded errno.
_get_platform_error :: proc(res: int) -> Error {
	return Platform_Error(i32(unix.get_errno(res)))
}
// Maps a syscall result to nil on success (res >= 0) or to the
// corresponding platform error on failure.
_ok_or_error :: proc(res: int) -> Error {
	if res >= 0 {
		return nil
	}
	return _get_platform_error(res)
}
// Returns a human-readable message for an errno value.
// Placeholder implementation: every non-zero errno maps to the generic
// string "Error" — TODO: return strerror-style messages per errno.
_error_string :: proc(errno: i32) -> string {
	if errno == 0 {
		return ""
	}
	return "Error"
}

422
core/os/os2/file_linux.odin Normal file
View File

@@ -0,0 +1,422 @@
//+private
package os2
import "core:io"
import "core:time"
import "core:strings"
import "core:runtime"
import "core:sys/unix"
// Sentinel for "no file descriptor".
INVALID_HANDLE :: -1
// Raw Linux open(2) flag bits (octal; see <asm-generic/fcntl.h>).
// The leading underscore distinguishes them from the portable os2 flags.
_O_RDONLY :: 0o0
_O_WRONLY :: 0o1
_O_RDWR :: 0o2
_O_CREAT :: 0o100
_O_EXCL :: 0o200
_O_TRUNC :: 0o1000
_O_APPEND :: 0o2000
_O_NONBLOCK :: 0o4000
_O_LARGEFILE :: 0o100000
_O_DIRECTORY :: 0o200000
_O_NOFOLLOW :: 0o400000
_O_SYNC :: 0o4010000
_O_CLOEXEC :: 0o2000000
_O_PATH :: 0o10000000
// Special dirfd meaning "relative to the current working directory"
// for the *at() family of syscalls.
_AT_FDCWD :: -100
// Paths at or below this length are cloned into the temp allocator;
// longer ones fall back to context.allocator (see _name_to_cstring).
_CSTRING_NAME_HEAP_THRESHOLD :: 512
// Platform-specific file state stored inside ^File.
_File :: struct {
	name: string,               // full path, owned by `allocator`
	fd: int,                    // underlying Linux file descriptor
	allocator: runtime.Allocator, // allocator that owns this struct and `name`
}
// Allocator used for ^File structures and their name strings
// (the package heap allocator).
_file_allocator :: proc() -> runtime.Allocator {
	return heap_allocator()
}
// Opens `name` with the portable flags/permissions, translating them to
// raw open(2) bits. Returns a heap-allocated ^File or a platform error.
_open :: proc(name: string, flags: File_Flags, perm: File_Mode) -> (^File, Error) {
	name_cstr, allocated := _name_to_cstring(name)
	defer if allocated {
		delete(name_cstr)
	}
	flags_i: int
	// BUG FIX: in Odin `&` binds tighter than `|`, so the original
	// `flags & O_RDONLY|O_WRONLY|O_RDWR` parsed as
	// `(flags & O_RDONLY)|O_WRONLY|O_RDWR` and mis-selected the access
	// mode. The access-mode mask must be parenthesized.
	switch flags & (O_RDONLY|O_WRONLY|O_RDWR) {
	case O_RDONLY: flags_i = _O_RDONLY
	case O_WRONLY: flags_i = _O_WRONLY
	case O_RDWR: flags_i = _O_RDWR
	}
	// Branchless translation of the remaining flags: `in` yields a bool,
	// int(bool) is 0 or 1, so each bit is OR'd in only when set.
	flags_i |= (_O_APPEND * int(.Append in flags))
	flags_i |= (_O_CREAT * int(.Create in flags))
	flags_i |= (_O_EXCL * int(.Excl in flags))
	flags_i |= (_O_SYNC * int(.Sync in flags))
	flags_i |= (_O_TRUNC * int(.Trunc in flags))
	flags_i |= (_O_CLOEXEC * int(.Close_On_Exec in flags))
	fd := unix.sys_open(name_cstr, flags_i, int(perm))
	if fd < 0 {
		return nil, _get_platform_error(fd)
	}
	return _new_file(uintptr(fd), name), nil
}
// Wraps an already-open descriptor in a heap-allocated ^File.
// The passed-in name is ignored; the stored name is resolved from the
// fd itself via _get_full_path so it is always absolute.
_new_file :: proc(fd: uintptr, _: string) -> ^File {
	file := new(File, _file_allocator())
	file.impl.fd = int(fd)
	file.impl.allocator = _file_allocator()
	file.impl.name = _get_full_path(file.impl.fd, file.impl.allocator)
	return file
}
// Frees the memory owned by `f` (its name string and the File struct
// itself). Does not close the descriptor. A nil file is a no-op.
_destroy :: proc(f: ^File) -> Error {
	if f != nil {
		delete(f.impl.name, f.impl.allocator)
		free(f, f.impl.allocator)
	}
	return nil
}
// Closes the underlying descriptor via close(2).
// NOTE(review): assumes f is non-nil — nil would fault here; confirm callers.
_close :: proc(f: ^File) -> Error {
	res := unix.sys_close(f.impl.fd)
	return _ok_or_error(res)
}
// Returns the raw descriptor of `f`, or ~uintptr(0) (all bits set)
// for a nil file.
_fd :: proc(f: ^File) -> uintptr {
	if f != nil {
		return uintptr(f.impl.fd)
	}
	return ~uintptr(0)
}
// Returns the stored path of the file, or "" for a nil file.
_name :: proc(f: ^File) -> string {
	if f == nil {
		return ""
	}
	return f.impl.name
}
// Repositions the file offset via lseek(2); returns the new absolute
// offset, or -1 plus a platform error on failure.
_seek :: proc(f: ^File, offset: i64, whence: Seek_From) -> (ret: i64, err: Error) {
	res := unix.sys_lseek(f.impl.fd, offset, int(whence))
	if res >= 0 {
		return res, nil
	}
	return -1, _get_platform_error(int(res))
}
// Reads up to len(p) bytes into p with a single read(2) call.
// A short read is not an error; an empty buffer reads nothing.
_read :: proc(f: ^File, p: []byte) -> (n: int, err: Error) {
	if len(p) == 0 {
		return
	}
	n = unix.sys_read(f.impl.fd, &p[0], len(p))
	if n < 0 {
		err = _get_platform_error(n)
		n = -1
	}
	return
}
// Reads into p starting at absolute file position `offset` using
// pread(2), looping until the buffer is full or EOF. Does not move the
// file's own offset. Returns the number of bytes read.
_read_at :: proc(f: ^File, p: []byte, offset: i64) -> (n: int, err: Error) {
	if offset < 0 {
		return 0, .Invalid_Offset
	}
	b, offset := p, offset
	for len(b) > 0 {
		m := unix.sys_pread(f.impl.fd, &b[0], len(b), offset)
		if m < 0 {
			return -1, _get_platform_error(m)
		}
		if m == 0 {
			// BUG FIX: pread returns 0 at end-of-file; without this
			// check the loop would spin forever whenever the request
			// extends past EOF. Stop and report the short read count.
			break
		}
		n += m
		b = b[m:]
		offset += i64(m)
	}
	return
}
// Copies data from reader `r` into the file. Unimplemented stub:
// returns 0, nil.
_read_from :: proc(f: ^File, r: io.Reader) -> (n: i64, err: Error) {
	//TODO
	return
}
// Writes p with a single write(2) call; a short write is not treated
// as an error. Writing an empty slice is a no-op.
_write :: proc(f: ^File, p: []byte) -> (n: int, err: Error) {
	if len(p) == 0 {
		return
	}
	n = unix.sys_write(f.impl.fd, &p[0], uint(len(p)))
	if n < 0 {
		err = _get_platform_error(n)
		n = -1
	}
	return
}
// Writes all of p at absolute file position `offset` using pwrite(2),
// looping until every byte is written. Does not move the file's own
// offset. Returns the number of bytes written.
_write_at :: proc(f: ^File, p: []byte, offset: i64) -> (n: int, err: Error) {
	if offset < 0 {
		return 0, .Invalid_Offset
	}
	b, offset := p, offset
	for len(b) > 0 {
		m := unix.sys_pwrite(f.impl.fd, &b[0], len(b), offset)
		if m < 0 {
			return -1, _get_platform_error(m)
		}
		// NOTE(review): a zero-byte pwrite result would loop forever here;
		// presumably that cannot happen for regular files — confirm.
		n += m
		b = b[m:]
		offset += i64(m)
	}
	return
}
// Copies the file's contents into writer `w`. Unimplemented stub:
// returns 0, nil.
_write_to :: proc(f: ^File, w: io.Writer) -> (n: i64, err: Error) {
	//TODO
	return
}
// Returns the file's size in bytes via fstat(2), or -1 plus a platform
// error on failure.
_file_size :: proc(f: ^File) -> (n: i64, err: Error) {
	s: _Stat = ---
	if res := unix.sys_fstat(f.impl.fd, &s); res < 0 {
		return -1, _get_platform_error(res)
	}
	return s.size, nil
}
// Flushes file data and metadata to disk via fsync(2).
_sync :: proc(f: ^File) -> Error {
	return _ok_or_error(unix.sys_fsync(f.impl.fd))
}
// Same as _sync: there is no user-space buffering, so flush is fsync(2).
_flush :: proc(f: ^File) -> Error {
	return _ok_or_error(unix.sys_fsync(f.impl.fd))
}
// Truncates (or extends with zeros) the file to `size` bytes via ftruncate(2).
_truncate :: proc(f: ^File, size: i64) -> Error {
	return _ok_or_error(unix.sys_ftruncate(f.impl.fd, size))
}
// Removes the filesystem entry `name`: rmdir(2) for directories,
// unlink(2) for everything else. The path is briefly opened only to
// determine which of the two applies.
_remove :: proc(name: string) -> Error {
	name_cstr, allocated := _name_to_cstring(name)
	defer if allocated {
		delete(name_cstr)
	}
	// BUG FIX: the original passed int(File_Flags.Read) — an os2-level
	// flag value, not an open(2) bit — to the raw syscall. The fd is only
	// used for fstat, so the raw read-only mode is what is wanted here.
	fd := unix.sys_open(name_cstr, _O_RDONLY)
	if fd < 0 {
		return _get_platform_error(fd)
	}
	defer unix.sys_close(fd)
	if _is_dir_fd(fd) {
		return _ok_or_error(unix.sys_rmdir(name_cstr))
	}
	return _ok_or_error(unix.sys_unlink(name_cstr))
}
// Renames/moves old_name to new_name via rename(2).
_rename :: proc(old_name, new_name: string) -> Error {
	old_cstr, old_heap := _name_to_cstring(old_name)
	new_cstr, new_heap := _name_to_cstring(new_name)
	res := unix.sys_rename(old_cstr, new_cstr)
	if old_heap {
		delete(old_cstr)
	}
	if new_heap {
		delete(new_cstr)
	}
	return _ok_or_error(res)
}
// Creates a hard link new_name pointing at old_name via link(2).
_link :: proc(old_name, new_name: string) -> Error {
	old_cstr, old_heap := _name_to_cstring(old_name)
	new_cstr, new_heap := _name_to_cstring(new_name)
	res := unix.sys_link(old_cstr, new_cstr)
	if old_heap {
		delete(old_cstr)
	}
	if new_heap {
		delete(new_cstr)
	}
	return _ok_or_error(res)
}
// Creates a symbolic link new_name pointing at old_name via symlink(2).
_symlink :: proc(old_name, new_name: string) -> Error {
	old_cstr, old_heap := _name_to_cstring(old_name)
	new_cstr, new_heap := _name_to_cstring(new_name)
	res := unix.sys_symlink(old_cstr, new_cstr)
	if old_heap {
		delete(old_cstr)
	}
	if new_heap {
		delete(new_cstr)
	}
	return _ok_or_error(res)
}
// Resolves the symlink target of `name_cstr` via readlink(2), growing
// the buffer geometrically whenever the result may have been truncated
// (readlink fills the whole buffer on truncation). The returned string
// is backed by memory from `allocator`.
_read_link_cstr :: proc(name_cstr: cstring, allocator := context.allocator) -> (string, Error) {
	bufsz : uint = 256
	buf := make([]byte, bufsz, allocator)
	for {
		rc := unix.sys_readlink(name_cstr, &(buf[0]), bufsz)
		if rc < 0 {
			// BUG FIX: buf was allocated from `allocator`; the original
			// delete(buf) freed it with the implicit context allocator,
			// which corrupts state whenever the two differ.
			delete(buf, allocator)
			return "", _get_platform_error(rc)
		} else if rc == int(bufsz) {
			// Possibly truncated — retry with double the space.
			bufsz *= 2
			delete(buf, allocator)
			buf = make([]byte, bufsz, allocator)
		} else {
			return strings.string_from_ptr(&buf[0], rc), nil
		}
	}
}
// Resolves the symbolic link `name`; the result is allocated from
// `allocator`.
_read_link :: proc(name: string, allocator := context.allocator) -> (string, Error) {
	cname, heap_allocated := _name_to_cstring(name)
	defer if heap_allocated {
		delete(cname)
	}
	return _read_link_cstr(cname, allocator)
}
// Removes the directory entry `name` via unlink(2).
_unlink :: proc(name: string) -> Error {
	cname, heap_allocated := _name_to_cstring(name)
	res := unix.sys_unlink(cname)
	if heap_allocated {
		delete(cname)
	}
	return _ok_or_error(res)
}
// Changes the process working directory via chdir(2).
_chdir :: proc(name: string) -> Error {
	cname, heap_allocated := _name_to_cstring(name)
	res := unix.sys_chdir(cname)
	if heap_allocated {
		delete(cname)
	}
	return _ok_or_error(res)
}
// Changes the process working directory to the directory open in `f`
// via fchdir(2).
_fchdir :: proc(f: ^File) -> Error {
	return _ok_or_error(unix.sys_fchdir(f.impl.fd))
}
// Changes the permission bits of `name` via chmod(2).
_chmod :: proc(name: string, mode: File_Mode) -> Error {
	cname, heap_allocated := _name_to_cstring(name)
	res := unix.sys_chmod(cname, int(mode))
	if heap_allocated {
		delete(cname)
	}
	return _ok_or_error(res)
}
// Changes the permission bits of the open file via fchmod(2).
_fchmod :: proc(f: ^File, mode: File_Mode) -> Error {
	return _ok_or_error(unix.sys_fchmod(f.impl.fd, int(mode)))
}
// Changes the owner/group of `name` via chown(2).
// NOTE: fails (EPERM) without super user privileges.
_chown :: proc(name: string, uid, gid: int) -> Error {
	name_cstr, allocated := _name_to_cstring(name)
	defer if allocated {
		delete(name_cstr)
	}
	return _ok_or_error(unix.sys_chown(name_cstr, uid, gid))
}
// Changes the owner/group of `name` itself via lchown(2) — does not
// follow a symlink. NOTE: fails (EPERM) without super user privileges.
_lchown :: proc(name: string, uid, gid: int) -> Error {
	name_cstr, allocated := _name_to_cstring(name)
	defer if allocated {
		delete(name_cstr)
	}
	return _ok_or_error(unix.sys_lchown(name_cstr, uid, gid))
}
// Changes the owner/group of the open file via fchown(2).
// NOTE: fails (EPERM) without super user privileges.
_fchown :: proc(f: ^File, uid, gid: int) -> Error {
	return _ok_or_error(unix.sys_fchown(f.impl.fd, uid, gid))
}
// Sets the access and modification timestamps of `name` via utimensat(2).
_chtimes :: proc(name: string, atime, mtime: time.Time) -> Error {
	name_cstr, allocated := _name_to_cstring(name)
	defer if allocated {
		delete(name_cstr)
	}
	// BUG FIX: Unix_File_Time mirrors struct timespec {seconds, nanoseconds};
	// time.Time._nsec is total nanoseconds since the epoch. The original
	// stored the whole nanosecond count in the seconds field. Split it.
	// (assumes Unix_File_Time field order is {sec, nsec} — TODO confirm)
	times := [2]Unix_File_Time {
		{ atime._nsec / 1_000_000_000, atime._nsec % 1_000_000_000 },
		{ mtime._nsec / 1_000_000_000, mtime._nsec % 1_000_000_000 },
	}
	return _ok_or_error(unix.sys_utimensat(_AT_FDCWD, name_cstr, &times, 0))
}
// Sets the access and modification timestamps of the open file via
// utimensat(2) with a nil path (operates on the descriptor itself).
_fchtimes :: proc(f: ^File, atime, mtime: time.Time) -> Error {
	// BUG FIX: same timespec split as _chtimes — seconds and nanoseconds
	// must be stored separately, not the raw nanosecond timestamp.
	// (assumes Unix_File_Time field order is {sec, nsec} — TODO confirm)
	times := [2]Unix_File_Time {
		{ atime._nsec / 1_000_000_000, atime._nsec % 1_000_000_000 },
		{ mtime._nsec / 1_000_000_000, mtime._nsec % 1_000_000_000 },
	}
	return _ok_or_error(unix.sys_utimensat(f.impl.fd, nil, &times, 0))
}
// Reports whether a path exists, via access(2) with F_OK.
_exists :: proc(name: string) -> bool {
	cname, heap_allocated := _name_to_cstring(name)
	result := unix.sys_access(cname, F_OK) == 0
	if heap_allocated {
		delete(cname)
	}
	return result
}
// Reports whether `name` refers to a regular file.
// Returns false if stat(2) fails for any reason.
_is_file :: proc(name: string) -> bool {
	cname, heap_allocated := _name_to_cstring(name)
	defer if heap_allocated {
		delete(cname)
	}
	s: _Stat
	if unix.sys_stat(cname, &s) < 0 {
		return false
	}
	return S_ISREG(s.mode)
}
// Reports whether the open descriptor refers to a regular file.
// Returns false if fstat(2) fails.
_is_file_fd :: proc(fd: int) -> bool {
	s: _Stat
	if unix.sys_fstat(fd, &s) < 0 {
		return false
	}
	return S_ISREG(s.mode)
}
// Reports whether `name` refers to a directory.
// Returns false if stat(2) fails for any reason.
_is_dir :: proc(name: string) -> bool {
	cname, heap_allocated := _name_to_cstring(name)
	defer if heap_allocated {
		delete(cname)
	}
	s: _Stat
	if unix.sys_stat(cname, &s) < 0 {
		return false
	}
	return S_ISDIR(s.mode)
}
// Reports whether the open descriptor refers to a directory.
// Returns false if fstat(2) fails.
_is_dir_fd :: proc(fd: int) -> bool {
	s: _Stat
	if unix.sys_fstat(fd, &s) < 0 {
		return false
	}
	return S_ISDIR(s.mode)
}
// Converts `name` to a NUL-terminated cstring. Short names (the common
// case) are cloned into the temp allocator and need no cleanup; names
// longer than _CSTRING_NAME_HEAP_THRESHOLD are cloned with
// context.allocator instead, because PATH_MAX is only a convention and
// a sufficiently large path could exhaust the temp allocator's buffer.
// `allocated` tells the caller whether the result must be delete()'d.
_name_to_cstring :: proc(name: string) -> (cname: cstring, allocated: bool) {
	if len(name) <= _CSTRING_NAME_HEAP_THRESHOLD {
		return strings.clone_to_cstring(name, context.temp_allocator), false
	}
	return strings.clone_to_cstring(name), true
}

722
core/os/os2/heap_linux.odin Normal file
View File

@@ -0,0 +1,722 @@
//+private
package os2
import "core:sys/unix"
import "core:sync"
import "core:mem"
// NOTEs
//
// All allocations below DIRECT_MMAP_THRESHOLD exist inside of memory "Regions." A region
// consists of a Region_Header and the memory that will be divided into allocations to
// send to the user. The memory is an array of "Allocation_Headers" which are 8 bytes.
// Allocation_Headers are used to navigate the memory in the region. The "next" member of
// the Allocation_Header points to the next header, and the space between the headers
// can be used to send to the user. This space between is referred to as "blocks" in the
// code. The indexes in the header refer to these blocks instead of bytes. This allows us
// to index all the memory in the region with a u16.
//
// When an allocation request is made, it will use the first free block that can contain
// the entire block. If there is an excess number of blocks (as specified by the constant
// BLOCK_SEGMENT_THRESHOLD), this extra space will be segmented and left in the free_list.
//
// To keep the implementation simple, there can never exist 2 free blocks adjacent to each
// other. Any freeing will result in attempting to merge the blocks before and after the
// newly free'd blocks.
//
// Any request for size above the DIRECT_MMAP_THRESHOLD will result in the allocation
// getting its own individual mmap. Individual mmaps will still get an Allocation_Header
// that contains the size with the last bit set to 1 to indicate it is indeed a direct
// mmap allocation.
// Why not brk?
// glibc's malloc utilizes a mix of the brk and mmap system calls. This implementation
// does *not* utilize the brk system call to avoid possible conflicts with foreign C
// code. Just because we aren't directly using libc, there is nothing stopping the user
// from doing it.
// What's with all the #no_bounds_check?
// When memory is returned from mmap, it technically doesn't get written ... well ... anywhere
// until that region is written to by *you*. So, when a new region is created, we call mmap
// to get a pointer to some memory, and we claim that memory is a ^Region. Therefore, the
// region itself is never formally initialized by the compiler as this would result in writing
// zeros to memory that we can already assume are 0. This would also have the effect of
// actually committing this data to memory whether it gets used or not.
//
// Some variables to play with
//
// Minimum blocks used for any one allocation
MINIMUM_BLOCK_COUNT :: 2
// Number of extra blocks beyond the requested amount where we would segment.
// E.g. (blocks) |H0123456| 7 available
// |H01H0123| Ask for 2, now 4 available
BLOCK_SEGMENT_THRESHOLD :: 4
// Anything above this threshold will get its own memory map. Since regions
// are indexed by 16 bit integers, this value should not surpass max(u16) * 6
DIRECT_MMAP_THRESHOLD_USER :: int(max(u16))
// The point at which we convert direct mmap to region. This should be a decent
// amount less than DIRECT_MMAP_THRESHOLD to avoid jumping in and out of regions.
MMAP_TO_REGION_SHRINK_THRESHOLD :: DIRECT_MMAP_THRESHOLD - PAGE_SIZE * 4
// free_list is dynamic and is initialized in the beginning of the region memory
// when the region is initialized. Once resized, it can be moved anywhere.
FREE_LIST_DEFAULT_CAP :: 32
//
// Other constants that should not be touched
//
// This universally seems to be 4096 outside of uncommon archs.
PAGE_SIZE :: 4096
// just rounding up to nearest PAGE_SIZE
DIRECT_MMAP_THRESHOLD :: (DIRECT_MMAP_THRESHOLD_USER-1) + PAGE_SIZE - (DIRECT_MMAP_THRESHOLD_USER-1) % PAGE_SIZE
// Regions must be big enough to hold DIRECT_MMAP_THRESHOLD - 1 as well
// as end right on a page boundary as to not waste space.
SIZE_OF_REGION :: DIRECT_MMAP_THRESHOLD + 4 * int(PAGE_SIZE)
// size of user memory blocks
BLOCK_SIZE :: size_of(Allocation_Header)
// number of allocation sections (call them blocks) of the region used for allocations
BLOCKS_PER_REGION :: u16((SIZE_OF_REGION - size_of(Region_Header)) / BLOCK_SIZE)
// minimum amount of space that can used by any individual allocation (includes header)
MINIMUM_ALLOCATION :: (MINIMUM_BLOCK_COUNT * BLOCK_SIZE) + BLOCK_SIZE
// This is used as a boolean value for Region_Header.local_addr.
CURRENTLY_ACTIVE :: (^^Region)(~uintptr(0))
// how many u16 free-list entries fit in one block
FREE_LIST_ENTRIES_PER_BLOCK :: BLOCK_SIZE / size_of(u16)
// mmap(2) configuration shared by all region / direct allocations
MMAP_FLAGS :: unix.MAP_ANONYMOUS | unix.MAP_PRIVATE
MMAP_PROT :: unix.PROT_READ | unix.PROT_WRITE
// Each thread owns (at most) one region at a time; regions are also
// chained into the global linked list below so other threads can steal.
@thread_local _local_region: ^Region
global_regions: ^Region
// There is no way of correctly setting the last bit of free_idx or
// the last bit of requested, so we can safely use it as a flag to
// determine if we are interacting with a direct mmap.
REQUESTED_MASK :: 0x7FFFFFFFFFFFFFFF
IS_DIRECT_MMAP :: 0x8000000000000000
// Special free_idx value that does not index the free_list.
NOT_FREE :: 0x7FFF
// 8-byte header preceding every allocation. For in-region allocations
// the struct view (block indices) is used; for direct-mmap allocations
// the `requested` view is used with its top bit (IS_DIRECT_MMAP) set.
Allocation_Header :: struct #raw_union {
	using _: struct {
		// Block indices
		idx: u16,      // this header's own block index
		prev: u16,     // previous allocation's block index
		next: u16,     // next allocation's block index
		free_idx: u16, // index into region free_list, or NOT_FREE
	},
	requested: u64,
}
// Per-region bookkeeping stored at the start of each mmap'd region.
Region_Header :: struct #align 16 {
	next_region: ^Region, // points to next region in global_heap (linked list)
	local_addr: ^^Region, // tracks region ownership via address of _local_region
	reset_addr: ^^Region, // tracks old local addr for reset
	free_list: []u16,     // indices of free blocks (storage lives inside the region)
	free_list_len: u16,   // number of valid entries in free_list
	free_blocks: u16, // number of free blocks in region (includes headers)
	last_used: u16, // farthest back block that has been used (need zeroing?)
	_reserved: u16,
}
// One mmap'd arena: a header followed by the block array that is carved
// up into user allocations.
Region :: struct {
	hdr: Region_Header,
	memory: [BLOCKS_PER_REGION]Allocation_Header,
}
// mem.Allocator-compatible entry point for the os2 heap. Dispatches the
// allocator mode onto heap_alloc/heap_resize/heap_free, adding alignment
// handling on top of them (the raw heap routines are alignment-unaware).
_heap_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, mem.Allocator_Error) {
	//
	// NOTE(tetra, 2020-01-14): The heap doesn't respect alignment.
	// Instead, we overallocate by `alignment + size_of(rawptr) - 1`, and insert
	// padding. We also store the original pointer returned by heap_alloc right before
	// the pointer we return to the user.
	//
	aligned_alloc :: proc(size, alignment: int, old_ptr: rawptr = nil) -> ([]byte, mem.Allocator_Error) {
		a := max(alignment, align_of(rawptr))
		space := size + a - 1
		allocated_mem: rawptr
		if old_ptr != nil {
			// Resizing: recover the original (unaligned) heap pointer stored
			// one rawptr before the user pointer.
			original_old_ptr := mem.ptr_offset((^rawptr)(old_ptr), -1)^
			allocated_mem = heap_resize(original_old_ptr, space+size_of(rawptr))
		} else {
			allocated_mem = heap_alloc(space+size_of(rawptr))
		}
		aligned_mem := rawptr(mem.ptr_offset((^u8)(allocated_mem), size_of(rawptr)))
		// Round the candidate pointer up to the requested alignment.
		ptr := uintptr(aligned_mem)
		aligned_ptr := (ptr - 1 + uintptr(a)) & -uintptr(a)
		diff := int(aligned_ptr - ptr)
		if (size + diff) > space {
			return nil, .Out_Of_Memory
		}
		aligned_mem = rawptr(aligned_ptr)
		// Stash the raw heap pointer just before the aligned user pointer
		// so free/resize can recover it.
		mem.ptr_offset((^rawptr)(aligned_mem), -1)^ = allocated_mem
		return mem.byte_slice(aligned_mem, size), nil
	}
	aligned_free :: proc(p: rawptr) {
		if p != nil {
			heap_free(mem.ptr_offset((^rawptr)(p), -1)^)
		}
	}
	aligned_resize :: proc(p: rawptr, old_size: int, new_size: int, new_alignment: int) -> (new_memory: []byte, err: mem.Allocator_Error) {
		if p == nil {
			return nil, nil
		}
		return aligned_alloc(new_size, new_alignment, p)
	}
	switch mode {
	case .Alloc:
		return aligned_alloc(size, alignment)
	case .Free:
		aligned_free(old_memory)
	case .Free_All:
		return nil, .Mode_Not_Implemented
	case .Resize:
		if old_memory == nil {
			// Resizing nil is an allocation.
			return aligned_alloc(size, alignment)
		}
		return aligned_resize(old_memory, old_size, size, alignment)
	case .Query_Features:
		set := (^mem.Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Free, .Resize, .Query_Features}
		}
		return nil, nil
	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}
	return nil, nil
}
// Allocates `size` bytes from the thread's region (or a direct mmap for
// large requests). Returns nil on failure. The returned memory is zeroed
// (fresh pages from mmap, or explicitly zeroed when reusing blocks).
heap_alloc :: proc(size: int) -> rawptr {
	if size >= DIRECT_MMAP_THRESHOLD {
		return _direct_mmap_alloc(size)
	}
	// atomically check if the local region has been stolen
	if _local_region != nil {
		// Try to mark our region CURRENTLY_ACTIVE so no other thread
		// steals it while we allocate.
		res := sync.atomic_compare_exchange_strong_explicit(
			&_local_region.hdr.local_addr,
			&_local_region,
			CURRENTLY_ACTIVE,
			.Acquire,
			.Relaxed,
		)
		if res != &_local_region {
			// At this point, the region has been stolen and res contains the unexpected value
			expected := res
			if res != CURRENTLY_ACTIVE {
				// One more attempt against whatever owner address we saw.
				expected = res
				res = sync.atomic_compare_exchange_strong_explicit(
					&_local_region.hdr.local_addr,
					expected,
					CURRENTLY_ACTIVE,
					.Acquire,
					.Relaxed,
				)
			}
			if res != expected {
				// Lost the race: abandon this region and retrieve another below.
				_local_region = nil
			}
		}
	}
	size := size
	size = _round_up_to_nearest(size, BLOCK_SIZE)
	blocks_needed := u16(max(MINIMUM_BLOCK_COUNT, size / BLOCK_SIZE))
	// retrieve a region if new thread or stolen
	if _local_region == nil {
		_local_region, _ = _region_retrieve_with_space(blocks_needed)
		if _local_region == nil {
			return nil
		}
	}
	// Release ownership of the region when done, whatever path we take.
	defer sync.atomic_store_explicit(&_local_region.hdr.local_addr, &_local_region, .Release)
	// At this point we have a usable region. Let's find the user some memory
	idx: u16
	local_region_idx := _region_get_local_idx()
	back_idx := -1
	infinite: for {
		// Scan the region's free list for the first block run big enough.
		for i := 0; i < int(_local_region.hdr.free_list_len); i += 1 {
			idx = _local_region.hdr.free_list[i]
			#no_bounds_check if _get_block_count(_local_region.memory[idx]) >= blocks_needed {
				break infinite
			}
		}
		// Nothing here: release this region and move on to another one.
		sync.atomic_store_explicit(&_local_region.hdr.local_addr, &_local_region, .Release)
		_local_region, back_idx = _region_retrieve_with_space(blocks_needed, local_region_idx, back_idx)
	}
	user_ptr, used := _region_get_block(_local_region, idx, blocks_needed)
	_local_region.hdr.free_blocks -= (used + 1)
	// If this memory was ever used before, it now needs to be zero'd.
	if idx < _local_region.hdr.last_used {
		mem.zero(user_ptr, int(used) * BLOCK_SIZE)
	} else {
		_local_region.hdr.last_used = idx + used
	}
	return user_ptr
}
// Resizes a heap allocation, dispatching on where it currently lives:
// direct-mmap stays mmap'd (or shrinks into a region), in-region memory
// either resizes in place or is promoted to a direct mmap when the new
// size crosses the threshold.
heap_resize :: proc(old_memory: rawptr, new_size: int) -> rawptr #no_bounds_check {
	alloc := _get_allocation_header(old_memory)
	if alloc.requested & IS_DIRECT_MMAP > 0 {
		return _direct_mmap_resize(alloc, new_size)
	}
	if new_size > DIRECT_MMAP_THRESHOLD {
		return _direct_mmap_from_region(alloc, new_size)
	}
	return _region_resize(alloc, new_size)
}
// Frees a heap allocation: unmaps direct-mmap memory, or returns region
// blocks to their region's free list (taking ownership of that region
// for the duration, then releasing it).
heap_free :: proc(memory: rawptr) {
	alloc := _get_allocation_header(memory)
	if alloc.requested & IS_DIRECT_MMAP == IS_DIRECT_MMAP {
		_direct_mmap_free(alloc)
		return
	}
	// Double-free / corruption guard: the block must not already be free.
	assert(alloc.free_idx == NOT_FREE)
	_region_find_and_assign_local(alloc)
	_region_local_free(alloc)
	sync.atomic_store_explicit(&_local_region.hdr.local_addr, &_local_region, .Release)
}
//
// Regions
//
// mmaps and initializes a fresh Region, marks it CURRENTLY_ACTIVE for the
// calling thread, and links it onto the global region list. Returns nil
// if the mmap fails. (#no_bounds_check: the mapped memory is addressed
// as a ^Region without being formally initialized — see file comment.)
_new_region :: proc() -> ^Region #no_bounds_check {
	res := unix.sys_mmap(nil, uint(SIZE_OF_REGION), MMAP_PROT, MMAP_FLAGS, -1, 0)
	if res < 0 {
		return nil
	}
	new_region := (^Region)(uintptr(res))
	new_region.hdr.local_addr = CURRENTLY_ACTIVE
	new_region.hdr.reset_addr = &_local_region
	// Carve the region's own free_list out of the first blocks.
	free_list_blocks := _round_up_to_nearest(FREE_LIST_DEFAULT_CAP, FREE_LIST_ENTRIES_PER_BLOCK)
	_region_assign_free_list(new_region, &new_region.memory[1], u16(free_list_blocks) * FREE_LIST_ENTRIES_PER_BLOCK)
	// + 2 to account for free_list's allocation header
	first_user_block := len(new_region.hdr.free_list) / FREE_LIST_ENTRIES_PER_BLOCK + 2
	// first allocation header (this is a free list)
	new_region.memory[0].next = u16(first_user_block)
	new_region.memory[0].free_idx = NOT_FREE
	new_region.memory[first_user_block].idx = u16(first_user_block)
	new_region.memory[first_user_block].next = BLOCKS_PER_REGION - 1
	// add the first user block to the free list
	new_region.hdr.free_list[0] = u16(first_user_block)
	new_region.hdr.free_list_len = 1
	new_region.hdr.free_blocks = _get_block_count(new_region.memory[first_user_block]) + 1
	// Lock-free append: walk the global list CAS'ing nil -> new_region.
	for r := sync.atomic_compare_exchange_strong(&global_regions, nil, new_region);
	    r != nil;
	    r = sync.atomic_compare_exchange_strong(&r.hdr.next_region, nil, new_region) {}
	return new_region
}
// Resizes an in-region allocation: shrinks in place, grows in place by
// absorbing a free neighbor when possible, or else allocates elsewhere,
// copies, and frees the old blocks. `alloc_is_free_list` marks the
// special case where the allocation being resized is a region's own
// free_list storage (ownership is already held, and the free_list slice
// must be re-pointed before the old memory is released).
_region_resize :: proc(alloc: ^Allocation_Header, new_size: int, alloc_is_free_list: bool = false) -> rawptr #no_bounds_check {
	assert(alloc.free_idx == NOT_FREE)
	old_memory := mem.ptr_offset(alloc, 1)
	old_block_count := _get_block_count(alloc^)
	new_block_count := u16(
		max(MINIMUM_BLOCK_COUNT, _round_up_to_nearest(new_size, BLOCK_SIZE) / BLOCK_SIZE),
	)
	if new_block_count < old_block_count {
		// BUG FIX: the original compared `new_block_count - old_block_count`,
		// which underflows u16 in this branch (new < old) and so was always
		// >= MINIMUM_BLOCK_COUNT. The shrink amount is old minus new.
		if old_block_count - new_block_count >= MINIMUM_BLOCK_COUNT {
			_region_find_and_assign_local(alloc)
			_region_segment(_local_region, alloc, new_block_count, alloc.free_idx)
			new_block_count = _get_block_count(alloc^)
			sync.atomic_store_explicit(&_local_region.hdr.local_addr, &_local_region, .Release)
		}
		// need to zero anything within the new block that that lies beyond new_size
		extra_bytes := int(new_block_count * BLOCK_SIZE) - new_size
		extra_bytes_ptr := mem.ptr_offset((^u8)(alloc), new_size + BLOCK_SIZE)
		mem.zero(extra_bytes_ptr, extra_bytes)
		return old_memory
	}
	if !alloc_is_free_list {
		_region_find_and_assign_local(alloc)
	}
	defer if !alloc_is_free_list {
		sync.atomic_store_explicit(&_local_region.hdr.local_addr, &_local_region, .Release)
	}
	// First, let's see if we can grow in place.
	if alloc.next != BLOCKS_PER_REGION - 1 && _local_region.memory[alloc.next].free_idx != NOT_FREE {
		next_alloc := _local_region.memory[alloc.next]
		total_available := old_block_count + _get_block_count(next_alloc) + 1
		if total_available >= new_block_count {
			alloc.next = next_alloc.next
			_local_region.memory[alloc.next].prev = alloc.idx
			if total_available - new_block_count > BLOCK_SEGMENT_THRESHOLD {
				_region_segment(_local_region, alloc, new_block_count, next_alloc.free_idx)
			} else {
				_region_free_list_remove(_local_region, next_alloc.free_idx)
			}
			mem.zero(&_local_region.memory[next_alloc.idx], int(alloc.next - next_alloc.idx) * BLOCK_SIZE)
			_local_region.hdr.last_used = max(alloc.next, _local_region.hdr.last_used)
			_local_region.hdr.free_blocks -= (_get_block_count(alloc^) - old_block_count)
			if alloc_is_free_list {
				_region_assign_free_list(_local_region, old_memory, _get_block_count(alloc^))
			}
			return old_memory
		}
	}
	// If we made it this far, we need to resize, copy, zero and free.
	region_iter := _local_region
	local_region_idx := _region_get_local_idx()
	back_idx := -1
	idx: u16
	infinite: for {
		for i := 0; i < len(region_iter.hdr.free_list); i += 1 {
			idx = region_iter.hdr.free_list[i]
			if _get_block_count(region_iter.memory[idx]) >= new_block_count {
				break infinite
			}
		}
		if region_iter != _local_region {
			sync.atomic_store_explicit(
				&region_iter.hdr.local_addr,
				region_iter.hdr.reset_addr,
				.Release,
			)
		}
		region_iter, back_idx = _region_retrieve_with_space(new_block_count, local_region_idx, back_idx)
	}
	if region_iter != _local_region {
		sync.atomic_store_explicit(
			&region_iter.hdr.local_addr,
			region_iter.hdr.reset_addr,
			.Release,
		)
	}
	// copy from old memory
	new_memory, used_blocks := _region_get_block(region_iter, idx, new_block_count)
	mem.copy(new_memory, old_memory, int(old_block_count * BLOCK_SIZE))
	// zero any new memory
	addon_section := mem.ptr_offset((^Allocation_Header)(new_memory), old_block_count)
	new_blocks := used_blocks - old_block_count
	mem.zero(addon_section, int(new_blocks) * BLOCK_SIZE)
	region_iter.hdr.free_blocks -= (used_blocks + 1)
	// Set free_list before freeing.
	if alloc_is_free_list {
		_region_assign_free_list(_local_region, new_memory, used_blocks)
	}
	// free old memory
	_region_local_free(alloc)
	return new_memory
}
// Returns an allocation's blocks to the (already-owned) local region,
// coalescing with free neighbors so that no two free blocks are ever
// adjacent. Growing the free_list itself may recursively resize it.
_region_local_free :: proc(alloc: ^Allocation_Header) #no_bounds_check {
	alloc := alloc
	add_to_free_list := true
	_local_region.hdr.free_blocks += _get_block_count(alloc^) + 1
	// try to merge with prev
	if alloc.idx > 0 && _local_region.memory[alloc.prev].free_idx != NOT_FREE {
		_local_region.memory[alloc.prev].next = alloc.next
		_local_region.memory[alloc.next].prev = alloc.prev
		// prev was already on the free list, so nothing new to append
		alloc = &_local_region.memory[alloc.prev]
		add_to_free_list = false
	}
	// try to merge with next
	if alloc.next < BLOCKS_PER_REGION - 1 && _local_region.memory[alloc.next].free_idx != NOT_FREE {
		old_next := alloc.next
		alloc.next = _local_region.memory[old_next].next
		_local_region.memory[alloc.next].prev = alloc.idx
		if add_to_free_list {
			// Reuse next's free-list slot for the merged block.
			_local_region.hdr.free_list[_local_region.memory[old_next].free_idx] = alloc.idx
			alloc.free_idx = _local_region.memory[old_next].free_idx
		} else {
			// NOTE: We have already merged with prev, and now merged with next.
			// Now, we are actually going to remove from the free_list.
			_region_free_list_remove(_local_region, _local_region.memory[old_next].free_idx)
		}
		add_to_free_list = false
	}
	// This is the only place where anything is appended to the free list.
	if add_to_free_list {
		fl := _local_region.hdr.free_list
		alloc.free_idx = _local_region.hdr.free_list_len
		fl[alloc.free_idx] = alloc.idx
		_local_region.hdr.free_list_len += 1
		if int(_local_region.hdr.free_list_len) == len(fl) {
			// free_list is full: double its backing allocation.
			free_alloc := _get_allocation_header(mem.raw_data(_local_region.hdr.free_list))
			_region_resize(free_alloc, len(fl) * 2 * size_of(fl[0]), true)
		}
	}
}
// Point the region's free_list slice at `memory`, sized for `blocks`
// blocks worth of u16 entries.
_region_assign_free_list :: proc(region: ^Region, memory: rawptr, blocks: u16) {
	raw := mem.Raw_Slice {
		data = memory,
		len  = int(blocks) * FREE_LIST_ENTRIES_PER_BLOCK,
	}
	region.hdr.free_list = transmute([]u16)raw
}
// Find a region with at least `blocks` free blocks and acquire it by
// swapping hdr.local_addr to CURRENTLY_ACTIVE. The region at index
// `local_idx` (the caller's own region) and regions below `back_idx`
// (already inspected earlier in the caller's loop) are skipped.
// Falls back to allocating a brand-new region when none qualifies.
// Returns the acquired region and its index in the global list.
_region_retrieve_with_space :: proc(blocks: u16, local_idx: int = -1, back_idx: int = -1) -> (^Region, int) {
	r: ^Region
	idx: int
	for r = global_regions; r != nil; r = r.hdr.next_region {
		if idx == local_idx || idx < back_idx || r.hdr.free_blocks < blocks {
			idx += 1
			continue
		}
		idx += 1
		local_addr: ^^Region = sync.atomic_load(&r.hdr.local_addr)
		if local_addr != CURRENTLY_ACTIVE {
			// Try to take ownership; remember the previous owner so it
			// can be restored when the caller releases the region.
			res := sync.atomic_compare_exchange_strong_explicit(
				&r.hdr.local_addr,
				local_addr,
				CURRENTLY_ACTIVE,
				.Acquire,
				.Relaxed,
			)
			if res == local_addr {
				r.hdr.reset_addr = local_addr
				return r, idx
			}
		}
	}
	return _new_region(), idx
}
// Find the region whose block array contains `addr`.
// `addr` must belong to some region; reaching the end of the list is a
// logic error, hence unreachable().
_region_retrieve_from_addr :: proc(addr: rawptr) -> ^Region {
	for region := global_regions; region != nil; region = region.hdr.next_region {
		if _region_contains_mem(region, addr) {
			return region
		}
	}
	unreachable()
}
// Claim `blocks_needed` blocks from the free allocation at block `idx`.
// Oversized free chunks are segmented so the remainder stays on the free
// list; otherwise the whole chunk is handed out and its free-list entry
// removed. Returns the user pointer (just past the header) and the number
// of blocks actually granted.
_region_get_block :: proc(region: ^Region, idx, blocks_needed: u16) -> (rawptr, u16) #no_bounds_check {
	alloc := &region.memory[idx]
	assert(alloc.free_idx != NOT_FREE)
	assert(alloc.next > 0)
	block_count := _get_block_count(alloc^)
	if block_count - blocks_needed > BLOCK_SEGMENT_THRESHOLD {
		// Split: keep blocks_needed here, re-list the tail segment.
		_region_segment(region, alloc, blocks_needed, alloc.free_idx)
	} else {
		_region_free_list_remove(region, alloc.free_idx)
	}
	alloc.free_idx = NOT_FREE
	return mem.ptr_offset(alloc, 1), _get_block_count(alloc^)
}
// Split the free allocation `alloc` so it keeps exactly `blocks` blocks.
// The remainder becomes a new free allocation that inherits free-list
// slot `new_free_idx`.
_region_segment :: proc(region: ^Region, alloc: ^Allocation_Header, blocks, new_free_idx: u16) #no_bounds_check {
	old_next := alloc.next
	// +1 skips the header block of the new tail segment.
	alloc.next = alloc.idx + blocks + 1
	region.memory[old_next].prev = alloc.next
	// Initialize alloc.next allocation header here.
	region.memory[alloc.next].prev = alloc.idx
	region.memory[alloc.next].next = old_next
	region.memory[alloc.next].idx = alloc.next
	region.memory[alloc.next].free_idx = new_free_idx
	// Replace our original spot in the free_list with new segment.
	region.hdr.free_list[new_free_idx] = alloc.next
}
// Index of _local_region within the global region list, or -1 when the
// list does not contain it.
_region_get_local_idx :: proc() -> int {
	i := 0
	for region := global_regions; region != nil; region = region.hdr.next_region {
		if region == _local_region {
			return i
		}
		i += 1
	}
	return -1
}
// Point _local_region at the region owning `alloc`, then spin until this
// thread acquires it (hdr.local_addr set to CURRENTLY_ACTIVE).
_region_find_and_assign_local :: proc(alloc: ^Allocation_Header) {
	// Find the region that contains this memory
	if !_region_contains_mem(_local_region, alloc) {
		_local_region = _region_retrieve_from_addr(alloc)
	}
	// At this point, _local_region is set correctly. Spin until acquired
	// NOTE(review): the CAS expects the region's resting local_addr to be
	// this thread's &_local_region; while another thread holds or lists
	// the region, the exchange fails and this loop spins — confirm that
	// matches the intended ownership protocol.
	res: ^^Region
	for res != &_local_region {
		res = sync.atomic_compare_exchange_strong_explicit(
			&_local_region.hdr.local_addr,
			&_local_region,
			CURRENTLY_ACTIVE,
			.Acquire,
			.Relaxed,
		)
	}
}
// Reports whether `memory` lies within region `r`'s block array.
// A nil region contains nothing.
_region_contains_mem :: proc(r: ^Region, memory: rawptr) -> bool #no_bounds_check {
	if r == nil {
		return false
	}
	lo := uintptr(&r.memory[0])
	hi := uintptr(&r.memory[BLOCKS_PER_REGION - 1])
	p := uintptr(memory)
	return lo <= p && p <= hi
}
// Remove entry `free_idx` from the region's free list by swapping the
// last entry into its place (swap-pop), keeping the moved allocation's
// back-reference consistent.
_region_free_list_remove :: proc(region: ^Region, free_idx: u16) #no_bounds_check {
	// pop, swap and update allocation hdr
	if n := region.hdr.free_list_len - 1; free_idx != n {
		region.hdr.free_list[free_idx] = region.hdr.free_list[n]
		// Fix the moved entry's back-reference.
		alloc_idx := region.hdr.free_list[free_idx]
		region.memory[alloc_idx].free_idx = free_idx
	}
	region.hdr.free_list_len -= 1
}
//
// Direct mmap
//
// Allocate `size` bytes via a dedicated anonymous mmap (used for
// requests too large for a region). One extra block in front holds the
// Allocation_Header.
_direct_mmap_alloc :: proc(size: int) -> rawptr {
	mmap_size := _round_up_to_nearest(size + BLOCK_SIZE, PAGE_SIZE)
	new_allocation := unix.sys_mmap(nil, uint(mmap_size), MMAP_PROT, MMAP_FLAGS, -1, 0)
	// Return values in (-4096, 0) are -errno from the kernel.
	if new_allocation < 0 && new_allocation > -4096 {
		return nil
	}
	alloc := (^Allocation_Header)(uintptr(new_allocation))
	alloc.requested = u64(size) // NOTE: requested = requested size
	// Tag the header so free/resize can tell this is a direct mapping.
	alloc.requested += IS_DIRECT_MMAP
	return rawptr(mem.ptr_offset(alloc, 1))
}
// Resize a direct-mmap allocation in place via mremap. Falls back to
// moving the data into a region when the new size drops below
// MMAP_TO_REGION_SHRINK_THRESHOLD. Returns the (possibly moved) user
// pointer, or nil on kernel error.
_direct_mmap_resize :: proc(alloc: ^Allocation_Header, new_size: int) -> rawptr {
	old_requested := int(alloc.requested & REQUESTED_MASK)
	old_mmap_size := _round_up_to_nearest(old_requested + BLOCK_SIZE, PAGE_SIZE)
	new_mmap_size := _round_up_to_nearest(new_size + BLOCK_SIZE, PAGE_SIZE)
	if int(new_mmap_size) < MMAP_TO_REGION_SHRINK_THRESHOLD {
		return _direct_mmap_to_region(alloc, new_size)
	} else if old_requested == new_size {
		// Identical request: nothing to do, return the user pointer.
		return mem.ptr_offset(alloc, 1)
	}
	new_allocation := unix.sys_mremap(
		alloc,
		uint(old_mmap_size),
		uint(new_mmap_size),
		unix.MREMAP_MAYMOVE,
	)
	// Return values in (-4096, 0) are -errno from the kernel.
	if new_allocation < 0 && new_allocation > -4096 {
		return nil
	}
	new_header := (^Allocation_Header)(uintptr(new_allocation))
	new_header.requested = u64(new_size)
	new_header.requested += IS_DIRECT_MMAP // keep the direct-mmap tag
	if new_mmap_size > old_mmap_size {
		// new section may not be pointer aligned, so cast to ^u8
		new_section := mem.ptr_offset((^u8)(new_header), old_requested + BLOCK_SIZE)
		// NOTE(review): this zeroes (new_mmap_size - old_mmap_size) bytes
		// starting just past the old user data. Pages added by mremap are
		// already zero-filled, so presumably the intent is to clear the
		// stale tail of the old final page — confirm the chosen length.
		mem.zero(new_section, new_mmap_size - old_mmap_size)
	}
	return mem.ptr_offset(new_header, 1)
}
// Promote a region-backed allocation that outgrew its region into a
// dedicated mmap, copying the old contents over and freeing the region
// blocks.
_direct_mmap_from_region :: proc(alloc: ^Allocation_Header, new_size: int) -> rawptr {
	new_memory := _direct_mmap_alloc(new_size)
	if new_memory != nil {
		old_memory := mem.ptr_offset(alloc, 1)
		mem.copy(new_memory, old_memory, int(_get_block_count(alloc^)) * BLOCK_SIZE)
	}
	// NOTE(review): the old allocation is freed even when the mmap failed
	// (new_memory == nil), so the caller loses the data — confirm intended.
	_region_find_and_assign_local(alloc)
	_region_local_free(alloc)
	sync.atomic_store_explicit(&_local_region.hdr.local_addr, &_local_region, .Release)
	return new_memory
}
// Move a direct-mmap allocation back into a region once it has shrunk
// below MMAP_TO_REGION_SHRINK_THRESHOLD. Returns the new user pointer,
// or nil (leaving the original mapping intact) when the region
// allocation fails.
_direct_mmap_to_region :: proc(alloc: ^Allocation_Header, new_size: int) -> rawptr {
	new_memory := heap_alloc(new_size)
	if new_memory != nil {
		// User data starts one header past `alloc`. (The previous code
		// read from ptr_offset(alloc, -1) — one header BEFORE the mmap
		// base address, i.e. outside the mapping.)
		mem.copy(new_memory, mem.ptr_offset(alloc, 1), new_size)
		_direct_mmap_free(alloc)
	}
	return new_memory
}
// Unmap a direct-mmap allocation. The mapping size is recomputed from
// the stored request exactly as _direct_mmap_alloc computed it, so the
// whole mapping (header included) is released.
_direct_mmap_free :: proc(alloc: ^Allocation_Header) {
	size_requested := int(alloc.requested & REQUESTED_MASK)
	mapping_size := uint(_round_up_to_nearest(size_requested + BLOCK_SIZE, PAGE_SIZE))
	unix.sys_munmap(alloc, mapping_size)
}
//
// Util
//
// Number of usable (non-header) blocks in an allocation: the span
// between this header and the next, minus the header block itself.
_get_block_count :: #force_inline proc(alloc: Allocation_Header) -> u16 {
	return alloc.next - alloc.idx - 1
}
// Step back one header from a user pointer to reach its Allocation_Header.
_get_allocation_header :: #force_inline proc(raw_mem: rawptr) -> ^Allocation_Header {
	return mem.ptr_offset((^Allocation_Header)(raw_mem), -1)
}
// Round `size` up to the next multiple of `round`; exact multiples are
// returned unchanged (the size-1 bias handles that case).
_round_up_to_nearest :: #force_inline proc(size, round: int) -> int {
	n := size - 1
	return n + round - n % round
}

247
core/os/os2/path_linux.odin Normal file
View File

@@ -0,0 +1,247 @@
//+private
package os2
import "core:strings"
import "core:strconv"
import "core:runtime"
import "core:sys/unix"
_Path_Separator :: '/'
_Path_List_Separator :: ':'
_S_IFMT :: 0o170000 // Type of file mask
_S_IFIFO :: 0o010000 // Named pipe (fifo)
_S_IFCHR :: 0o020000 // Character special
_S_IFDIR :: 0o040000 // Directory
_S_IFBLK :: 0o060000 // Block special
_S_IFREG :: 0o100000 // Regular
_S_IFLNK :: 0o120000 // Symbolic link
_S_IFSOCK :: 0o140000 // Socket
_OPENDIR_FLAGS :: _O_RDONLY|_O_NONBLOCK|_O_DIRECTORY|_O_LARGEFILE|_O_CLOEXEC
// Reports whether `c` separates path components; on Linux only '/' does.
_is_path_separator :: proc(c: byte) -> bool {
	return c == '/'
}
// Create a single directory at `path` with the permission bits of `perm`.
_mkdir :: proc(path: string, perm: File_Mode) -> Error {
	// NOTE: These modes would require sys_mknod, however, that would require
	// additional arguments to this function.
	if perm & (File_Mode_Sym_Link | File_Mode_Char_Device | File_Mode_Device | File_Mode_Named_Pipe) != 0 {
		return .Invalid_Argument
	}
	cpath, heap_allocated := _name_to_cstring(path)
	defer if heap_allocated {
		delete(cpath)
	}
	return _ok_or_error(unix.sys_mkdir(cpath, int(perm & 0o777)))
}
// Create every missing directory along `path` (like `mkdir -p`).
// Returns nil when at least one directory was created, .Exist when the
// full path was already present.
_mkdir_all :: proc(path: string, perm: File_Mode) -> Error {
	// Handles one path component per call: NUL-terminates the current
	// component in place, opens (creating if necessary) that directory
	// relative to `dfd`, then recurses on the remainder. `dfd` is always
	// closed before recursing or returning.
	_mkdirat :: proc(dfd: int, path: []u8, perm: int, has_created: ^bool) -> Error {
		if len(path) == 0 {
			return _ok_or_error(unix.sys_close(dfd))
		}
		i: int
		for /**/; i < len(path) - 1 && path[i] != '/'; i += 1 {}
		path[i] = 0
		new_dfd := unix.sys_openat(dfd, cstring(&path[0]), _OPENDIR_FLAGS)
		if new_dfd == -ENOENT {
			// Component missing: create it, then open it.
			if res := unix.sys_mkdirat(dfd, cstring(&path[0]), perm); res < 0 {
				return _get_platform_error(res)
			}
			has_created^ = true
			new_dfd = unix.sys_openat(dfd, cstring(&path[0]), _OPENDIR_FLAGS)
		}
		// BUG FIX: the original switch only matched `case 0:` for success,
		// so any successful openat returning a positive fd (the normal
		// case) fell into the error branch and the walk stopped early.
		if new_dfd < 0 {
			return _get_platform_error(new_dfd)
		}
		if res := unix.sys_close(dfd); res < 0 {
			return _get_platform_error(res)
		}
		// skip consecutive '/'
		for i += 1; i < len(path) && path[i] == '/'; i += 1 {}
		return _mkdirat(new_dfd, path[i:], perm, has_created)
	}

	if perm & (File_Mode_Named_Pipe | File_Mode_Device | File_Mode_Char_Device | File_Mode_Sym_Link) != 0 {
		return .Invalid_Argument
	}

	// need something we can edit, and use to generate cstrings
	allocated: bool
	path_bytes: []u8
	if len(path) > _CSTRING_NAME_HEAP_THRESHOLD {
		allocated = true
		path_bytes = make([]u8, len(path) + 1)
	} else {
		path_bytes = make([]u8, len(path) + 1, context.temp_allocator)
	}
	defer if allocated {
		delete(path_bytes)
	}

	// NULL terminate the byte slice to make it a valid cstring
	copy(path_bytes, path)
	path_bytes[len(path)] = 0

	// Anchor the walk at "/" for absolute paths, "." otherwise.
	dfd: int
	if path_bytes[0] == '/' {
		dfd = unix.sys_open("/", _OPENDIR_FLAGS)
		path_bytes = path_bytes[1:]
	} else {
		dfd = unix.sys_open(".", _OPENDIR_FLAGS)
	}
	if dfd < 0 {
		return _get_platform_error(dfd)
	}

	has_created: bool
	_mkdirat(dfd, path_bytes, int(perm & 0o777), &has_created) or_return
	if has_created {
		return nil
	}
	return .Exist
}
// Mirrors the kernel's struct linux_dirent64 as filled by getdents64.
dirent64 :: struct {
	d_ino:    u64,   // inode number
	d_off:    u64,   // offset to the next dirent
	d_reclen: u16,   // total size of this record in bytes
	d_type:   u8,    // file type (DT_* constants)
	d_name:   [1]u8, // NUL-terminated name; actually variable length
}
// Recursively delete `path`: plain files are unlinked directly,
// directories are descended via openat and removed bottom-up.
_remove_all :: proc(path: string) -> Error {
	DT_DIR :: 4 // d_type value for directories in linux_dirent64

	// Delete every entry inside the directory open at `dfd`.
	_remove_all_dir :: proc(dfd: int) -> Error {
		n := 64
		buf := make([]u8, n)
		defer delete(buf)
		loop: for {
			getdents_res := unix.sys_getdents64(dfd, &buf[0], n)
			switch getdents_res {
			case -EINVAL:
				// Buffer too small for even one record: grow and retry.
				delete(buf)
				n *= 2
				buf = make([]u8, n)
				continue loop
			case -4096..<0:
				return _get_platform_error(getdents_res)
			case 0:
				// Directory exhausted.
				break loop
			}
			// Walk the variable-length records packed into buf.
			d: ^dirent64
			for i := 0; i < getdents_res; i += int(d.d_reclen) {
				d = (^dirent64)(rawptr(&buf[i]))
				d_name_cstr := cstring(&d.d_name[0])
				buf_len := uintptr(d.d_reclen) - offset_of(d.d_name)
				/* check for current directory (.) */
				#no_bounds_check if buf_len > 1 && d.d_name[0] == '.' && d.d_name[1] == 0 {
					continue
				}
				/* check for parent directory (..) */
				#no_bounds_check if buf_len > 2 && d.d_name[0] == '.' && d.d_name[1] == '.' && d.d_name[2] == 0 {
					continue
				}
				unlink_res: int
				switch d.d_type {
				case DT_DIR:
					// Recurse into the subdirectory, then remove it.
					new_dfd := unix.sys_openat(dfd, d_name_cstr, _OPENDIR_FLAGS)
					if new_dfd < 0 {
						return _get_platform_error(new_dfd)
					}
					defer unix.sys_close(new_dfd)
					_remove_all_dir(new_dfd) or_return
					unlink_res = unix.sys_unlinkat(dfd, d_name_cstr, int(unix.AT_REMOVEDIR))
				case:
					unlink_res = unix.sys_unlinkat(dfd, d_name_cstr)
				}
				if unlink_res < 0 {
					return _get_platform_error(unlink_res)
				}
			}
		}
		return nil
	}

	path_cstr, allocated := _name_to_cstring(path)
	defer if allocated {
		delete(path_cstr)
	}
	fd := unix.sys_open(path_cstr, _OPENDIR_FLAGS)
	switch fd {
	case -ENOTDIR:
		// Not a directory: a plain unlink suffices.
		return _ok_or_error(unix.sys_unlink(path_cstr))
	case -4096..<0:
		return _get_platform_error(fd)
	}
	defer unix.sys_close(fd)
	_remove_all_dir(fd) or_return
	return _ok_or_error(unix.sys_rmdir(path_cstr))
}
// Current working directory, allocated from `allocator`. Grows the
// buffer and retries while the kernel reports ERANGE.
_getwd :: proc(allocator: runtime.Allocator) -> (string, Error) {
	// NOTE(tetra): I would use PATH_MAX here, but I was not able to find
	// an authoritative value for it across all systems.
	// The largest value I could find was 4096, so might as well use the page size.
	// NOTE(jason): Avoiding libc, so just use 4096 directly
	PATH_MAX :: 4096
	buf := make([dynamic]u8, PATH_MAX, allocator)
	for {
		// sys_getcwd returns the path length (incl. NUL) on success,
		// -errno on failure.
		#no_bounds_check res := unix.sys_getcwd(&buf[0], uint(len(buf)))
		if res >= 0 {
			// The returned string aliases buf's backing memory, which the
			// caller owns through `allocator`.
			return strings.string_from_nul_terminated_ptr(&buf[0], len(buf)), nil
		}
		if res != -ERANGE {
			return "", _get_platform_error(res)
		}
		// Path longer than the buffer: grow and retry.
		resize(&buf, len(buf)+PATH_MAX)
	}
	unreachable()
}
// Change the process's working directory to `dir`.
_setwd :: proc(dir: string) -> Error {
	cdir, heap_allocated := _name_to_cstring(dir)
	defer if heap_allocated {
		delete(cdir)
	}
	return _ok_or_error(unix.sys_chdir(cdir))
}
// Resolve an open fd to an absolute path by reading the
// /proc/self/fd/<fd> symlink. Returns "" when the link cannot be read
// or does not resolve to an absolute path.
_get_full_path :: proc(fd: int, allocator := context.allocator) -> string {
	PROC_FD_PATH :: "/proc/self/fd/"
	// 14 prefix bytes + decimal fd digits + NUL easily fit in 32 bytes.
	buf: [32]u8
	copy(buf[:], PROC_FD_PATH)
	strconv.itoa(buf[len(PROC_FD_PATH):], fd)
	fullpath: string
	err: Error
	// NOTE(review): when err == nil but the path is rejected, the string
	// allocated by _read_link_cstr is leaked to `allocator`; an empty
	// result would also crash on the [0] index — confirm both are
	// impossible/acceptable here.
	if fullpath, err = _read_link_cstr(cstring(&buf[0]), allocator); err != nil || fullpath[0] != '/' {
		return ""
	}
	return fullpath
}

View File

@@ -0,0 +1,7 @@
//+private
package os2
_pipe :: proc() -> (r, w: ^File, err: Error) {
return nil, nil, nil
}

152
core/os/os2/stat_linux.odin Normal file
View File

@@ -0,0 +1,152 @@
//+private
package os2
import "core:time"
import "core:runtime"
import "core:sys/unix"
import "core:path/filepath"
// File type
S_IFMT :: 0o170000 // Type of file mask
S_IFIFO :: 0o010000 // Named pipe (fifo)
S_IFCHR :: 0o020000 // Character special
S_IFDIR :: 0o040000 // Directory
S_IFBLK :: 0o060000 // Block special
S_IFREG :: 0o100000 // Regular
S_IFLNK :: 0o120000 // Symbolic link
S_IFSOCK :: 0o140000 // Socket
// File mode
// Read, write, execute/search by owner
S_IRWXU :: 0o0700 // RWX mask for owner
S_IRUSR :: 0o0400 // R for owner
S_IWUSR :: 0o0200 // W for owner
S_IXUSR :: 0o0100 // X for owner
// Read, write, execute/search by group
S_IRWXG :: 0o0070 // RWX mask for group
S_IRGRP :: 0o0040 // R for group
S_IWGRP :: 0o0020 // W for group
S_IXGRP :: 0o0010 // X for group
// Read, write, execute/search by others
S_IRWXO :: 0o0007 // RWX mask for other
S_IROTH :: 0o0004 // R for other
S_IWOTH :: 0o0002 // W for other
S_IXOTH :: 0o0001 // X for other
S_ISUID :: 0o4000 // Set user id on execution
S_ISGID :: 0o2000 // Set group id on execution
S_ISVTX :: 0o1000 // Sticky bit (restricted deletion for directories)
// Predicates testing the file-type bits of a stat mode.
S_ISLNK  :: #force_inline proc(m: u32) -> bool { return (m & S_IFMT) == S_IFLNK  }
S_ISREG  :: #force_inline proc(m: u32) -> bool { return (m & S_IFMT) == S_IFREG  }
S_ISDIR  :: #force_inline proc(m: u32) -> bool { return (m & S_IFMT) == S_IFDIR  }
S_ISCHR  :: #force_inline proc(m: u32) -> bool { return (m & S_IFMT) == S_IFCHR  }
S_ISBLK  :: #force_inline proc(m: u32) -> bool { return (m & S_IFMT) == S_IFBLK  }
S_ISFIFO :: #force_inline proc(m: u32) -> bool { return (m & S_IFMT) == S_IFIFO  }
S_ISSOCK :: #force_inline proc(m: u32) -> bool { return (m & S_IFMT) == S_IFSOCK }
// access(2) masks
F_OK :: 0 // Test for file existence
X_OK :: 1 // Test for execute permission
W_OK :: 2 // Test for write permission
R_OK :: 4 // Test for read permission
// Seconds/nanoseconds pair matching the kernel's struct timespec.
@private
Unix_File_Time :: struct {
	seconds:     i64,
	nanoseconds: i64,
}
// NOTE(review): this field layout matches the x86-64 struct stat;
// other architectures order/pad struct stat differently — confirm
// before relying on it for 32-bit or arm targets.
@private
_Stat :: struct {
	device_id:     u64, // ID of device containing file
	serial:        u64, // File serial number
	nlink:         u64, // Number of hard links
	mode:          u32, // Mode of the file
	uid:           u32, // User ID of the file's owner
	gid:           u32, // Group ID of the file's group
	_padding:      i32, // 32 bits of padding
	rdev:          u64, // Device ID, if device
	size:          i64, // Size of the file, in bytes
	block_size:    i64, // Optimal block size for I/O
	blocks:        i64, // Number of 512-byte blocks allocated

	last_access:   Unix_File_Time, // Time of last access
	modified:      Unix_File_Time, // Time of last modification
	status_change: Unix_File_Time, // Time of last status change

	_reserve1,
	_reserve2,
	_reserve3:     i64,
}
// Stat an open file; fullpath in the result is resolved from the fd.
_fstat :: proc(f: ^File, allocator := context.allocator) -> (File_Info, Error) {
	return _fstat_internal(f.impl.fd, allocator)
}
// Build a File_Info from fstat(2) on `fd`. fullpath (and name derived
// from it) are resolved via /proc/self/fd.
_fstat_internal :: proc(fd: int, allocator: runtime.Allocator) -> (File_Info, Error) {
	s: _Stat
	result := unix.sys_fstat(fd, &s)
	if result < 0 {
		return {}, _get_platform_error(result)
	}

	// TODO: As of Linux 4.11, the new statx syscall can retrieve creation_time
	fi := File_Info {
		fullpath = _get_full_path(fd, allocator),
		name = "",
		size = s.size,
		mode = 0,
		is_dir = S_ISDIR(s.mode),
		// time.Time stores nanoseconds since the Unix epoch; the previous
		// code stored raw seconds in that field, making timestamps wrong
		// by a factor of 1e9 and dropping sub-second precision.
		modification_time = time.Time{s.modified.seconds * 1_000_000_000 + s.modified.nanoseconds},
		access_time = time.Time{s.last_access.seconds * 1_000_000_000 + s.last_access.nanoseconds},
		creation_time = time.Time{0}, // regular stat does not provide this
	}

	fi.name = filepath.base(fi.fullpath)
	return fi, nil
}
// NOTE: _stat and _lstat are using _fstat to avoid a race condition when populating fullpath
_stat :: proc(name: string, allocator := context.allocator) -> (File_Info, Error) {
	cname, heap_allocated := _name_to_cstring(name)
	defer if heap_allocated {
		delete(cname)
	}
	// Open, then fstat the fd, so fullpath resolution cannot race a
	// concurrent rename of `name`.
	fd := unix.sys_open(cname, _O_RDONLY)
	if fd < 0 {
		return {}, _get_platform_error(fd)
	}
	defer unix.sys_close(fd)
	return _fstat_internal(fd, allocator)
}
// Like _stat but does not follow a trailing symlink: _O_PATH|_O_NOFOLLOW
// opens the link object itself, so fstat describes the link.
_lstat :: proc(name: string, allocator := context.allocator) -> (File_Info, Error) {
	name_cstr, allocated := _name_to_cstring(name)
	defer if allocated {
		delete(name_cstr)
	}
	fd := unix.sys_open(name_cstr, _O_RDONLY | _O_PATH | _O_NOFOLLOW)
	if fd < 0 {
		return {}, _get_platform_error(fd)
	}
	defer unix.sys_close(fd)
	return _fstat_internal(fd, allocator)
}
// NOTE(review): compares resolved paths only; two hard links to the same
// inode report as different files. Comparing device_id + serial would be
// more robust — confirm the intended contract.
_same_file :: proc(fi1, fi2: File_Info) -> bool {
	return fi1.fullpath == fi2.fullpath
}
// Raw stat(2) on `name`; returns the kernel struct and the syscall
// result (negative -errno on failure).
_stat_internal :: proc(name: string) -> (s: _Stat, res: int) {
	cname, heap_allocated := _name_to_cstring(name)
	defer if heap_allocated {
		delete(cname)
	}
	res = unix.sys_stat(cname, &s)
	return
}

View File

@@ -0,0 +1,20 @@
//+private
package os2
import "core:runtime"
_create_temp :: proc(dir, pattern: string) -> (^File, Error) {
//TODO
return nil, nil
}
_mkdir_temp :: proc(dir, pattern: string, allocator: runtime.Allocator) -> (string, Error) {
//TODO
return "", nil
}
_temp_dir :: proc(allocator: runtime.Allocator) -> (string, Error) {
//TODO
return "", nil
}

View File

@@ -33,8 +33,8 @@ when ODIN_ARCH == .amd64 {
SYS_rt_sigprocmask : uintptr : 14
SYS_rt_sigreturn : uintptr : 15
SYS_ioctl : uintptr : 16
SYS_pread : uintptr : 17
SYS_pwrite : uintptr : 18
SYS_pread64 : uintptr : 17
SYS_pwrite64 : uintptr : 18
SYS_readv : uintptr : 19
SYS_writev : uintptr : 20
SYS_access : uintptr : 21
@@ -1518,6 +1518,51 @@ when ODIN_ARCH == .amd64 {
#panic("Unsupported architecture")
}
// syscall related constants
AT_FDCWD :: ~uintptr(99)
AT_REMOVEDIR :: uintptr(0x200)
AT_SYMLINK_FOLLOW :: uintptr(0x400)
AT_SYMLINK_NOFOLLOW :: uintptr(0x100)
// mmap flags
PROT_NONE :: 0x0
PROT_READ :: 0x1
PROT_WRITE :: 0x2
PROT_EXEC :: 0x4
PROT_GROWSDOWN :: 0x01000000
PROT_GROWSUP :: 0x02000000
MAP_FIXED :: 0x10
MAP_SHARED :: 0x1
MAP_PRIVATE :: 0x2
MAP_SHARED_VALIDATE :: 0x3
MAP_ANONYMOUS :: 0x20
// mremap flags
MREMAP_MAYMOVE :: 1
MREMAP_FIXED :: 2
MREMAP_DONTUNMAP :: 4
// madvise flags
MADV_NORMAL :: 0
MADV_RANDOM :: 1
MADV_SEQUENTIAL :: 2
MADV_WILLNEED :: 3
MADV_DONTNEED :: 4
MADV_FREE :: 8
MADV_REMOVE :: 9
MADV_DONTFORK :: 10
MADV_DOFORK :: 11
MADV_MERGEABLE :: 12
MADV_UNMERGEABLE :: 13
MADV_HUGEPAGE :: 14
MADV_NOHUGEPAGE :: 15
MADV_DONTDUMP :: 16
MADV_DODUMP :: 17
MADV_WIPEONFORK :: 18
MADV_KEEPONFORK :: 19
MADV_HWPOISON :: 100
sys_gettid :: proc "contextless" () -> int {
return cast(int)intrinsics.syscall(SYS_gettid)
}
@@ -1525,3 +1570,285 @@ sys_gettid :: proc "contextless" () -> int {
sys_getrandom :: proc "contextless" (buf: [^]byte, buflen: int, flags: uint) -> int {
return cast(int)intrinsics.syscall(SYS_getrandom, buf, cast(uintptr)(buflen), cast(uintptr)(flags))
}
// open(2). arm64 dropped the legacy open syscall, so openat relative to
// AT_FDCWD is used there (identical semantics).
sys_open :: proc "contextless" (path: cstring, flags: int, mode: int = 0o000) -> int {
	when ODIN_ARCH != .arm64 {
		return int(intrinsics.syscall(SYS_open, uintptr(rawptr(path)), uintptr(flags), uintptr(mode)))
	} else { // NOTE: arm64 does not have open
		return int(intrinsics.syscall(SYS_openat, AT_FDCWD, uintptr(rawptr(path)), uintptr(flags), uintptr(mode)))
	}
}

// openat(2): open `path` relative to the directory fd `dfd`.
sys_openat :: proc "contextless" (dfd: int, path: cstring, flags: int, mode: int = 0o000) -> int {
	return int(intrinsics.syscall(SYS_openat, uintptr(dfd), uintptr(rawptr(path)), uintptr(flags), uintptr(mode)))
}

// close(2).
sys_close :: proc "contextless" (fd: int) -> int {
	return int(intrinsics.syscall(SYS_close, uintptr(fd)))
}

// read(2): returns bytes read, or -errno.
sys_read :: proc "contextless" (fd: int, buf: rawptr, size: uint) -> int {
	return int(intrinsics.syscall(SYS_read, uintptr(fd), uintptr(buf), uintptr(size)))
}
// pread(2): positioned read; does not move the file offset.
sys_pread :: proc "contextless" (fd: int, buf: rawptr, size: uint, offset: i64) -> int {
	when ODIN_ARCH == .amd64 || ODIN_ARCH == .arm64 {
		return int(intrinsics.syscall(SYS_pread64, uintptr(fd), uintptr(buf), uintptr(size), uintptr(offset)))
	} else {
		// 32-bit: the 64-bit offset is split across two registers.
		// NOTE(review): passed high word first; several 32-bit ABIs expect
		// low first (some insert an alignment pad) — verify per target.
		low := uintptr(offset & 0xFFFFFFFF)
		high := uintptr(offset >> 32)
		return int(intrinsics.syscall(SYS_pread64, uintptr(fd), uintptr(buf), uintptr(size), high, low))
	}
}

// write(2): returns bytes written, or -errno.
sys_write :: proc "contextless" (fd: int, buf: rawptr, size: uint) -> int {
	return int(intrinsics.syscall(SYS_write, uintptr(fd), uintptr(buf), uintptr(size)))
}

// pwrite(2): positioned write; same 32-bit offset-splitting caveat as
// sys_pread above.
sys_pwrite :: proc "contextless" (fd: int, buf: rawptr, size: uint, offset: i64) -> int {
	when ODIN_ARCH == .amd64 || ODIN_ARCH == .arm64 {
		return int(intrinsics.syscall(SYS_pwrite64, uintptr(fd), uintptr(buf), uintptr(size), uintptr(offset)))
	} else {
		low := uintptr(offset & 0xFFFFFFFF)
		high := uintptr(offset >> 32)
		return int(intrinsics.syscall(SYS_pwrite64, uintptr(fd), uintptr(buf), uintptr(size), high, low))
	}
}

// lseek(2). 32-bit targets use _llseek, whose documented argument order
// is (fd, offset_high, offset_low, &result, whence).
sys_lseek :: proc "contextless" (fd: int, offset: i64, whence: int) -> i64 {
	when ODIN_ARCH == .amd64 || ODIN_ARCH == .arm64 {
		return i64(intrinsics.syscall(SYS_lseek, uintptr(fd), uintptr(offset), uintptr(whence)))
	} else {
		low := uintptr(offset & 0xFFFFFFFF)
		high := uintptr(offset >> 32)
		result: i64
		res := i64(intrinsics.syscall(SYS__llseek, uintptr(fd), high, low, &result, uintptr(whence)))
		return res if res < 0 else result
	}
}
// stat(2): fills `stat` with that ABI's struct stat. 32-bit targets use
// stat64; arm64 only has fstatat, used with AT_FDCWD.
sys_stat :: proc "contextless" (path: cstring, stat: rawptr) -> int {
	when ODIN_ARCH == .amd64 {
		return int(intrinsics.syscall(SYS_stat, uintptr(rawptr(path)), uintptr(stat)))
	} else when ODIN_ARCH != .arm64 {
		return int(intrinsics.syscall(SYS_stat64, uintptr(rawptr(path)), uintptr(stat)))
	} else { // NOTE: arm64 does not have stat
		return int(intrinsics.syscall(SYS_fstatat, AT_FDCWD, uintptr(rawptr(path)), uintptr(stat), 0))
	}
}

// fstat(2)/fstat64: stat an open fd.
sys_fstat :: proc "contextless" (fd: int, stat: rawptr) -> int {
	when ODIN_ARCH == .amd64 || ODIN_ARCH == .arm64 {
		return int(intrinsics.syscall(SYS_fstat, uintptr(fd), uintptr(stat)))
	} else {
		return int(intrinsics.syscall(SYS_fstat64, uintptr(fd), uintptr(stat)))
	}
}

// lstat(2): like stat but does not follow a trailing symlink.
sys_lstat :: proc "contextless" (path: cstring, stat: rawptr) -> int {
	when ODIN_ARCH == .amd64 {
		return int(intrinsics.syscall(SYS_lstat, uintptr(rawptr(path)), uintptr(stat)))
	} else when ODIN_ARCH != .arm64 {
		return int(intrinsics.syscall(SYS_lstat64, uintptr(rawptr(path)), uintptr(stat)))
	} else { // NOTE: arm64 does not have any lstat
		return int(intrinsics.syscall(SYS_fstatat, AT_FDCWD, uintptr(rawptr(path)), uintptr(stat), AT_SYMLINK_NOFOLLOW))
	}
}

// readlink(2): returns the number of bytes placed in buf (no NUL
// terminator is appended), or -errno.
sys_readlink :: proc "contextless" (path: cstring, buf: rawptr, bufsiz: uint) -> int {
	when ODIN_ARCH != .arm64 {
		return int(intrinsics.syscall(SYS_readlink, uintptr(rawptr(path)), uintptr(buf), uintptr(bufsiz)))
	} else { // NOTE: arm64 does not have readlink
		return int(intrinsics.syscall(SYS_readlinkat, AT_FDCWD, uintptr(rawptr(path)), uintptr(buf), uintptr(bufsiz)))
	}
}

// symlink(2): create `new_name` pointing at `old_name`. symlinkat's
// signature is (target, newdirfd, linkpath), hence the argument order.
sys_symlink :: proc "contextless" (old_name: cstring, new_name: cstring) -> int {
	when ODIN_ARCH != .arm64 {
		return int(intrinsics.syscall(SYS_symlink, uintptr(rawptr(old_name)), uintptr(rawptr(new_name))))
	} else { // NOTE: arm64 does not have symlink
		return int(intrinsics.syscall(SYS_symlinkat, uintptr(rawptr(old_name)), AT_FDCWD, uintptr(rawptr(new_name))))
	}
}

// access(2): checks permissions against the real (not effective) IDs.
sys_access :: proc "contextless" (path: cstring, mask: int) -> int {
	when ODIN_ARCH != .arm64 {
		return int(intrinsics.syscall(SYS_access, uintptr(rawptr(path)), uintptr(mask)))
	} else { // NOTE: arm64 does not have access
		return int(intrinsics.syscall(SYS_faccessat, AT_FDCWD, uintptr(rawptr(path)), uintptr(mask)))
	}
}
// getcwd(2): returns path length incl. NUL on success, -errno
// (notably -ERANGE when `size` is too small) on failure.
sys_getcwd :: proc "contextless" (buf: rawptr, size: uint) -> int {
	return int(intrinsics.syscall(SYS_getcwd, uintptr(buf), uintptr(size)))
}

// chdir(2).
sys_chdir :: proc "contextless" (path: cstring) -> int {
	return int(intrinsics.syscall(SYS_chdir, uintptr(rawptr(path))))
}

// fchdir(2): chdir to an open directory fd.
sys_fchdir :: proc "contextless" (fd: int) -> int {
	return int(intrinsics.syscall(SYS_fchdir, uintptr(fd)))
}

// chmod(2); arm64 only has fchmodat, used with AT_FDCWD.
sys_chmod :: proc "contextless" (path: cstring, mode: int) -> int {
	when ODIN_ARCH != .arm64 {
		return int(intrinsics.syscall(SYS_chmod, uintptr(rawptr(path)), uintptr(mode)))
	} else { // NOTE: arm64 does not have chmod
		return int(intrinsics.syscall(SYS_fchmodat, AT_FDCWD, uintptr(rawptr(path)), uintptr(mode)))
	}
}

// fchmod(2).
sys_fchmod :: proc "contextless" (fd: int, mode: int) -> int {
	return int(intrinsics.syscall(SYS_fchmod, uintptr(fd), uintptr(mode)))
}

// chown(2): follows symlinks; arm64 uses fchownat with flags = 0.
sys_chown :: proc "contextless" (path: cstring, user: int, group: int) -> int {
	when ODIN_ARCH != .arm64 {
		return int(intrinsics.syscall(SYS_chown, uintptr(rawptr(path)), uintptr(user), uintptr(group)))
	} else { // NOTE: arm64 does not have chown
		return int(intrinsics.syscall(SYS_fchownat, AT_FDCWD, uintptr(rawptr(path)), uintptr(user), uintptr(group), 0))
	}
}

// fchown(2).
sys_fchown :: proc "contextless" (fd: int, user: int, group: int) -> int {
	return int(intrinsics.syscall(SYS_fchown, uintptr(fd), uintptr(user), uintptr(group)))
}

// lchown(2): chown on the link itself (AT_SYMLINK_NOFOLLOW on arm64).
sys_lchown :: proc "contextless" (path: cstring, user: int, group: int) -> int {
	when ODIN_ARCH != .arm64 {
		return int(intrinsics.syscall(SYS_lchown, uintptr(rawptr(path)), uintptr(user), uintptr(group)))
	} else { // NOTE: arm64 does not have lchown
		return int(intrinsics.syscall(SYS_fchownat, AT_FDCWD, uintptr(rawptr(path)), uintptr(user), uintptr(group), AT_SYMLINK_NOFOLLOW))
	}
}
// rename(2); arm64 only has renameat. renameat(2) takes FOUR arguments
// (olddirfd, oldpath, newdirfd, newpath) — the previous call passed a
// single AT_FDCWD, dropping newdirfd and shifting newpath into its slot.
sys_rename :: proc "contextless" (old, new: cstring) -> int {
	when ODIN_ARCH != .arm64 {
		return int(intrinsics.syscall(SYS_rename, uintptr(rawptr(old)), uintptr(rawptr(new))))
	} else { // NOTE: arm64 does not have rename
		return int(intrinsics.syscall(SYS_renameat, AT_FDCWD, uintptr(rawptr(old)), AT_FDCWD, uintptr(rawptr(new))))
	}
}
// link(2): create a new hard link; arm64 uses linkat with
// AT_SYMLINK_FOLLOW to match link's follow-the-source behavior.
sys_link :: proc "contextless" (old_name: cstring, new_name: cstring) -> int {
	when ODIN_ARCH != .arm64 {
		return int(intrinsics.syscall(SYS_link, uintptr(rawptr(old_name)), uintptr(rawptr(new_name))))
	} else { // NOTE: arm64 does not have link
		return int(intrinsics.syscall(SYS_linkat, AT_FDCWD, uintptr(rawptr(old_name)), AT_FDCWD, uintptr(rawptr(new_name)), AT_SYMLINK_FOLLOW))
	}
}

// unlink(2).
sys_unlink :: proc "contextless" (path: cstring) -> int {
	when ODIN_ARCH != .arm64 {
		return int(intrinsics.syscall(SYS_unlink, uintptr(rawptr(path))))
	} else { // NOTE: arm64 does not have unlink
		return int(intrinsics.syscall(SYS_unlinkat, AT_FDCWD, uintptr(rawptr(path)), 0))
	}
}
// unlinkat(2); pass AT_REMOVEDIR in `flag` to remove a directory.
sys_unlinkat :: proc "contextless" (dfd: int, path: cstring, flag: int = 0) -> int {
	// Cast `flag` explicitly like every other wrapper in this file;
	// intrinsics.syscall takes uintptr arguments.
	return int(intrinsics.syscall(SYS_unlinkat, uintptr(dfd), uintptr(rawptr(path)), uintptr(flag)))
}
// rmdir(2); arm64 uses unlinkat with AT_REMOVEDIR.
sys_rmdir :: proc "contextless" (path: cstring) -> int {
	when ODIN_ARCH != .arm64 {
		return int(intrinsics.syscall(SYS_rmdir, uintptr(rawptr(path))))
	} else { // NOTE: arm64 does not have rmdir
		return int(intrinsics.syscall(SYS_unlinkat, AT_FDCWD, uintptr(rawptr(path)), AT_REMOVEDIR))
	}
}

// mkdir(2); arm64 uses mkdirat with AT_FDCWD.
sys_mkdir :: proc "contextless" (path: cstring, mode: int) -> int {
	when ODIN_ARCH != .arm64 {
		return int(intrinsics.syscall(SYS_mkdir, uintptr(rawptr(path)), uintptr(mode)))
	} else { // NOTE: arm64 does not have mkdir
		return int(intrinsics.syscall(SYS_mkdirat, AT_FDCWD, uintptr(rawptr(path)), uintptr(mode)))
	}
}

// mkdirat(2): create a directory relative to directory fd `dfd`.
sys_mkdirat :: proc "contextless" (dfd: int, path: cstring, mode: int) -> int {
	return int(intrinsics.syscall(SYS_mkdirat, uintptr(dfd), uintptr(rawptr(path)), uintptr(mode)))
}

// mknod(2): create a special file (fifo, device node, ...).
sys_mknod :: proc "contextless" (path: cstring, mode: int, dev: int) -> int {
	when ODIN_ARCH != .arm64 {
		return int(intrinsics.syscall(SYS_mknod, uintptr(rawptr(path)), uintptr(mode), uintptr(dev)))
	} else { // NOTE: arm64 does not have mknod
		return int(intrinsics.syscall(SYS_mknodat, AT_FDCWD, uintptr(rawptr(path)), uintptr(mode), uintptr(dev)))
	}
}

// mknodat(2).
sys_mknodat :: proc "contextless" (dfd: int, path: cstring, mode: int, dev: int) -> int {
	return int(intrinsics.syscall(SYS_mknodat, uintptr(dfd), uintptr(rawptr(path)), uintptr(mode), uintptr(dev)))
}
// truncate(2); 32-bit targets use truncate64 with a split length.
// NOTE(review): the 64-bit length is passed high word first; several
// 32-bit ABIs expect low first (some insert a pad) — verify per target.
sys_truncate :: proc "contextless" (path: cstring, length: i64) -> int {
	when ODIN_ARCH == .amd64 || ODIN_ARCH == .arm64 {
		return int(intrinsics.syscall(SYS_truncate, uintptr(rawptr(path)), uintptr(length)))
	} else {
		low := uintptr(length & 0xFFFFFFFF)
		high := uintptr(length >> 32)
		return int(intrinsics.syscall(SYS_truncate64, uintptr(rawptr(path)), high, low))
	}
}

// ftruncate(2); same 32-bit caveat as sys_truncate.
sys_ftruncate :: proc "contextless" (fd: int, length: i64) -> int {
	when ODIN_ARCH == .amd64 || ODIN_ARCH == .arm64 {
		return int(intrinsics.syscall(SYS_ftruncate, uintptr(fd), uintptr(length)))
	} else {
		low := uintptr(length & 0xFFFFFFFF)
		high := uintptr(length >> 32)
		return int(intrinsics.syscall(SYS_ftruncate64, uintptr(fd), high, low))
	}
}

// fsync(2).
sys_fsync :: proc "contextless" (fd: int) -> int {
	return int(intrinsics.syscall(SYS_fsync, uintptr(fd)))
}

// getdents64(2): fills `dirent` with packed linux_dirent64 records;
// returns bytes read, 0 at end of directory, or -errno.
sys_getdents64 :: proc "contextless" (fd: int, dirent: rawptr, count: int) -> int {
	return int(intrinsics.syscall(SYS_getdents64, uintptr(fd), uintptr(dirent), uintptr(count)))
}

// fork(2); arm64 has no fork and emulates it via clone(SIGCHLD).
sys_fork :: proc "contextless" () -> int {
	when ODIN_ARCH != .arm64 {
		return int(intrinsics.syscall(SYS_fork))
	} else {
		return int(intrinsics.syscall(SYS_clone, SIGCHLD))
	}
}

// mmap(2): returns the mapped address as an int; -errno in (-4096, 0)
// signals failure.
sys_mmap :: proc "contextless" (addr: rawptr, length: uint, prot, flags, fd: int, offset: uintptr) -> int {
	return int(intrinsics.syscall(SYS_mmap, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), offset))
}

// mremap(2): resize/move a mapping (MREMAP_MAYMOVE etc. in `flags`).
sys_mremap :: proc "contextless" (addr: rawptr, old_length, new_length: uint, flags: int, new_addr: rawptr = nil) -> int {
	return int(intrinsics.syscall(SYS_mremap, uintptr(addr), uintptr(old_length), uintptr(new_length), uintptr(flags), uintptr(new_addr)))
}

// munmap(2).
sys_munmap :: proc "contextless" (addr: rawptr, length: uint) -> int {
	return int(intrinsics.syscall(SYS_munmap, uintptr(addr), uintptr(length)))
}

// mprotect(2).
sys_mprotect :: proc "contextless" (addr: rawptr, length: uint, prot: int) -> int {
	return int(intrinsics.syscall(SYS_mprotect, uintptr(addr), uintptr(length), uintptr(prot)))
}

// madvise(2).
sys_madvise :: proc "contextless" (addr: rawptr, length: uint, advice: int) -> int {
	return int(intrinsics.syscall(SYS_madvise, uintptr(addr), uintptr(length), uintptr(advice)))
}

// NOTE: Unsure about if this works directly on 32 bit archs. It may need 32 bit version of the time struct.
// As of Linux 5.1, there is a utimensat_time64 function. Maybe use this in the future?
sys_utimensat :: proc "contextless" (dfd: int, path: cstring, times: rawptr, flags: int) -> int {
	return int(intrinsics.syscall(SYS_utimensat, uintptr(dfd), uintptr(rawptr(path)), uintptr(times), uintptr(flags)))
}
// Extract the errno from a raw syscall result. Linux signals errors by
// returning -errno in the range [-4095, -1]; anything else is success.
get_errno :: proc "contextless" (res: int) -> i32 {
	if -4096 < res && res < 0 {
		return i32(-res)
	}
	return 0
}

View File

@@ -794,4 +794,4 @@ Control_Event :: enum DWORD {
close = 2,
logoff = 5,
shutdown = 6,
}
}