Merge branch 'master' into sys-linux-additions

flysand7
2023-12-06 00:23:41 +11:00
144 changed files with 11124 additions and 13799 deletions

View File

@@ -87,11 +87,6 @@ jobs:
cd tests/core
make
timeout-minutes: 10
- name: Vendor library tests
run: |
cd tests/vendor
make
timeout-minutes: 10
- name: Odin internals tests
run: |
cd tests/internal

.gitignore vendored
View File

@@ -25,7 +25,29 @@ bld/
tests/documentation/verify/
tests/documentation/all.odin-doc
tests/internal/test_map
tests/internal/test_pow
tests/internal/test_rtti
tests/core/test_core_compress
tests/core/test_core_filepath
tests/core/test_core_fmt
tests/core/test_core_i18n
tests/core/test_core_image
tests/core/test_core_libc
tests/core/test_core_match
tests/core/test_core_math
tests/core/test_core_net
tests/core/test_core_os_exit
tests/core/test_core_reflect
tests/core/test_core_strings
tests/core/test_crypto_hash
tests/core/test_hash
tests/core/test_hxa
tests/core/test_json
tests/core/test_linalg_glsl_math
tests/core/test_noise
tests/core/test_varint
tests/core/test_xml
tests/vendor/vendor_botan
# Visual Studio 2015 cache/options directory
.vs/
# Visual Studio Code options directory

View File

@@ -110,7 +110,8 @@ if %errorlevel% neq 0 goto end_of_build
call build_vendor.bat
if %errorlevel% neq 0 goto end_of_build
if %release_mode% EQU 0 odin run examples/demo
rem If the demo doesn't run for you and your CPU is more than a decade old, try -microarch:native
if %release_mode% EQU 0 odin run examples/demo -- Hellope World
del *.obj > NUL 2> NUL

View File

@@ -27,11 +27,13 @@ error() {
if [ -z "$LLVM_CONFIG" ]; then
# darwin, linux, openbsd
if [ -n "$(command -v llvm-config-17)" ]; then LLVM_CONFIG="llvm-config-17"
elif [ -n "$(command -v llvm-config-14)" ]; then LLVM_CONFIG="llvm-config-14"
elif [ -n "$(command -v llvm-config-13)" ]; then LLVM_CONFIG="llvm-config-13"
elif [ -n "$(command -v llvm-config-12)" ]; then LLVM_CONFIG="llvm-config-12"
elif [ -n "$(command -v llvm-config-11)" ]; then LLVM_CONFIG="llvm-config-11"
# freebsd
elif [ -n "$(command -v llvm-config17)" ]; then LLVM_CONFIG="llvm-config-17"
elif [ -n "$(command -v llvm-config14)" ]; then LLVM_CONFIG="llvm-config-14"
elif [ -n "$(command -v llvm-config13)" ]; then LLVM_CONFIG="llvm-config-13"
elif [ -n "$(command -v llvm-config12)" ]; then LLVM_CONFIG="llvm-config-12"
elif [ -n "$(command -v llvm-config11)" ]; then LLVM_CONFIG="llvm-config-11"
@@ -117,7 +119,7 @@ build_odin() {
}
run_demo() {
./odin run examples/demo/demo.odin -file
./odin run examples/demo/demo.odin -file -- Hellope World
}
if [ $# -eq 0 ]; then

View File

@@ -2,6 +2,8 @@ package libc
import "core:c"
#assert(!ODIN_NO_CRT, `"core:c/libc" cannot be imported when '-no-crt' is used`)
char :: c.char // assuming -funsigned-char
schar :: c.schar

View File

@@ -22,7 +22,9 @@ init :: proc(q: ^$Q/Queue($T), capacity := DEFAULT_CAPACITY, allocator := contex
return reserve(q, capacity)
}
// Procedure to initialize a queue from a fixed backing slice
// Procedure to initialize a queue from a fixed backing slice.
// The contents of the `backing` will be overwritten as items are pushed onto the `Queue`.
// Any previous contents will not be available.
init_from_slice :: proc(q: ^$Q/Queue($T), backing: []T) -> bool {
clear(q)
q.data = transmute([dynamic]T)runtime.Raw_Dynamic_Array{
@@ -34,6 +36,21 @@ init_from_slice :: proc(q: ^$Q/Queue($T), backing: []T) -> bool {
return true
}
// Procedure to initialize a queue from a fixed backing slice.
// Existing contents are preserved and available on the queue.
init_with_contents :: proc(q: ^$Q/Queue($T), backing: []T) -> bool {
clear(q)
q.data = transmute([dynamic]T)runtime.Raw_Dynamic_Array{
data = raw_data(backing),
len = builtin.len(backing),
cap = builtin.len(backing),
allocator = {procedure=runtime.nil_allocator_proc, data=nil},
}
q.len = len(backing)
q.offset = len(backing)
return true
}
// Procedure to destroy a queue
destroy :: proc(q: ^$Q/Queue($T)) {
delete(q.data)
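For context, a minimal sketch of how the two initializers differ in practice (a usage illustration, not part of the diff; the printed counts are the expected values):

```odin
package queue_example

import "core:container/queue"
import "core:fmt"

main :: proc() {
	// init_from_slice: `backing` only provides storage; the queue starts empty.
	storage: [8]int
	q1: queue.Queue(int)
	queue.init_from_slice(&q1, storage[:])
	queue.push_back(&q1, 42)
	fmt.println(queue.len(q1)) // 1

	// init_with_contents: the slice's existing values start out on the queue.
	items := []int{1, 2, 3}
	q2: queue.Queue(int)
	queue.init_with_contents(&q2, items)
	fmt.println(queue.len(q2)) // 3
}
```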

View File

@@ -80,11 +80,13 @@ sort :: proc(sorter: ^$S/Sorter($K)) -> (sorted, cycled: [dynamic]K) {
}
}
for root in sorted do for k, _ in relations[root].dependents {
relation := &relations[k]
relation.dependencies -= 1
if relation.dependencies == 0 {
append(&sorted, k)
for root in sorted {
for k, _ in relations[root].dependents {
relation := &relations[k]
relation.dependencies -= 1
if relation.dependencies == 0 {
append(&sorted, k)
}
}
}
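A sketch of driving this sorter end to end (illustrative; only the `sort` signature above comes from the diff — `init`, `add_key`, `add_dependency`, and `destroy` are assumed names from the surrounding package):

```odin
package topological_sort_example

import "core:container/topological_sort"
import "core:fmt"

main :: proc() {
	sorter: topological_sort.Sorter(string)
	topological_sort.init(&sorter)
	defer topological_sort.destroy(&sorter)

	topological_sort.add_key(&sorter, "lexer")
	topological_sort.add_key(&sorter, "parser")
	topological_sort.add_dependency(&sorter, "parser", "lexer") // parser depends on lexer

	sorted, cycled := topological_sort.sort(&sorter)
	defer delete(sorted)
	defer delete(cycled)

	fmt.println(sorted) // roots first, e.g. ["lexer", "parser"]
	fmt.println(cycled) // empty when the graph has no cycles
}
```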

View File

@@ -1,95 +1,86 @@
# crypto
A crypto library for the Odin language
A cryptography library for the Odin language
## Supported
This library offers various algorithms implemented in Odin.
Please see the chart below for the options.
Please see the chart below for some of the options.
## Hashing algorithms
| Algorithm | |
|:-------------------------------------------------------------------------------------------------------------|:-----------------|
| [BLAKE](https://web.archive.org/web/20190915215948/https://131002.net/blake) | ✔️ |
| [BLAKE2B](https://datatracker.ietf.org/doc/html/rfc7693) | ✔️ |
| [BLAKE2S](https://datatracker.ietf.org/doc/html/rfc7693) | ✔️ |
| [GOST](https://datatracker.ietf.org/doc/html/rfc5831) | ✔️ |
| [Grøstl](http://www.groestl.info/Groestl.zip) | ✔️ |
| [HAVAL](https://web.archive.org/web/20150111210116/http://labs.calyptix.com/haval.php) | ✔️ |
| [JH](https://www3.ntu.edu.sg/home/wuhj/research/jh/index.html) | ✔️ |
| [Keccak](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf) | ✔️ |
| [MD2](https://datatracker.ietf.org/doc/html/rfc1319) | ✔️ |
| [MD4](https://datatracker.ietf.org/doc/html/rfc1320) | ✔️ |
| [MD5](https://datatracker.ietf.org/doc/html/rfc1321) | ✔️ |
| [RIPEMD](https://homes.esat.kuleuven.be/~bosselae/ripemd160.html) | ✔️ |
| [SHA-1](https://datatracker.ietf.org/doc/html/rfc3174) | ✔️ |
| [SHA-2](https://csrc.nist.gov/csrc/media/publications/fips/180/2/archive/2002-08-01/documents/fips180-2.pdf) | ✔️ |
| [SHA-3](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf) | ✔️ |
| [SHAKE](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf) | ✔️ |
| [SM3](https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02) | ✔️ |
| [Streebog](https://datatracker.ietf.org/doc/html/rfc6986) | ✔️ |
| [Tiger](https://www.cs.technion.ac.il/~biham/Reports/Tiger/) | ✔️ |
| [Tiger2](https://www.cs.technion.ac.il/~biham/Reports/Tiger/) | ✔️ |
| [Whirlpool](https://web.archive.org/web/20171129084214/http://www.larc.usp.br/~pbarreto/WhirlpoolPage.html) | ✔️ |
| legacy/[Keccak](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf) | ✔️ |
| legacy/[MD5](https://datatracker.ietf.org/doc/html/rfc1321) | ✔️ |
| legacy/[SHA-1](https://datatracker.ietf.org/doc/html/rfc3174) | ✔️ |
#### High level API
Each hash algorithm contains a procedure group named `hash`, or if the algorithm provides more than one digest size `hash_<size>`\*.
Included in these groups are six procedures.
* `hash_string` - Hash a given string and return the computed hash. Just calls `hash_bytes` internally
* `hash_bytes` - Hash a given byte slice and return the computed hash
* `hash_string_to_buffer` - Hash a given string and put the computed hash in the second proc parameter. Just calls `hash_bytes_to_buffer` internally
* `hash_bytes_to_buffer` - Hash a given byte slice and put the computed hash in the second proc parameter. The destination buffer has to be at least as big as the digest size of the hash
* `hash_stream` - Takes a stream from io.Stream and returns the computed hash from it
* `hash_file` - Takes a file handle and returns the computed hash from it. A second optional boolean parameter controls if the file is streamed (this is the default) or read at once (set to true)
\* On some algorithms there is another part to the name, since they might offer control over additional parameters.
For instance, `HAVAL` offers different sizes as well as three different round amounts.
Computing a 256-bit hash with 3 rounds is therefore achieved by calling `haval.hash_256_3(...)`.
Each hash algorithm contains a procedure group named `hash`, or if the algorithm provides more than one digest size `hash_<size>`\*.
Included in these groups are six procedures.
- `hash_string` - Hash a given string and return the computed hash. Just calls `hash_bytes` internally
- `hash_bytes` - Hash a given byte slice and return the computed hash
- `hash_string_to_buffer` - Hash a given string and put the computed hash in the second proc parameter. Just calls `hash_bytes_to_buffer` internally
- `hash_bytes_to_buffer` - Hash a given byte slice and put the computed hash in the second proc parameter. The destination buffer has to be at least as big as the digest size of the hash
- `hash_stream` - Takes a stream from io.Stream and returns the computed hash from it
- `hash_file` - Takes a file handle and returns the computed hash from it. A second optional boolean parameter controls if the file is streamed (this is the default) or read at once (set to true)
\* On some algorithms there is another part to the name, since they might offer control over additional parameters.
For instance, `SHA-2` offers different sizes.
Computing a 512-bit hash is therefore achieved by calling `sha2.hash_512(...)`.
#### Low level API
The above mentioned procedures internally call three procedures: `init`, `update` and `final`.
You may also directly call them, if you wish.
#### Example
```odin
package crypto_example
// Import the desired package
import "core:crypto/md4"
import "core:crypto/blake2b"
main :: proc() {
input := "foo"
// Compute the hash, using the high level API
computed_hash := md4.hash(input)
computed_hash := blake2b.hash(input)
// Variant that takes a destination buffer, instead of returning the computed hash
hash := make([]byte, md4.DIGEST_SIZE) // @note: Destination buffer has to be at least as big as the digest size of the hash
md4.hash(input, hash[:])
hash := make([]byte, blake2b.DIGEST_SIZE) // @note: Destination buffer has to be at least as big as the digest size of the hash
blake2b.hash(input, hash[:])
// Compute the hash, using the low level API
ctx: md4.Md4_Context
computed_hash_low: [16]byte
md4.init(&ctx)
md4.update(&ctx, transmute([]byte)input)
md4.final(&ctx, computed_hash_low[:])
ctx: blake2b.Context
computed_hash_low: [blake2b.DIGEST_SIZE]byte
blake2b.init(&ctx)
blake2b.update(&ctx, transmute([]byte)input)
blake2b.final(&ctx, computed_hash_low[:])
}
```
For example uses of all available algorithms, please see the tests within `tests/core/crypto`.
#### Thread safety
The crypto package is not thread-safe at the moment. This may change in the future.
## Implementation considerations
### Disclaimer
The algorithms were ported out of curiosity and due to interest in the field.
We have not had any of the code verified by a third party or tested/fuzzed by any automatic means.
Wherever we were able to find official test vectors, those were used to verify the implementation.
We do not recommend using them in a production environment without additional testing and/or verification.
- The crypto packages are not thread-safe.
- Best-effort is made to mitigate timing side-channels on reasonable
architectures. Architectures that are known to be unreasonable include
but are not limited to i386, i486, and WebAssembly.
- Some but not all of the packages attempt to sanitize sensitive data,
however this is not done consistently throughout the library at the moment.
As Thomas Pornin puts it "In general, such memory cleansing is a fool's
quest."
- None of these packages have received independent third-party review.
### ToDo
* Ciphers (Symmetric, Asymmetric)
* MACs (Message Authentication Code)
* CSPRNGs (Cryptographically Secure PseudoRandom Number Generator)
* KDFs (Key Derivation Function)
* KEAs (Key Exchange Algorithm)
## License
### License
This library is made available under the BSD-3 license.

View File

@@ -10,12 +10,12 @@ package _blake2
Implementation of the BLAKE2 hashing algorithm, as defined in <https://datatracker.ietf.org/doc/html/rfc7693> and <https://www.blake2.net/>
*/
import "../util"
import "core:encoding/endian"
BLAKE2S_BLOCK_SIZE :: 64
BLAKE2S_SIZE :: 32
BLAKE2B_BLOCK_SIZE :: 128
BLAKE2B_SIZE :: 64
BLAKE2S_BLOCK_SIZE :: 64
BLAKE2S_SIZE :: 32
BLAKE2B_BLOCK_SIZE :: 128
BLAKE2B_SIZE :: 64
Blake2s_Context :: struct {
h: [8]u32,
@@ -28,7 +28,9 @@ Blake2s_Context :: struct {
is_keyed: bool,
size: byte,
is_last_node: bool,
cfg: Blake2_Config,
cfg: Blake2_Config,
is_initialized: bool,
}
Blake2b_Context :: struct {
@@ -42,15 +44,19 @@ Blake2b_Context :: struct {
is_keyed: bool,
size: byte,
is_last_node: bool,
cfg: Blake2_Config,
cfg: Blake2_Config,
is_initialized: bool,
}
Blake2_Config :: struct {
size: byte,
key: []byte,
salt: []byte,
size: byte,
key: []byte,
salt: []byte,
person: []byte,
tree: union{Blake2_Tree},
tree: union {
Blake2_Tree,
},
}
Blake2_Tree :: struct {
@@ -63,11 +69,13 @@ Blake2_Tree :: struct {
is_last_node: bool,
}
@(private)
BLAKE2S_IV := [8]u32 {
0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
}
@(private)
BLAKE2B_IV := [8]u64 {
0x6a09e667f3bcc908, 0xbb67ae8584caa73b,
0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
@@ -78,8 +86,14 @@ BLAKE2B_IV := [8]u64 {
init :: proc(ctx: ^$T) {
when T == Blake2s_Context {
block_size :: BLAKE2S_BLOCK_SIZE
max_size :: BLAKE2S_SIZE
} else when T == Blake2b_Context {
block_size :: BLAKE2B_BLOCK_SIZE
max_size :: BLAKE2B_SIZE
}
if ctx.cfg.size > max_size {
panic("blake2: requested output size exceeeds algorithm max")
}
p := make([]byte, block_size)
@@ -106,10 +120,10 @@ init :: proc(ctx: ^$T) {
if ctx.cfg.tree != nil {
p[2] = ctx.cfg.tree.(Blake2_Tree).fanout
p[3] = ctx.cfg.tree.(Blake2_Tree).max_depth
util.PUT_U32_LE(p[4:], ctx.cfg.tree.(Blake2_Tree).leaf_size)
endian.unchecked_put_u32le(p[4:], ctx.cfg.tree.(Blake2_Tree).leaf_size)
when T == Blake2s_Context {
p[8] = byte(ctx.cfg.tree.(Blake2_Tree).node_offset)
p[9] = byte(ctx.cfg.tree.(Blake2_Tree).node_offset >> 8)
p[8] = byte(ctx.cfg.tree.(Blake2_Tree).node_offset)
p[9] = byte(ctx.cfg.tree.(Blake2_Tree).node_offset >> 8)
p[10] = byte(ctx.cfg.tree.(Blake2_Tree).node_offset >> 16)
p[11] = byte(ctx.cfg.tree.(Blake2_Tree).node_offset >> 24)
p[12] = byte(ctx.cfg.tree.(Blake2_Tree).node_offset >> 32)
@@ -117,7 +131,7 @@ init :: proc(ctx: ^$T) {
p[14] = ctx.cfg.tree.(Blake2_Tree).node_depth
p[15] = ctx.cfg.tree.(Blake2_Tree).inner_hash_size
} else when T == Blake2b_Context {
util.PUT_U64_LE(p[8:], ctx.cfg.tree.(Blake2_Tree).node_offset)
endian.unchecked_put_u64le(p[8:], ctx.cfg.tree.(Blake2_Tree).node_offset)
p[16] = ctx.cfg.tree.(Blake2_Tree).node_depth
p[17] = ctx.cfg.tree.(Blake2_Tree).inner_hash_size
}
@@ -127,10 +141,10 @@ init :: proc(ctx: ^$T) {
ctx.size = ctx.cfg.size
for i := 0; i < 8; i += 1 {
when T == Blake2s_Context {
ctx.h[i] = BLAKE2S_IV[i] ~ util.U32_LE(p[i * 4:])
ctx.h[i] = BLAKE2S_IV[i] ~ endian.unchecked_get_u32le(p[i * 4:])
}
when T == Blake2b_Context {
ctx.h[i] = BLAKE2B_IV[i] ~ util.U64_LE(p[i * 8:])
ctx.h[i] = BLAKE2B_IV[i] ~ endian.unchecked_get_u64le(p[i * 8:])
}
}
if ctx.cfg.tree != nil && ctx.cfg.tree.(Blake2_Tree).is_last_node {
@@ -142,13 +156,19 @@ init :: proc(ctx: ^$T) {
ctx.is_keyed = true
}
copy(ctx.ih[:], ctx.h[:])
copy(ctx.h[:], ctx.ih[:])
copy(ctx.h[:], ctx.ih[:])
if ctx.is_keyed {
update(ctx, ctx.padded_key[:])
}
ctx.nx = 0
ctx.is_initialized = true
}
update :: proc "contextless" (ctx: ^$T, p: []byte) {
update :: proc(ctx: ^$T, p: []byte) {
assert(ctx.is_initialized)
p := p
when T == Blake2s_Context {
block_size :: BLAKE2S_BLOCK_SIZE
@@ -174,15 +194,25 @@ update :: proc "contextless" (ctx: ^$T, p: []byte) {
ctx.nx += copy(ctx.x[ctx.nx:], p)
}
final :: proc "contextless" (ctx: ^$T, hash: []byte) {
final :: proc(ctx: ^$T, hash: []byte) {
assert(ctx.is_initialized)
when T == Blake2s_Context {
if len(hash) < int(ctx.cfg.size) {
panic("crypto/blake2s: invalid destination digest size")
}
blake2s_final(ctx, hash)
}
when T == Blake2b_Context {
} else when T == Blake2b_Context {
if len(hash) < int(ctx.cfg.size) {
panic("crypto/blake2b: invalid destination digest size")
}
blake2b_final(ctx, hash)
}
ctx.is_initialized = false
}
@(private)
blake2s_final :: proc "contextless" (ctx: ^Blake2s_Context, hash: []byte) {
if ctx.is_keyed {
for i := 0; i < len(ctx.padded_key); i += 1 {
@@ -203,16 +233,14 @@ blake2s_final :: proc "contextless" (ctx: ^Blake2s_Context, hash: []byte) {
blocks(ctx, ctx.x[:])
j := 0
for s, _ in ctx.h[:(ctx.size - 1) / 4 + 1] {
hash[j + 0] = byte(s >> 0)
hash[j + 1] = byte(s >> 8)
hash[j + 2] = byte(s >> 16)
hash[j + 3] = byte(s >> 24)
j += 4
dst: [BLAKE2S_SIZE]byte
for i := 0; i < BLAKE2S_SIZE / 4; i += 1 {
endian.unchecked_put_u32le(dst[i * 4:], ctx.h[i])
}
copy(hash, dst[:])
}
@(private)
blake2b_final :: proc "contextless" (ctx: ^Blake2b_Context, hash: []byte) {
if ctx.is_keyed {
for i := 0; i < len(ctx.padded_key); i += 1 {
@@ -229,56 +257,52 @@ blake2b_final :: proc "contextless" (ctx: ^Blake2b_Context, hash: []byte) {
ctx.f[0] = 0xffffffffffffffff
if ctx.is_last_node {
ctx.f[1] = 0xffffffffffffffff
}
}
blocks(ctx, ctx.x[:])
j := 0
for s, _ in ctx.h[:(ctx.size - 1) / 8 + 1] {
hash[j + 0] = byte(s >> 0)
hash[j + 1] = byte(s >> 8)
hash[j + 2] = byte(s >> 16)
hash[j + 3] = byte(s >> 24)
hash[j + 4] = byte(s >> 32)
hash[j + 5] = byte(s >> 40)
hash[j + 6] = byte(s >> 48)
hash[j + 7] = byte(s >> 56)
j += 8
dst: [BLAKE2B_SIZE]byte
for i := 0; i < BLAKE2B_SIZE / 8; i += 1 {
endian.unchecked_put_u64le(dst[i * 8:], ctx.h[i])
}
copy(hash, dst[:])
}
@(private)
blocks :: proc "contextless" (ctx: ^$T, p: []byte) {
when T == Blake2s_Context {
blake2s_blocks(ctx, p)
}
when T == Blake2b_Context {
} else when T == Blake2b_Context {
blake2b_blocks(ctx, p)
}
}
@(private)
blake2s_blocks :: #force_inline proc "contextless" (ctx: ^Blake2s_Context, p: []byte) {
h0, h1, h2, h3, h4, h5, h6, h7 := ctx.h[0], ctx.h[1], ctx.h[2], ctx.h[3], ctx.h[4], ctx.h[5], ctx.h[6], ctx.h[7]
h0, h1, h2, h3, h4, h5, h6, h7 :=
ctx.h[0], ctx.h[1], ctx.h[2], ctx.h[3], ctx.h[4], ctx.h[5], ctx.h[6], ctx.h[7]
p := p
for len(p) >= BLAKE2S_BLOCK_SIZE {
ctx.t[0] += BLAKE2S_BLOCK_SIZE
if ctx.t[0] < BLAKE2S_BLOCK_SIZE {
ctx.t[1] += 1
}
}
v0, v1, v2, v3, v4, v5, v6, v7 := h0, h1, h2, h3, h4, h5, h6, h7
v8 := BLAKE2S_IV[0]
v9 := BLAKE2S_IV[1]
v8 := BLAKE2S_IV[0]
v9 := BLAKE2S_IV[1]
v10 := BLAKE2S_IV[2]
v11 := BLAKE2S_IV[3]
v12 := BLAKE2S_IV[4] ~ ctx.t[0]
v13 := BLAKE2S_IV[5] ~ ctx.t[1]
v14 := BLAKE2S_IV[6] ~ ctx.f[0]
v15 := BLAKE2S_IV[7] ~ ctx.f[1]
m: [16]u32
j := 0
m: [16]u32 = ---
for i := 0; i < 16; i += 1 {
m[i] = u32(p[j]) | u32(p[j + 1]) << 8 | u32(p[j + 2]) << 16 | u32(p[j + 3]) << 24
j += 4
m[i] = endian.unchecked_get_u32le(p[i * 4:])
}
// Round 1
v0 += m[0]
v0 += v4
v12 ~= v0
@@ -391,6 +415,8 @@ blake2s_blocks :: #force_inline proc "contextless" (ctx: ^Blake2s_Context, p: []
v10 += v15
v5 ~= v10
v5 = v5 << (32 - 7) | v5 >> 7
// Round 2
v0 += m[14]
v0 += v4
v12 ~= v0
@@ -503,6 +529,8 @@ blake2s_blocks :: #force_inline proc "contextless" (ctx: ^Blake2s_Context, p: []
v10 += v15
v5 ~= v10
v5 = v5 << (32 - 7) | v5 >> 7
// Round 3
v0 += m[11]
v0 += v4
v12 ~= v0
@@ -615,6 +643,8 @@ blake2s_blocks :: #force_inline proc "contextless" (ctx: ^Blake2s_Context, p: []
v10 += v15
v5 ~= v10
v5 = v5 << (32 - 7) | v5 >> 7
// Round 4
v0 += m[7]
v0 += v4
v12 ~= v0
@@ -727,6 +757,8 @@ blake2s_blocks :: #force_inline proc "contextless" (ctx: ^Blake2s_Context, p: []
v10 += v15
v5 ~= v10
v5 = v5 << (32 - 7) | v5 >> 7
// Round 5
v0 += m[9]
v0 += v4
v12 ~= v0
@@ -839,6 +871,8 @@ blake2s_blocks :: #force_inline proc "contextless" (ctx: ^Blake2s_Context, p: []
v10 += v15
v5 ~= v10
v5 = v5 << (32 - 7) | v5 >> 7
// Round 6
v0 += m[2]
v0 += v4
v12 ~= v0
@@ -951,6 +985,8 @@ blake2s_blocks :: #force_inline proc "contextless" (ctx: ^Blake2s_Context, p: []
v10 += v15
v5 ~= v10
v5 = v5 << (32 - 7) | v5 >> 7
// Round 7
v0 += m[12]
v0 += v4
v12 ~= v0
@@ -1063,6 +1099,8 @@ blake2s_blocks :: #force_inline proc "contextless" (ctx: ^Blake2s_Context, p: []
v10 += v15
v5 ~= v10
v5 = v5 << (32 - 7) | v5 >> 7
// Round 8
v0 += m[13]
v0 += v4
v12 ~= v0
@@ -1175,6 +1213,8 @@ blake2s_blocks :: #force_inline proc "contextless" (ctx: ^Blake2s_Context, p: []
v10 += v15
v5 ~= v10
v5 = v5 << (32 - 7) | v5 >> 7
// Round 9
v0 += m[6]
v0 += v4
v12 ~= v0
@@ -1287,6 +1327,8 @@ blake2s_blocks :: #force_inline proc "contextless" (ctx: ^Blake2s_Context, p: []
v10 += v15
v5 ~= v10
v5 = v5 << (32 - 7) | v5 >> 7
// Round 10
v0 += m[10]
v0 += v4
v12 ~= v0
@@ -1399,6 +1441,7 @@ blake2s_blocks :: #force_inline proc "contextless" (ctx: ^Blake2s_Context, p: []
v10 += v15
v5 ~= v10
v5 = v5 << (32 - 7) | v5 >> 7
h0 ~= v0 ~ v8
h1 ~= v1 ~ v9
h2 ~= v2 ~ v10
@@ -1407,19 +1450,23 @@ blake2s_blocks :: #force_inline proc "contextless" (ctx: ^Blake2s_Context, p: []
h5 ~= v5 ~ v13
h6 ~= v6 ~ v14
h7 ~= v7 ~ v15
p = p[BLAKE2S_BLOCK_SIZE:]
}
ctx.h[0], ctx.h[1], ctx.h[2], ctx.h[3], ctx.h[4], ctx.h[5], ctx.h[6], ctx.h[7] = h0, h1, h2, h3, h4, h5, h6, h7
ctx.h[0], ctx.h[1], ctx.h[2], ctx.h[3], ctx.h[4], ctx.h[5], ctx.h[6], ctx.h[7] =
h0, h1, h2, h3, h4, h5, h6, h7
}
@(private)
blake2b_blocks :: #force_inline proc "contextless" (ctx: ^Blake2b_Context, p: []byte) {
h0, h1, h2, h3, h4, h5, h6, h7 := ctx.h[0], ctx.h[1], ctx.h[2], ctx.h[3], ctx.h[4], ctx.h[5], ctx.h[6], ctx.h[7]
h0, h1, h2, h3, h4, h5, h6, h7 :=
ctx.h[0], ctx.h[1], ctx.h[2], ctx.h[3], ctx.h[4], ctx.h[5], ctx.h[6], ctx.h[7]
p := p
for len(p) >= BLAKE2B_BLOCK_SIZE {
ctx.t[0] += BLAKE2B_BLOCK_SIZE
if ctx.t[0] < BLAKE2B_BLOCK_SIZE {
ctx.t[1]+=1
}
ctx.t[1] += 1
}
v0, v1, v2, v3, v4, v5, v6, v7 := h0, h1, h2, h3, h4, h5, h6, h7
v8 := BLAKE2B_IV[0]
v9 := BLAKE2B_IV[1]
@@ -1429,13 +1476,13 @@ blake2b_blocks :: #force_inline proc "contextless" (ctx: ^Blake2b_Context, p: []
v13 := BLAKE2B_IV[5] ~ ctx.t[1]
v14 := BLAKE2B_IV[6] ~ ctx.f[0]
v15 := BLAKE2B_IV[7] ~ ctx.f[1]
m: [16]u64 = ---
j := 0
for i := 0; i < 16; i+=1 {
m[i] = u64(p[j]) | u64(p[j + 1]) << 8 | u64(p[j + 2]) << 16 | u64(p[j + 3]) << 24 |
u64(p[j + 4]) << 32 | u64(p[j + 5]) << 40 | u64(p[j + 6]) << 48 | u64(p[j + 7]) << 56
j += 8
for i := 0; i < 16; i += 1 {
m[i] = endian.unchecked_get_u64le(p[i * 8:])
}
// Round 1
v0 += m[0]
v0 += v4
v12 ~= v0
@@ -1548,6 +1595,8 @@ blake2b_blocks :: #force_inline proc "contextless" (ctx: ^Blake2b_Context, p: []
v10 += v15
v5 ~= v10
v5 = v5 << (64 - 63) | v5 >> 63
// Round 2
v0 += m[14]
v0 += v4
v12 ~= v0
@@ -1660,6 +1709,8 @@ blake2b_blocks :: #force_inline proc "contextless" (ctx: ^Blake2b_Context, p: []
v10 += v15
v5 ~= v10
v5 = v5 << (64 - 63) | v5 >> 63
// Round 3
v0 += m[11]
v0 += v4
v12 ~= v0
@@ -1772,6 +1823,8 @@ blake2b_blocks :: #force_inline proc "contextless" (ctx: ^Blake2b_Context, p: []
v10 += v15
v5 ~= v10
v5 = v5 << (64 - 63) | v5 >> 63
// Round 4
v0 += m[7]
v0 += v4
v12 ~= v0
@@ -1884,6 +1937,8 @@ blake2b_blocks :: #force_inline proc "contextless" (ctx: ^Blake2b_Context, p: []
v10 += v15
v5 ~= v10
v5 = v5 << (64 - 63) | v5 >> 63
// Round 5
v0 += m[9]
v0 += v4
v12 ~= v0
@@ -1996,6 +2051,8 @@ blake2b_blocks :: #force_inline proc "contextless" (ctx: ^Blake2b_Context, p: []
v10 += v15
v5 ~= v10
v5 = v5 << (64 - 63) | v5 >> 63
// Round 6
v0 += m[2]
v0 += v4
v12 ~= v0
@@ -2108,6 +2165,8 @@ blake2b_blocks :: #force_inline proc "contextless" (ctx: ^Blake2b_Context, p: []
v10 += v15
v5 ~= v10
v5 = v5 << (64 - 63) | v5 >> 63
// Round 7
v0 += m[12]
v0 += v4
v12 ~= v0
@@ -2220,6 +2279,8 @@ blake2b_blocks :: #force_inline proc "contextless" (ctx: ^Blake2b_Context, p: []
v10 += v15
v5 ~= v10
v5 = v5 << (64 - 63) | v5 >> 63
// Round 8
v0 += m[13]
v0 += v4
v12 ~= v0
@@ -2332,6 +2393,8 @@ blake2b_blocks :: #force_inline proc "contextless" (ctx: ^Blake2b_Context, p: []
v10 += v15
v5 ~= v10
v5 = v5 << (64 - 63) | v5 >> 63
// Round 9
v0 += m[6]
v0 += v4
v12 ~= v0
@@ -2444,6 +2507,8 @@ blake2b_blocks :: #force_inline proc "contextless" (ctx: ^Blake2b_Context, p: []
v10 += v15
v5 ~= v10
v5 = v5 << (64 - 63) | v5 >> 63
// Round 10
v0 += m[10]
v0 += v4
v12 ~= v0
@@ -2556,6 +2621,8 @@ blake2b_blocks :: #force_inline proc "contextless" (ctx: ^Blake2b_Context, p: []
v10 += v15
v5 ~= v10
v5 = v5 << (64 - 63) | v5 >> 63
// Round 11
v0 += m[0]
v0 += v4
v12 ~= v0
@@ -2668,6 +2735,8 @@ blake2b_blocks :: #force_inline proc "contextless" (ctx: ^Blake2b_Context, p: []
v10 += v15
v5 ~= v10
v5 = v5 << (64 - 63) | v5 >> 63
// Round 12
v0 += m[14]
v0 += v4
v12 ~= v0
@@ -2780,6 +2849,7 @@ blake2b_blocks :: #force_inline proc "contextless" (ctx: ^Blake2b_Context, p: []
v10 += v15
v5 ~= v10
v5 = v5 << (64 - 63) | v5 >> 63
h0 ~= v0 ~ v8
h1 ~= v1 ~ v9
h2 ~= v2 ~ v10
@@ -2788,7 +2858,9 @@ blake2b_blocks :: #force_inline proc "contextless" (ctx: ^Blake2b_Context, p: []
h5 ~= v5 ~ v13
h6 ~= v6 ~ v14
h7 ~= v7 ~ v15
p = p[BLAKE2B_BLOCK_SIZE:]
}
ctx.h[0], ctx.h[1], ctx.h[2], ctx.h[3], ctx.h[4], ctx.h[5], ctx.h[6], ctx.h[7] = h0, h1, h2, h3, h4, h5, h6, h7
}
ctx.h[0], ctx.h[1], ctx.h[2], ctx.h[3], ctx.h[4], ctx.h[5], ctx.h[6], ctx.h[7] =
h0, h1, h2, h3, h4, h5, h6, h7
}
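To make the new `is_initialized` lifecycle concrete, a minimal sketch against the `_blake2` procedures shown above (the import path and alias are assumptions; typical consumers go through the higher-level wrapper packages instead):

```odin
package blake2_example

import blake2 "core:crypto/_blake2"

main :: proc() {
	ctx: blake2.Blake2b_Context
	ctx.cfg.size = blake2.BLAKE2B_SIZE // anything larger now panics in init

	blake2.init(&ctx) // sets is_initialized
	input := "hello"
	blake2.update(&ctx, transmute([]byte)input) // asserts is_initialized

	digest: [blake2.BLAKE2B_SIZE]byte
	blake2.final(&ctx, digest[:]) // clears is_initialized; reuse requires init again
}
```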

View File

@@ -1,6 +1,6 @@
package field_poly1305
import "core:crypto/util"
import "core:encoding/endian"
import "core:mem"
fe_relax_cast :: #force_inline proc "contextless" (arg1: ^Tight_Field_Element) -> ^Loose_Field_Element {
@@ -11,7 +11,7 @@ fe_tighten_cast :: #force_inline proc "contextless" (arg1: ^Loose_Field_Element)
return transmute(^Tight_Field_Element)(arg1)
}
fe_from_bytes :: #force_inline proc (out1: ^Tight_Field_Element, arg1: []byte, arg2: byte, sanitize: bool = true) {
fe_from_bytes :: #force_inline proc (out1: ^Tight_Field_Element, arg1: []byte, arg2: byte) {
// fiat-crypto's deserialization routine effectively processes a
// single byte at a time, and wants 256-bits of input for a value
// that will be 128-bits or 129-bits.
@@ -22,42 +22,29 @@ fe_from_bytes :: #force_inline proc (out1: ^Tight_Field_Element, arg1: []byte, a
assert(len(arg1) == 16)
when ODIN_ARCH == .i386 || ODIN_ARCH == .amd64 {
// While it may be unwise to do deserialization here on our
// own when fiat-crypto provides equivalent functionality,
// doing it this way provides a little under 3x performance
// improvement when optimization is enabled.
src_p := transmute(^[2]u64)(&arg1[0])
lo := src_p[0]
hi := src_p[1]
// While it may be unwise to do deserialization here on our
// own when fiat-crypto provides equivalent functionality,
// doing it this way provides a little under 3x performance
// improvement when optimization is enabled.
lo := endian.unchecked_get_u64le(arg1[0:])
hi := endian.unchecked_get_u64le(arg1[8:])
// This is inspired by poly1305-donna, though adjustments were
// made since a Tight_Field_Element's limbs are 44-bits, 43-bits,
// and 43-bits wide.
//
// Note: This could be transplanted into fe_from_u64s, but that
// code is called once per MAC, and is a non-critical path.
hibit := u64(arg2) << 41 // arg2 << 128
out1[0] = lo & 0xfffffffffff
out1[1] = ((lo >> 44) | (hi << 20)) & 0x7ffffffffff
out1[2] = ((hi >> 23) & 0x7ffffffffff) | hibit
} else {
tmp: [32]byte
copy_slice(tmp[0:16], arg1[:])
tmp[16] = arg2
_fe_from_bytes(out1, &tmp)
if sanitize {
// This is used to deserialize `s` which is confidential.
mem.zero_explicit(&tmp, size_of(tmp))
}
}
// This is inspired by poly1305-donna, though adjustments were
// made since a Tight_Field_Element's limbs are 44-bits, 43-bits,
// and 43-bits wide.
//
// Note: This could be transplanted into fe_from_u64s, but that
// code is called once per MAC, and is a non-critical path.
hibit := u64(arg2) << 41 // arg2 << 128
out1[0] = lo & 0xfffffffffff
out1[1] = ((lo >> 44) | (hi << 20)) & 0x7ffffffffff
out1[2] = ((hi >> 23) & 0x7ffffffffff) | hibit
}
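As a sanity check on the limb split above (illustrative, not part of the diff): the three limbs carry bits 0..43, 44..86, and 87..129 of the 130-bit value, so the two 64-bit input words can be reassembled like so:

```odin
package poly1305_limb_check

main :: proc() {
	lo: u64 = 0x0123456789abcdef
	hi: u64 = 0xfedcba9876543210

	// The same 44/43/43-bit split as fe_from_bytes (hibit omitted).
	l0 := lo & 0xfffffffffff
	l1 := ((lo >> 44) | (hi << 20)) & 0x7ffffffffff
	l2 := (hi >> 23) & 0x7ffffffffff

	// Reassembling the limbs recovers the original words.
	assert((l0 | (l1 << 44)) == lo)
	assert(((l1 >> 20) | (l2 << 23)) == hi)
}
```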
fe_from_u64s :: proc "contextless" (out1: ^Tight_Field_Element, lo, hi: u64) {
tmp: [32]byte
util.PUT_U64_LE(tmp[0:8], lo)
util.PUT_U64_LE(tmp[8:16], hi)
endian.unchecked_put_u64le(tmp[0:], lo)
endian.unchecked_put_u64le(tmp[8:], hi)
_fe_from_bytes(out1, &tmp)

View File

@@ -11,159 +11,173 @@ package _sha3
To use the original Keccak padding, set the is_keccak bool to true; otherwise, SHA3 padding is used.
*/
import "../util"
import "core:math/bits"
ROUNDS :: 24
Sha3_Context :: struct {
st: struct #raw_union {
b: [200]u8,
q: [25]u64,
},
pt: int,
rsiz: int,
mdlen: int,
is_keccak: bool,
st: struct #raw_union {
b: [200]u8,
q: [25]u64,
},
pt: int,
rsiz: int,
mdlen: int,
is_keccak: bool,
is_initialized: bool,
is_finalized: bool, // For SHAKE (unlimited squeeze is allowed)
}
keccakf :: proc "contextless" (st: ^[25]u64) {
keccakf_rndc := [?]u64 {
0x0000000000000001, 0x0000000000008082, 0x800000000000808a,
0x8000000080008000, 0x000000000000808b, 0x0000000080000001,
0x8000000080008081, 0x8000000000008009, 0x000000000000008a,
0x0000000000000088, 0x0000000080008009, 0x000000008000000a,
0x000000008000808b, 0x800000000000008b, 0x8000000000008089,
0x8000000000008003, 0x8000000000008002, 0x8000000000000080,
0x000000000000800a, 0x800000008000000a, 0x8000000080008081,
0x8000000000008080, 0x0000000080000001, 0x8000000080008008,
}
keccakf_rndc := [?]u64 {
0x0000000000000001, 0x0000000000008082, 0x800000000000808a,
0x8000000080008000, 0x000000000000808b, 0x0000000080000001,
0x8000000080008081, 0x8000000000008009, 0x000000000000008a,
0x0000000000000088, 0x0000000080008009, 0x000000008000000a,
0x000000008000808b, 0x800000000000008b, 0x8000000000008089,
0x8000000000008003, 0x8000000000008002, 0x8000000000000080,
0x000000000000800a, 0x800000008000000a, 0x8000000080008081,
0x8000000000008080, 0x0000000080000001, 0x8000000080008008,
}
keccakf_rotc := [?]i32 {
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44,
}
keccakf_rotc := [?]int {
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44,
}
keccakf_piln := [?]i32 {
10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1,
}
keccakf_piln := [?]i32 {
10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1,
}
i, j, r: i32 = ---, ---, ---
t: u64 = ---
bc: [5]u64 = ---
i, j, r: i32 = ---, ---, ---
t: u64 = ---
bc: [5]u64 = ---
when ODIN_ENDIAN != .Little {
v: uintptr = ---
for i = 0; i < 25; i += 1 {
v := uintptr(&st[i])
st[i] = u64((^u8)(v + 0)^ << 0) | u64((^u8)(v + 1)^ << 8) |
u64((^u8)(v + 2)^ << 16) | u64((^u8)(v + 3)^ << 24) |
u64((^u8)(v + 4)^ << 32) | u64((^u8)(v + 5)^ << 40) |
u64((^u8)(v + 6)^ << 48) | u64((^u8)(v + 7)^ << 56)
}
}
when ODIN_ENDIAN != .Little {
for i = 0; i < 25; i += 1 {
st[i] = bits.byte_swap(st[i])
}
}
for r = 0; r < ROUNDS; r += 1 {
// theta
for i = 0; i < 5; i += 1 {
bc[i] = st[i] ~ st[i + 5] ~ st[i + 10] ~ st[i + 15] ~ st[i + 20]
}
for r = 0; r < ROUNDS; r += 1 {
// theta
for i = 0; i < 5; i += 1 {
bc[i] = st[i] ~ st[i + 5] ~ st[i + 10] ~ st[i + 15] ~ st[i + 20]
}
for i = 0; i < 5; i += 1 {
t = bc[(i + 4) % 5] ~ util.ROTL64(bc[(i + 1) % 5], 1)
for j = 0; j < 25; j += 5 {
st[j + i] ~= t
}
}
for i = 0; i < 5; i += 1 {
t = bc[(i + 4) % 5] ~ bits.rotate_left64(bc[(i + 1) % 5], 1)
for j = 0; j < 25; j += 5 {
st[j + i] ~= t
}
}
// rho pi
t = st[1]
for i = 0; i < 24; i += 1 {
j = keccakf_piln[i]
bc[0] = st[j]
st[j] = util.ROTL64(t, u64(keccakf_rotc[i]))
t = bc[0]
}
// rho pi
t = st[1]
for i = 0; i < 24; i += 1 {
j = keccakf_piln[i]
bc[0] = st[j]
st[j] = bits.rotate_left64(t, keccakf_rotc[i])
t = bc[0]
}
// chi
for j = 0; j < 25; j += 5 {
for i = 0; i < 5; i += 1 {
bc[i] = st[j + i]
}
for i = 0; i < 5; i += 1 {
st[j + i] ~= ~bc[(i + 1) % 5] & bc[(i + 2) % 5]
}
}
// chi
for j = 0; j < 25; j += 5 {
for i = 0; i < 5; i += 1 {
bc[i] = st[j + i]
}
for i = 0; i < 5; i += 1 {
st[j + i] ~= ~bc[(i + 1) % 5] & bc[(i + 2) % 5]
}
}
st[0] ~= keccakf_rndc[r]
}
st[0] ~= keccakf_rndc[r]
}
when ODIN_ENDIAN != .Little {
for i = 0; i < 25; i += 1 {
v = uintptr(&st[i])
t = st[i]
(^u8)(v + 0)^ = (t >> 0) & 0xff
(^u8)(v + 1)^ = (t >> 8) & 0xff
(^u8)(v + 2)^ = (t >> 16) & 0xff
(^u8)(v + 3)^ = (t >> 24) & 0xff
(^u8)(v + 4)^ = (t >> 32) & 0xff
(^u8)(v + 5)^ = (t >> 40) & 0xff
(^u8)(v + 6)^ = (t >> 48) & 0xff
(^u8)(v + 7)^ = (t >> 56) & 0xff
}
}
when ODIN_ENDIAN != .Little {
for i = 0; i < 25; i += 1 {
st[i] = bits.byte_swap(st[i])
}
}
}
init :: proc "contextless" (c: ^Sha3_Context) {
for i := 0; i < 25; i += 1 {
c.st.q[i] = 0
}
c.rsiz = 200 - 2 * c.mdlen
init :: proc(c: ^Sha3_Context) {
for i := 0; i < 25; i += 1 {
c.st.q[i] = 0
}
c.rsiz = 200 - 2 * c.mdlen
c.pt = 0
c.is_initialized = true
c.is_finalized = false
}
update :: proc "contextless" (c: ^Sha3_Context, data: []byte) {
j := c.pt
for i := 0; i < len(data); i += 1 {
c.st.b[j] ~= data[i]
j += 1
if j >= c.rsiz {
keccakf(&c.st.q)
j = 0
}
}
c.pt = j
update :: proc(c: ^Sha3_Context, data: []byte) {
assert(c.is_initialized)
assert(!c.is_finalized)
j := c.pt
for i := 0; i < len(data); i += 1 {
c.st.b[j] ~= data[i]
j += 1
if j >= c.rsiz {
keccakf(&c.st.q)
j = 0
}
}
c.pt = j
}
final :: proc "contextless" (c: ^Sha3_Context, hash: []byte) {
if c.is_keccak {
c.st.b[c.pt] ~= 0x01
} else {
c.st.b[c.pt] ~= 0x06
}
c.st.b[c.rsiz - 1] ~= 0x80
keccakf(&c.st.q)
for i := 0; i < c.mdlen; i += 1 {
hash[i] = c.st.b[i]
}
final :: proc(c: ^Sha3_Context, hash: []byte) {
assert(c.is_initialized)
if len(hash) < c.mdlen {
if c.is_keccak {
panic("crypto/keccac: invalid destination digest size")
}
panic("crypto/sha3: invalid destination digest size")
}
if c.is_keccak {
c.st.b[c.pt] ~= 0x01
} else {
c.st.b[c.pt] ~= 0x06
}
c.st.b[c.rsiz - 1] ~= 0x80
keccakf(&c.st.q)
for i := 0; i < c.mdlen; i += 1 {
hash[i] = c.st.b[i]
}
c.is_initialized = false // No more absorb, no more squeeze.
}
shake_xof :: proc "contextless" (c: ^Sha3_Context) {
c.st.b[c.pt] ~= 0x1F
c.st.b[c.rsiz - 1] ~= 0x80
keccakf(&c.st.q)
c.pt = 0
shake_xof :: proc(c: ^Sha3_Context) {
assert(c.is_initialized)
assert(!c.is_finalized)
c.st.b[c.pt] ~= 0x1F
c.st.b[c.rsiz - 1] ~= 0x80
keccakf(&c.st.q)
c.pt = 0
c.is_finalized = true // No more absorb, unlimited squeeze.
}
shake_out :: proc "contextless" (c: ^Sha3_Context, hash: []byte) {
j := c.pt
for i := 0; i < len(hash); i += 1 {
if j >= c.rsiz {
keccakf(&c.st.q)
j = 0
}
hash[i] = c.st.b[j]
j += 1
}
c.pt = j
shake_out :: proc(c: ^Sha3_Context, hash: []byte) {
assert(c.is_initialized)
assert(c.is_finalized)
j := c.pt
for i := 0; i < len(hash); i += 1 {
if j >= c.rsiz {
keccakf(&c.st.q)
j = 0
}
hash[i] = c.st.b[j]
j += 1
}
c.pt = j
}
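A sketch of the absorb/squeeze lifecycle these flags now enforce (the import path and alias are assumptions; real consumers use the higher-level wrapper packages):

```odin
package shake_example

import sha3 "core:crypto/_sha3"

main :: proc() {
	ctx: sha3.Sha3_Context
	ctx.mdlen = 32 // e.g. SHAKE-256: rsiz becomes 200 - 2*32

	sha3.init(&ctx) // is_initialized = true, is_finalized = false
	input := "hello"
	sha3.update(&ctx, transmute([]byte)input) // absorbing is only valid before shake_xof

	sha3.shake_xof(&ctx) // pad and permute; sets is_finalized

	out_a, out_b: [32]byte
	sha3.shake_out(&ctx, out_a[:]) // squeezing is now unlimited:
	sha3.shake_out(&ctx, out_b[:]) // repeated calls keep producing output
}
```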

View File

@@ -1,410 +0,0 @@
package _tiger
/*
Copyright 2021 zhibog
Made available under the BSD-3 license.
List of contributors:
zhibog, dotbmp: Initial implementation.
Implementation of the Tiger hashing algorithm, as defined in <https://www.cs.technion.ac.il/~biham/Reports/Tiger/>
*/
import "../util"
T1 := [?]u64 {
0x02aab17cf7e90c5e, 0xac424b03e243a8ec, 0x72cd5be30dd5fcd3, 0x6d019b93f6f97f3a,
0xcd9978ffd21f9193, 0x7573a1c9708029e2, 0xb164326b922a83c3, 0x46883eee04915870,
0xeaace3057103ece6, 0xc54169b808a3535c, 0x4ce754918ddec47c, 0x0aa2f4dfdc0df40c,
0x10b76f18a74dbefa, 0xc6ccb6235ad1ab6a, 0x13726121572fe2ff, 0x1a488c6f199d921e,
0x4bc9f9f4da0007ca, 0x26f5e6f6e85241c7, 0x859079dbea5947b6, 0x4f1885c5c99e8c92,
0xd78e761ea96f864b, 0x8e36428c52b5c17d, 0x69cf6827373063c1, 0xb607c93d9bb4c56e,
0x7d820e760e76b5ea, 0x645c9cc6f07fdc42, 0xbf38a078243342e0, 0x5f6b343c9d2e7d04,
0xf2c28aeb600b0ec6, 0x6c0ed85f7254bcac, 0x71592281a4db4fe5, 0x1967fa69ce0fed9f,
0xfd5293f8b96545db, 0xc879e9d7f2a7600b, 0x860248920193194e, 0xa4f9533b2d9cc0b3,
0x9053836c15957613, 0xdb6dcf8afc357bf1, 0x18beea7a7a370f57, 0x037117ca50b99066,
0x6ab30a9774424a35, 0xf4e92f02e325249b, 0x7739db07061ccae1, 0xd8f3b49ceca42a05,
0xbd56be3f51382f73, 0x45faed5843b0bb28, 0x1c813d5c11bf1f83, 0x8af0e4b6d75fa169,
0x33ee18a487ad9999, 0x3c26e8eab1c94410, 0xb510102bc0a822f9, 0x141eef310ce6123b,
0xfc65b90059ddb154, 0xe0158640c5e0e607, 0x884e079826c3a3cf, 0x930d0d9523c535fd,
0x35638d754e9a2b00, 0x4085fccf40469dd5, 0xc4b17ad28be23a4c, 0xcab2f0fc6a3e6a2e,
0x2860971a6b943fcd, 0x3dde6ee212e30446, 0x6222f32ae01765ae, 0x5d550bb5478308fe,
0xa9efa98da0eda22a, 0xc351a71686c40da7, 0x1105586d9c867c84, 0xdcffee85fda22853,
0xccfbd0262c5eef76, 0xbaf294cb8990d201, 0xe69464f52afad975, 0x94b013afdf133e14,
0x06a7d1a32823c958, 0x6f95fe5130f61119, 0xd92ab34e462c06c0, 0xed7bde33887c71d2,
0x79746d6e6518393e, 0x5ba419385d713329, 0x7c1ba6b948a97564, 0x31987c197bfdac67,
0xde6c23c44b053d02, 0x581c49fed002d64d, 0xdd474d6338261571, 0xaa4546c3e473d062,
0x928fce349455f860, 0x48161bbacaab94d9, 0x63912430770e6f68, 0x6ec8a5e602c6641c,
0x87282515337ddd2b, 0x2cda6b42034b701b, 0xb03d37c181cb096d, 0xe108438266c71c6f,
0x2b3180c7eb51b255, 0xdf92b82f96c08bbc, 0x5c68c8c0a632f3ba, 0x5504cc861c3d0556,
0xabbfa4e55fb26b8f, 0x41848b0ab3baceb4, 0xb334a273aa445d32, 0xbca696f0a85ad881,
0x24f6ec65b528d56c, 0x0ce1512e90f4524a, 0x4e9dd79d5506d35a, 0x258905fac6ce9779,
0x2019295b3e109b33, 0xf8a9478b73a054cc, 0x2924f2f934417eb0, 0x3993357d536d1bc4,
0x38a81ac21db6ff8b, 0x47c4fbf17d6016bf, 0x1e0faadd7667e3f5, 0x7abcff62938beb96,
0xa78dad948fc179c9, 0x8f1f98b72911e50d, 0x61e48eae27121a91, 0x4d62f7ad31859808,
0xeceba345ef5ceaeb, 0xf5ceb25ebc9684ce, 0xf633e20cb7f76221, 0xa32cdf06ab8293e4,
0x985a202ca5ee2ca4, 0xcf0b8447cc8a8fb1, 0x9f765244979859a3, 0xa8d516b1a1240017,
0x0bd7ba3ebb5dc726, 0xe54bca55b86adb39, 0x1d7a3afd6c478063, 0x519ec608e7669edd,
0x0e5715a2d149aa23, 0x177d4571848ff194, 0xeeb55f3241014c22, 0x0f5e5ca13a6e2ec2,
0x8029927b75f5c361, 0xad139fabc3d6e436, 0x0d5df1a94ccf402f, 0x3e8bd948bea5dfc8,
0xa5a0d357bd3ff77e, 0xa2d12e251f74f645, 0x66fd9e525e81a082, 0x2e0c90ce7f687a49,
0xc2e8bcbeba973bc5, 0x000001bce509745f, 0x423777bbe6dab3d6, 0xd1661c7eaef06eb5,
0xa1781f354daacfd8, 0x2d11284a2b16affc, 0xf1fc4f67fa891d1f, 0x73ecc25dcb920ada,
0xae610c22c2a12651, 0x96e0a810d356b78a, 0x5a9a381f2fe7870f, 0xd5ad62ede94e5530,
0xd225e5e8368d1427, 0x65977b70c7af4631, 0x99f889b2de39d74f, 0x233f30bf54e1d143,
0x9a9675d3d9a63c97, 0x5470554ff334f9a8, 0x166acb744a4f5688, 0x70c74caab2e4aead,
0xf0d091646f294d12, 0x57b82a89684031d1, 0xefd95a5a61be0b6b, 0x2fbd12e969f2f29a,
0x9bd37013feff9fe8, 0x3f9b0404d6085a06, 0x4940c1f3166cfe15, 0x09542c4dcdf3defb,
0xb4c5218385cd5ce3, 0xc935b7dc4462a641, 0x3417f8a68ed3b63f, 0xb80959295b215b40,
0xf99cdaef3b8c8572, 0x018c0614f8fcb95d, 0x1b14accd1a3acdf3, 0x84d471f200bb732d,
0xc1a3110e95e8da16, 0x430a7220bf1a82b8, 0xb77e090d39df210e, 0x5ef4bd9f3cd05e9d,
0x9d4ff6da7e57a444, 0xda1d60e183d4a5f8, 0xb287c38417998e47, 0xfe3edc121bb31886,
0xc7fe3ccc980ccbef, 0xe46fb590189bfd03, 0x3732fd469a4c57dc, 0x7ef700a07cf1ad65,
0x59c64468a31d8859, 0x762fb0b4d45b61f6, 0x155baed099047718, 0x68755e4c3d50baa6,
0xe9214e7f22d8b4df, 0x2addbf532eac95f4, 0x32ae3909b4bd0109, 0x834df537b08e3450,
0xfa209da84220728d, 0x9e691d9b9efe23f7, 0x0446d288c4ae8d7f, 0x7b4cc524e169785b,
0x21d87f0135ca1385, 0xcebb400f137b8aa5, 0x272e2b66580796be, 0x3612264125c2b0de,
0x057702bdad1efbb2, 0xd4babb8eacf84be9, 0x91583139641bc67b, 0x8bdc2de08036e024,
0x603c8156f49f68ed, 0xf7d236f7dbef5111, 0x9727c4598ad21e80, 0xa08a0896670a5fd7,
0xcb4a8f4309eba9cb, 0x81af564b0f7036a1, 0xc0b99aa778199abd, 0x959f1ec83fc8e952,
0x8c505077794a81b9, 0x3acaaf8f056338f0, 0x07b43f50627a6778, 0x4a44ab49f5eccc77,
0x3bc3d6e4b679ee98, 0x9cc0d4d1cf14108c, 0x4406c00b206bc8a0, 0x82a18854c8d72d89,
0x67e366b35c3c432c, 0xb923dd61102b37f2, 0x56ab2779d884271d, 0xbe83e1b0ff1525af,
0xfb7c65d4217e49a9, 0x6bdbe0e76d48e7d4, 0x08df828745d9179e, 0x22ea6a9add53bd34,
0xe36e141c5622200a, 0x7f805d1b8cb750ee, 0xafe5c7a59f58e837, 0xe27f996a4fb1c23c,
0xd3867dfb0775f0d0, 0xd0e673de6e88891a, 0x123aeb9eafb86c25, 0x30f1d5d5c145b895,
0xbb434a2dee7269e7, 0x78cb67ecf931fa38, 0xf33b0372323bbf9c, 0x52d66336fb279c74,
0x505f33ac0afb4eaa, 0xe8a5cd99a2cce187, 0x534974801e2d30bb, 0x8d2d5711d5876d90,
0x1f1a412891bc038e, 0xd6e2e71d82e56648, 0x74036c3a497732b7, 0x89b67ed96361f5ab,
0xffed95d8f1ea02a2, 0xe72b3bd61464d43d, 0xa6300f170bdc4820, 0xebc18760ed78a77a,
}
T2 := [?]u64 {
0xe6a6be5a05a12138, 0xb5a122a5b4f87c98, 0x563c6089140b6990, 0x4c46cb2e391f5dd5,
0xd932addbc9b79434, 0x08ea70e42015aff5, 0xd765a6673e478cf1, 0xc4fb757eab278d99,
0xdf11c6862d6e0692, 0xddeb84f10d7f3b16, 0x6f2ef604a665ea04, 0x4a8e0f0ff0e0dfb3,
0xa5edeef83dbcba51, 0xfc4f0a2a0ea4371e, 0xe83e1da85cb38429, 0xdc8ff882ba1b1ce2,
0xcd45505e8353e80d, 0x18d19a00d4db0717, 0x34a0cfeda5f38101, 0x0be77e518887caf2,
0x1e341438b3c45136, 0xe05797f49089ccf9, 0xffd23f9df2591d14, 0x543dda228595c5cd,
0x661f81fd99052a33, 0x8736e641db0f7b76, 0x15227725418e5307, 0xe25f7f46162eb2fa,
0x48a8b2126c13d9fe, 0xafdc541792e76eea, 0x03d912bfc6d1898f, 0x31b1aafa1b83f51b,
0xf1ac2796e42ab7d9, 0x40a3a7d7fcd2ebac, 0x1056136d0afbbcc5, 0x7889e1dd9a6d0c85,
0xd33525782a7974aa, 0xa7e25d09078ac09b, 0xbd4138b3eac6edd0, 0x920abfbe71eb9e70,
0xa2a5d0f54fc2625c, 0xc054e36b0b1290a3, 0xf6dd59ff62fe932b, 0x3537354511a8ac7d,
0xca845e9172fadcd4, 0x84f82b60329d20dc, 0x79c62ce1cd672f18, 0x8b09a2add124642c,
0xd0c1e96a19d9e726, 0x5a786a9b4ba9500c, 0x0e020336634c43f3, 0xc17b474aeb66d822,
0x6a731ae3ec9baac2, 0x8226667ae0840258, 0x67d4567691caeca5, 0x1d94155c4875adb5,
0x6d00fd985b813fdf, 0x51286efcb774cd06, 0x5e8834471fa744af, 0xf72ca0aee761ae2e,
0xbe40e4cdaee8e09a, 0xe9970bbb5118f665, 0x726e4beb33df1964, 0x703b000729199762,
0x4631d816f5ef30a7, 0xb880b5b51504a6be, 0x641793c37ed84b6c, 0x7b21ed77f6e97d96,
0x776306312ef96b73, 0xae528948e86ff3f4, 0x53dbd7f286a3f8f8, 0x16cadce74cfc1063,
0x005c19bdfa52c6dd, 0x68868f5d64d46ad3, 0x3a9d512ccf1e186a, 0x367e62c2385660ae,
0xe359e7ea77dcb1d7, 0x526c0773749abe6e, 0x735ae5f9d09f734b, 0x493fc7cc8a558ba8,
0xb0b9c1533041ab45, 0x321958ba470a59bd, 0x852db00b5f46c393, 0x91209b2bd336b0e5,
0x6e604f7d659ef19f, 0xb99a8ae2782ccb24, 0xccf52ab6c814c4c7, 0x4727d9afbe11727b,
0x7e950d0c0121b34d, 0x756f435670ad471f, 0xf5add442615a6849, 0x4e87e09980b9957a,
0x2acfa1df50aee355, 0xd898263afd2fd556, 0xc8f4924dd80c8fd6, 0xcf99ca3d754a173a,
0xfe477bacaf91bf3c, 0xed5371f6d690c12d, 0x831a5c285e687094, 0xc5d3c90a3708a0a4,
0x0f7f903717d06580, 0x19f9bb13b8fdf27f, 0xb1bd6f1b4d502843, 0x1c761ba38fff4012,
0x0d1530c4e2e21f3b, 0x8943ce69a7372c8a, 0xe5184e11feb5ce66, 0x618bdb80bd736621,
0x7d29bad68b574d0b, 0x81bb613e25e6fe5b, 0x071c9c10bc07913f, 0xc7beeb7909ac2d97,
0xc3e58d353bc5d757, 0xeb017892f38f61e8, 0xd4effb9c9b1cc21a, 0x99727d26f494f7ab,
0xa3e063a2956b3e03, 0x9d4a8b9a4aa09c30, 0x3f6ab7d500090fb4, 0x9cc0f2a057268ac0,
0x3dee9d2dedbf42d1, 0x330f49c87960a972, 0xc6b2720287421b41, 0x0ac59ec07c00369c,
0xef4eac49cb353425, 0xf450244eef0129d8, 0x8acc46e5caf4deb6, 0x2ffeab63989263f7,
0x8f7cb9fe5d7a4578, 0x5bd8f7644e634635, 0x427a7315bf2dc900, 0x17d0c4aa2125261c,
0x3992486c93518e50, 0xb4cbfee0a2d7d4c3, 0x7c75d6202c5ddd8d, 0xdbc295d8e35b6c61,
0x60b369d302032b19, 0xce42685fdce44132, 0x06f3ddb9ddf65610, 0x8ea4d21db5e148f0,
0x20b0fce62fcd496f, 0x2c1b912358b0ee31, 0xb28317b818f5a308, 0xa89c1e189ca6d2cf,
0x0c6b18576aaadbc8, 0xb65deaa91299fae3, 0xfb2b794b7f1027e7, 0x04e4317f443b5beb,
0x4b852d325939d0a6, 0xd5ae6beefb207ffc, 0x309682b281c7d374, 0xbae309a194c3b475,
0x8cc3f97b13b49f05, 0x98a9422ff8293967, 0x244b16b01076ff7c, 0xf8bf571c663d67ee,
0x1f0d6758eee30da1, 0xc9b611d97adeb9b7, 0xb7afd5887b6c57a2, 0x6290ae846b984fe1,
0x94df4cdeacc1a5fd, 0x058a5bd1c5483aff, 0x63166cc142ba3c37, 0x8db8526eb2f76f40,
0xe10880036f0d6d4e, 0x9e0523c9971d311d, 0x45ec2824cc7cd691, 0x575b8359e62382c9,
0xfa9e400dc4889995, 0xd1823ecb45721568, 0xdafd983b8206082f, 0xaa7d29082386a8cb,
0x269fcd4403b87588, 0x1b91f5f728bdd1e0, 0xe4669f39040201f6, 0x7a1d7c218cf04ade,
0x65623c29d79ce5ce, 0x2368449096c00bb1, 0xab9bf1879da503ba, 0xbc23ecb1a458058e,
0x9a58df01bb401ecc, 0xa070e868a85f143d, 0x4ff188307df2239e, 0x14d565b41a641183,
0xee13337452701602, 0x950e3dcf3f285e09, 0x59930254b9c80953, 0x3bf299408930da6d,
0xa955943f53691387, 0xa15edecaa9cb8784, 0x29142127352be9a0, 0x76f0371fff4e7afb,
0x0239f450274f2228, 0xbb073af01d5e868b, 0xbfc80571c10e96c1, 0xd267088568222e23,
0x9671a3d48e80b5b0, 0x55b5d38ae193bb81, 0x693ae2d0a18b04b8, 0x5c48b4ecadd5335f,
0xfd743b194916a1ca, 0x2577018134be98c4, 0xe77987e83c54a4ad, 0x28e11014da33e1b9,
0x270cc59e226aa213, 0x71495f756d1a5f60, 0x9be853fb60afef77, 0xadc786a7f7443dbf,
0x0904456173b29a82, 0x58bc7a66c232bd5e, 0xf306558c673ac8b2, 0x41f639c6b6c9772a,
0x216defe99fda35da, 0x11640cc71c7be615, 0x93c43694565c5527, 0xea038e6246777839,
0xf9abf3ce5a3e2469, 0x741e768d0fd312d2, 0x0144b883ced652c6, 0xc20b5a5ba33f8552,
0x1ae69633c3435a9d, 0x97a28ca4088cfdec, 0x8824a43c1e96f420, 0x37612fa66eeea746,
0x6b4cb165f9cf0e5a, 0x43aa1c06a0abfb4a, 0x7f4dc26ff162796b, 0x6cbacc8e54ed9b0f,
0xa6b7ffefd2bb253e, 0x2e25bc95b0a29d4f, 0x86d6a58bdef1388c, 0xded74ac576b6f054,
0x8030bdbc2b45805d, 0x3c81af70e94d9289, 0x3eff6dda9e3100db, 0xb38dc39fdfcc8847,
0x123885528d17b87e, 0xf2da0ed240b1b642, 0x44cefadcd54bf9a9, 0x1312200e433c7ee6,
0x9ffcc84f3a78c748, 0xf0cd1f72248576bb, 0xec6974053638cfe4, 0x2ba7b67c0cec4e4c,
0xac2f4df3e5ce32ed, 0xcb33d14326ea4c11, 0xa4e9044cc77e58bc, 0x5f513293d934fcef,
0x5dc9645506e55444, 0x50de418f317de40a, 0x388cb31a69dde259, 0x2db4a83455820a86,
0x9010a91e84711ae9, 0x4df7f0b7b1498371, 0xd62a2eabc0977179, 0x22fac097aa8d5c0e,
}
T3 := [?]u64 {
0xf49fcc2ff1daf39b, 0x487fd5c66ff29281, 0xe8a30667fcdca83f, 0x2c9b4be3d2fcce63,
0xda3ff74b93fbbbc2, 0x2fa165d2fe70ba66, 0xa103e279970e93d4, 0xbecdec77b0e45e71,
0xcfb41e723985e497, 0xb70aaa025ef75017, 0xd42309f03840b8e0, 0x8efc1ad035898579,
0x96c6920be2b2abc5, 0x66af4163375a9172, 0x2174abdcca7127fb, 0xb33ccea64a72ff41,
0xf04a4933083066a5, 0x8d970acdd7289af5, 0x8f96e8e031c8c25e, 0xf3fec02276875d47,
0xec7bf310056190dd, 0xf5adb0aebb0f1491, 0x9b50f8850fd58892, 0x4975488358b74de8,
0xa3354ff691531c61, 0x0702bbe481d2c6ee, 0x89fb24057deded98, 0xac3075138596e902,
0x1d2d3580172772ed, 0xeb738fc28e6bc30d, 0x5854ef8f63044326, 0x9e5c52325add3bbe,
0x90aa53cf325c4623, 0xc1d24d51349dd067, 0x2051cfeea69ea624, 0x13220f0a862e7e4f,
0xce39399404e04864, 0xd9c42ca47086fcb7, 0x685ad2238a03e7cc, 0x066484b2ab2ff1db,
0xfe9d5d70efbf79ec, 0x5b13b9dd9c481854, 0x15f0d475ed1509ad, 0x0bebcd060ec79851,
0xd58c6791183ab7f8, 0xd1187c5052f3eee4, 0xc95d1192e54e82ff, 0x86eea14cb9ac6ca2,
0x3485beb153677d5d, 0xdd191d781f8c492a, 0xf60866baa784ebf9, 0x518f643ba2d08c74,
0x8852e956e1087c22, 0xa768cb8dc410ae8d, 0x38047726bfec8e1a, 0xa67738b4cd3b45aa,
0xad16691cec0dde19, 0xc6d4319380462e07, 0xc5a5876d0ba61938, 0x16b9fa1fa58fd840,
0x188ab1173ca74f18, 0xabda2f98c99c021f, 0x3e0580ab134ae816, 0x5f3b05b773645abb,
0x2501a2be5575f2f6, 0x1b2f74004e7e8ba9, 0x1cd7580371e8d953, 0x7f6ed89562764e30,
0xb15926ff596f003d, 0x9f65293da8c5d6b9, 0x6ecef04dd690f84c, 0x4782275fff33af88,
0xe41433083f820801, 0xfd0dfe409a1af9b5, 0x4325a3342cdb396b, 0x8ae77e62b301b252,
0xc36f9e9f6655615a, 0x85455a2d92d32c09, 0xf2c7dea949477485, 0x63cfb4c133a39eba,
0x83b040cc6ebc5462, 0x3b9454c8fdb326b0, 0x56f56a9e87ffd78c, 0x2dc2940d99f42bc6,
0x98f7df096b096e2d, 0x19a6e01e3ad852bf, 0x42a99ccbdbd4b40b, 0xa59998af45e9c559,
0x366295e807d93186, 0x6b48181bfaa1f773, 0x1fec57e2157a0a1d, 0x4667446af6201ad5,
0xe615ebcacfb0f075, 0xb8f31f4f68290778, 0x22713ed6ce22d11e, 0x3057c1a72ec3c93b,
0xcb46acc37c3f1f2f, 0xdbb893fd02aaf50e, 0x331fd92e600b9fcf, 0xa498f96148ea3ad6,
0xa8d8426e8b6a83ea, 0xa089b274b7735cdc, 0x87f6b3731e524a11, 0x118808e5cbc96749,
0x9906e4c7b19bd394, 0xafed7f7e9b24a20c, 0x6509eadeeb3644a7, 0x6c1ef1d3e8ef0ede,
0xb9c97d43e9798fb4, 0xa2f2d784740c28a3, 0x7b8496476197566f, 0x7a5be3e6b65f069d,
0xf96330ed78be6f10, 0xeee60de77a076a15, 0x2b4bee4aa08b9bd0, 0x6a56a63ec7b8894e,
0x02121359ba34fef4, 0x4cbf99f8283703fc, 0x398071350caf30c8, 0xd0a77a89f017687a,
0xf1c1a9eb9e423569, 0x8c7976282dee8199, 0x5d1737a5dd1f7abd, 0x4f53433c09a9fa80,
0xfa8b0c53df7ca1d9, 0x3fd9dcbc886ccb77, 0xc040917ca91b4720, 0x7dd00142f9d1dcdf,
0x8476fc1d4f387b58, 0x23f8e7c5f3316503, 0x032a2244e7e37339, 0x5c87a5d750f5a74b,
0x082b4cc43698992e, 0xdf917becb858f63c, 0x3270b8fc5bf86dda, 0x10ae72bb29b5dd76,
0x576ac94e7700362b, 0x1ad112dac61efb8f, 0x691bc30ec5faa427, 0xff246311cc327143,
0x3142368e30e53206, 0x71380e31e02ca396, 0x958d5c960aad76f1, 0xf8d6f430c16da536,
0xc8ffd13f1be7e1d2, 0x7578ae66004ddbe1, 0x05833f01067be646, 0xbb34b5ad3bfe586d,
0x095f34c9a12b97f0, 0x247ab64525d60ca8, 0xdcdbc6f3017477d1, 0x4a2e14d4decad24d,
0xbdb5e6d9be0a1eeb, 0x2a7e70f7794301ab, 0xdef42d8a270540fd, 0x01078ec0a34c22c1,
0xe5de511af4c16387, 0x7ebb3a52bd9a330a, 0x77697857aa7d6435, 0x004e831603ae4c32,
0xe7a21020ad78e312, 0x9d41a70c6ab420f2, 0x28e06c18ea1141e6, 0xd2b28cbd984f6b28,
0x26b75f6c446e9d83, 0xba47568c4d418d7f, 0xd80badbfe6183d8e, 0x0e206d7f5f166044,
0xe258a43911cbca3e, 0x723a1746b21dc0bc, 0xc7caa854f5d7cdd3, 0x7cac32883d261d9c,
0x7690c26423ba942c, 0x17e55524478042b8, 0xe0be477656a2389f, 0x4d289b5e67ab2da0,
0x44862b9c8fbbfd31, 0xb47cc8049d141365, 0x822c1b362b91c793, 0x4eb14655fb13dfd8,
0x1ecbba0714e2a97b, 0x6143459d5cde5f14, 0x53a8fbf1d5f0ac89, 0x97ea04d81c5e5b00,
0x622181a8d4fdb3f3, 0xe9bcd341572a1208, 0x1411258643cce58a, 0x9144c5fea4c6e0a4,
0x0d33d06565cf620f, 0x54a48d489f219ca1, 0xc43e5eac6d63c821, 0xa9728b3a72770daf,
0xd7934e7b20df87ef, 0xe35503b61a3e86e5, 0xcae321fbc819d504, 0x129a50b3ac60bfa6,
0xcd5e68ea7e9fb6c3, 0xb01c90199483b1c7, 0x3de93cd5c295376c, 0xaed52edf2ab9ad13,
0x2e60f512c0a07884, 0xbc3d86a3e36210c9, 0x35269d9b163951ce, 0x0c7d6e2ad0cdb5fa,
0x59e86297d87f5733, 0x298ef221898db0e7, 0x55000029d1a5aa7e, 0x8bc08ae1b5061b45,
0xc2c31c2b6c92703a, 0x94cc596baf25ef42, 0x0a1d73db22540456, 0x04b6a0f9d9c4179a,
0xeffdafa2ae3d3c60, 0xf7c8075bb49496c4, 0x9cc5c7141d1cd4e3, 0x78bd1638218e5534,
0xb2f11568f850246a, 0xedfabcfa9502bc29, 0x796ce5f2da23051b, 0xaae128b0dc93537c,
0x3a493da0ee4b29ae, 0xb5df6b2c416895d7, 0xfcabbd25122d7f37, 0x70810b58105dc4b1,
0xe10fdd37f7882a90, 0x524dcab5518a3f5c, 0x3c9e85878451255b, 0x4029828119bd34e2,
0x74a05b6f5d3ceccb, 0xb610021542e13eca, 0x0ff979d12f59e2ac, 0x6037da27e4f9cc50,
0x5e92975a0df1847d, 0xd66de190d3e623fe, 0x5032d6b87b568048, 0x9a36b7ce8235216e,
0x80272a7a24f64b4a, 0x93efed8b8c6916f7, 0x37ddbff44cce1555, 0x4b95db5d4b99bd25,
0x92d3fda169812fc0, 0xfb1a4a9a90660bb6, 0x730c196946a4b9b2, 0x81e289aa7f49da68,
0x64669a0f83b1a05f, 0x27b3ff7d9644f48b, 0xcc6b615c8db675b3, 0x674f20b9bcebbe95,
0x6f31238275655982, 0x5ae488713e45cf05, 0xbf619f9954c21157, 0xeabac46040a8eae9,
0x454c6fe9f2c0c1cd, 0x419cf6496412691c, 0xd3dc3bef265b0f70, 0x6d0e60f5c3578a9e,
}
T4 := [?]u64 {
0x5b0e608526323c55, 0x1a46c1a9fa1b59f5, 0xa9e245a17c4c8ffa, 0x65ca5159db2955d7,
0x05db0a76ce35afc2, 0x81eac77ea9113d45, 0x528ef88ab6ac0a0d, 0xa09ea253597be3ff,
0x430ddfb3ac48cd56, 0xc4b3a67af45ce46f, 0x4ececfd8fbe2d05e, 0x3ef56f10b39935f0,
0x0b22d6829cd619c6, 0x17fd460a74df2069, 0x6cf8cc8e8510ed40, 0xd6c824bf3a6ecaa7,
0x61243d581a817049, 0x048bacb6bbc163a2, 0xd9a38ac27d44cc32, 0x7fddff5baaf410ab,
0xad6d495aa804824b, 0xe1a6a74f2d8c9f94, 0xd4f7851235dee8e3, 0xfd4b7f886540d893,
0x247c20042aa4bfda, 0x096ea1c517d1327c, 0xd56966b4361a6685, 0x277da5c31221057d,
0x94d59893a43acff7, 0x64f0c51ccdc02281, 0x3d33bcc4ff6189db, 0xe005cb184ce66af1,
0xff5ccd1d1db99bea, 0xb0b854a7fe42980f, 0x7bd46a6a718d4b9f, 0xd10fa8cc22a5fd8c,
0xd31484952be4bd31, 0xc7fa975fcb243847, 0x4886ed1e5846c407, 0x28cddb791eb70b04,
0xc2b00be2f573417f, 0x5c9590452180f877, 0x7a6bddfff370eb00, 0xce509e38d6d9d6a4,
0xebeb0f00647fa702, 0x1dcc06cf76606f06, 0xe4d9f28ba286ff0a, 0xd85a305dc918c262,
0x475b1d8732225f54, 0x2d4fb51668ccb5fe, 0xa679b9d9d72bba20, 0x53841c0d912d43a5,
0x3b7eaa48bf12a4e8, 0x781e0e47f22f1ddf, 0xeff20ce60ab50973, 0x20d261d19dffb742,
0x16a12b03062a2e39, 0x1960eb2239650495, 0x251c16fed50eb8b8, 0x9ac0c330f826016e,
0xed152665953e7671, 0x02d63194a6369570, 0x5074f08394b1c987, 0x70ba598c90b25ce1,
0x794a15810b9742f6, 0x0d5925e9fcaf8c6c, 0x3067716cd868744e, 0x910ab077e8d7731b,
0x6a61bbdb5ac42f61, 0x93513efbf0851567, 0xf494724b9e83e9d5, 0xe887e1985c09648d,
0x34b1d3c675370cfd, 0xdc35e433bc0d255d, 0xd0aab84234131be0, 0x08042a50b48b7eaf,
0x9997c4ee44a3ab35, 0x829a7b49201799d0, 0x263b8307b7c54441, 0x752f95f4fd6a6ca6,
0x927217402c08c6e5, 0x2a8ab754a795d9ee, 0xa442f7552f72943d, 0x2c31334e19781208,
0x4fa98d7ceaee6291, 0x55c3862f665db309, 0xbd0610175d53b1f3, 0x46fe6cb840413f27,
0x3fe03792df0cfa59, 0xcfe700372eb85e8f, 0xa7be29e7adbce118, 0xe544ee5cde8431dd,
0x8a781b1b41f1873e, 0xa5c94c78a0d2f0e7, 0x39412e2877b60728, 0xa1265ef3afc9a62c,
0xbcc2770c6a2506c5, 0x3ab66dd5dce1ce12, 0xe65499d04a675b37, 0x7d8f523481bfd216,
0x0f6f64fcec15f389, 0x74efbe618b5b13c8, 0xacdc82b714273e1d, 0xdd40bfe003199d17,
0x37e99257e7e061f8, 0xfa52626904775aaa, 0x8bbbf63a463d56f9, 0xf0013f1543a26e64,
0xa8307e9f879ec898, 0xcc4c27a4150177cc, 0x1b432f2cca1d3348, 0xde1d1f8f9f6fa013,
0x606602a047a7ddd6, 0xd237ab64cc1cb2c7, 0x9b938e7225fcd1d3, 0xec4e03708e0ff476,
0xfeb2fbda3d03c12d, 0xae0bced2ee43889a, 0x22cb8923ebfb4f43, 0x69360d013cf7396d,
0x855e3602d2d4e022, 0x073805bad01f784c, 0x33e17a133852f546, 0xdf4874058ac7b638,
0xba92b29c678aa14a, 0x0ce89fc76cfaadcd, 0x5f9d4e0908339e34, 0xf1afe9291f5923b9,
0x6e3480f60f4a265f, 0xeebf3a2ab29b841c, 0xe21938a88f91b4ad, 0x57dfeff845c6d3c3,
0x2f006b0bf62caaf2, 0x62f479ef6f75ee78, 0x11a55ad41c8916a9, 0xf229d29084fed453,
0x42f1c27b16b000e6, 0x2b1f76749823c074, 0x4b76eca3c2745360, 0x8c98f463b91691bd,
0x14bcc93cf1ade66a, 0x8885213e6d458397, 0x8e177df0274d4711, 0xb49b73b5503f2951,
0x10168168c3f96b6b, 0x0e3d963b63cab0ae, 0x8dfc4b5655a1db14, 0xf789f1356e14de5c,
0x683e68af4e51dac1, 0xc9a84f9d8d4b0fd9, 0x3691e03f52a0f9d1, 0x5ed86e46e1878e80,
0x3c711a0e99d07150, 0x5a0865b20c4e9310, 0x56fbfc1fe4f0682e, 0xea8d5de3105edf9b,
0x71abfdb12379187a, 0x2eb99de1bee77b9c, 0x21ecc0ea33cf4523, 0x59a4d7521805c7a1,
0x3896f5eb56ae7c72, 0xaa638f3db18f75dc, 0x9f39358dabe9808e, 0xb7defa91c00b72ac,
0x6b5541fd62492d92, 0x6dc6dee8f92e4d5b, 0x353f57abc4beea7e, 0x735769d6da5690ce,
0x0a234aa642391484, 0xf6f9508028f80d9d, 0xb8e319a27ab3f215, 0x31ad9c1151341a4d,
0x773c22a57bef5805, 0x45c7561a07968633, 0xf913da9e249dbe36, 0xda652d9b78a64c68,
0x4c27a97f3bc334ef, 0x76621220e66b17f4, 0x967743899acd7d0b, 0xf3ee5bcae0ed6782,
0x409f753600c879fc, 0x06d09a39b5926db6, 0x6f83aeb0317ac588, 0x01e6ca4a86381f21,
0x66ff3462d19f3025, 0x72207c24ddfd3bfb, 0x4af6b6d3e2ece2eb, 0x9c994dbec7ea08de,
0x49ace597b09a8bc4, 0xb38c4766cf0797ba, 0x131b9373c57c2a75, 0xb1822cce61931e58,
0x9d7555b909ba1c0c, 0x127fafdd937d11d2, 0x29da3badc66d92e4, 0xa2c1d57154c2ecbc,
0x58c5134d82f6fe24, 0x1c3ae3515b62274f, 0xe907c82e01cb8126, 0xf8ed091913e37fcb,
0x3249d8f9c80046c9, 0x80cf9bede388fb63, 0x1881539a116cf19e, 0x5103f3f76bd52457,
0x15b7e6f5ae47f7a8, 0xdbd7c6ded47e9ccf, 0x44e55c410228bb1a, 0xb647d4255edb4e99,
0x5d11882bb8aafc30, 0xf5098bbb29d3212a, 0x8fb5ea14e90296b3, 0x677b942157dd025a,
0xfb58e7c0a390acb5, 0x89d3674c83bd4a01, 0x9e2da4df4bf3b93b, 0xfcc41e328cab4829,
0x03f38c96ba582c52, 0xcad1bdbd7fd85db2, 0xbbb442c16082ae83, 0xb95fe86ba5da9ab0,
0xb22e04673771a93f, 0x845358c9493152d8, 0xbe2a488697b4541e, 0x95a2dc2dd38e6966,
0xc02c11ac923c852b, 0x2388b1990df2a87b, 0x7c8008fa1b4f37be, 0x1f70d0c84d54e503,
0x5490adec7ece57d4, 0x002b3c27d9063a3a, 0x7eaea3848030a2bf, 0xc602326ded2003c0,
0x83a7287d69a94086, 0xc57a5fcb30f57a8a, 0xb56844e479ebe779, 0xa373b40f05dcbce9,
0xd71a786e88570ee2, 0x879cbacdbde8f6a0, 0x976ad1bcc164a32f, 0xab21e25e9666d78b,
0x901063aae5e5c33c, 0x9818b34448698d90, 0xe36487ae3e1e8abb, 0xafbdf931893bdcb4,
0x6345a0dc5fbbd519, 0x8628fe269b9465ca, 0x1e5d01603f9c51ec, 0x4de44006a15049b7,
0xbf6c70e5f776cbb1, 0x411218f2ef552bed, 0xcb0c0708705a36a3, 0xe74d14754f986044,
0xcd56d9430ea8280e, 0xc12591d7535f5065, 0xc83223f1720aef96, 0xc3a0396f7363a51f,
}
Tiger_Context :: struct {
a: u64,
b: u64,
c: u64,
x: [64]byte,
nx: int,
length: u64,
ver: int,
}
round :: #force_inline proc "contextless" (a, b, c, x, mul: u64) -> (u64, u64, u64) {
a, b, c := a, b, c
c ~= x
a -= T1[c & 0xff] ~ T2[(c >> 16) & 0xff] ~ T3[(c >> 32) & 0xff] ~ T4[(c >> 48) & 0xff]
b += T4[(c >> 8) & 0xff] ~ T3[(c >> 24) & 0xff] ~ T2[(c >> 40) & 0xff] ~ T1[(c >> 56) & 0xff]
b *= mul
return a, b, c
}
pass :: #force_inline proc "contextless" (a, b, c: u64, d: []u64, mul: u64) -> (x, y, z: u64) {
x, y, z = round(a, b, c, d[0], mul)
y, z, x = round(y, z, x, d[1], mul)
z, x, y = round(z, x, y, d[2], mul)
x, y, z = round(x, y, z, d[3], mul)
y, z, x = round(y, z, x, d[4], mul)
z, x, y = round(z, x, y, d[5], mul)
x, y, z = round(x, y, z, d[6], mul)
y, z, x = round(y, z, x, d[7], mul)
return
}
key_schedule :: #force_inline proc "contextless" (x: []u64) {
x[0] -= x[7] ~ 0xa5a5a5a5a5a5a5a5
x[1] ~= x[0]
x[2] += x[1]
x[3] -= x[2] ~ ((~x[1]) << 19)
x[4] ~= x[3]
x[5] += x[4]
x[6] -= x[5] ~ ((~x[4]) >> 23)
x[7] ~= x[6]
x[0] += x[7]
x[1] -= x[0] ~ ((~x[7]) << 19)
x[2] ~= x[1]
x[3] += x[2]
x[4] -= x[3] ~ ((~x[2]) >> 23)
x[5] ~= x[4]
x[6] += x[5]
x[7] -= x[6] ~ 0x0123456789abcdef
}
compress :: #force_inline proc "contextless" (ctx: ^Tiger_Context, data: []byte) {
a := ctx.a
b := ctx.b
c := ctx.c
x := util.cast_slice([]u64, data)
ctx.a, ctx.b, ctx.c = pass(ctx.a, ctx.b, ctx.c, x, 5)
key_schedule(x)
ctx.c, ctx.a, ctx.b = pass(ctx.c, ctx.a, ctx.b, x, 7)
key_schedule(x)
ctx.b, ctx.c, ctx.a = pass(ctx.b, ctx.c, ctx.a, x, 9)
ctx.a ~= a
ctx.b -= b
ctx.c += c
}
init :: proc "contextless" (ctx: ^Tiger_Context) {
ctx.a = 0x0123456789abcdef
ctx.b = 0xfedcba9876543210
ctx.c = 0xf096a5b4c3b2e187
}
update :: proc(ctx: ^Tiger_Context, input: []byte) {
p := make([]byte, len(input))
copy(p, input)
length := len(p)
ctx.length += u64(length)
if ctx.nx > 0 {
n := len(p)
if n > 64 - ctx.nx {
n = 64 - ctx.nx
}
copy(ctx.x[ctx.nx:ctx.nx + n], p[:n])
ctx.nx += n
if ctx.nx == 64 {
compress(ctx, ctx.x[:64]) // the full 64-byte block: compress consumes eight u64 words
ctx.nx = 0
}
p = p[n:]
}
for len(p) >= 64 {
compress(ctx, p[:64])
p = p[64:]
}
if len(p) > 0 {
ctx.nx = copy(ctx.x[:], p)
}
}
final :: proc(ctx: ^Tiger_Context, hash: []byte) {
length := ctx.length
tmp: [64]byte
if ctx.ver == 1 {
tmp[0] = 0x01
} else {
tmp[0] = 0x80
}
size := length & 0x3f
if size < 56 {
update(ctx, tmp[:56 - size])
} else {
update(ctx, tmp[:64 + 56 - size])
}
length <<= 3
for i := uint(0); i < 8; i += 1 {
tmp[i] = byte(length >> (8 * i))
}
update(ctx, tmp[:8])
for i := uint(0); i < 8; i += 1 {
tmp[i] = byte(ctx.a >> (8 * i))
tmp[i + 8] = byte(ctx.b >> (8 * i))
tmp[i + 16] = byte(ctx.c >> (8 * i))
}
copy(hash[:], tmp[:len(hash)])
}
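For orientation, a minimal usage sketch of the low-level API above (illustrative only; it assumes the surrounding package scope, and `example_tiger` and the message are made-up names):
example_tiger :: proc() {
	ctx: Tiger_Context
	ctx.ver = 1 // 1 selects the original Tiger pad byte 0x01; anything else gives the Tiger2-style 0x80
	init(&ctx)
	msg := "Hellope"
	update(&ctx, transmute([]byte)msg)
	digest: [24]byte // a, b and c serialized little-endian: a 192-bit digest
	final(&ctx, digest[:])
}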

View File

@@ -1,726 +0,0 @@
package blake
/*
Copyright 2021 zhibog
Made available under the BSD-3 license.
List of contributors:
zhibog, dotbmp: Initial implementation.
Implementation of the BLAKE hashing algorithm, as defined in <https://web.archive.org/web/20190915215948/https://131002.net/blake>
*/
import "core:os"
import "core:io"
/*
High level API
*/
DIGEST_SIZE_224 :: 28
DIGEST_SIZE_256 :: 32
DIGEST_SIZE_384 :: 48
DIGEST_SIZE_512 :: 64
// hash_string_224 will hash the given input and return the
// computed hash
hash_string_224 :: proc "contextless" (data: string) -> [DIGEST_SIZE_224]byte {
return hash_bytes_224(transmute([]byte)(data))
}
// hash_bytes_224 will hash the given input and return the
// computed hash
hash_bytes_224 :: proc "contextless" (data: []byte) -> [DIGEST_SIZE_224]byte {
hash: [DIGEST_SIZE_224]byte
ctx: Blake256_Context
ctx.is224 = true
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_224 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_224 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
ctx: Blake256_Context
ctx.is224 = true
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream_224 will read the stream in chunks and compute a
// hash from its contents
hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
hash: [DIGEST_SIZE_224]byte
ctx: Blake256_Context
ctx.is224 = true
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_224 will read the file provided by the given handle
// and compute a hash
hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
if !load_at_once {
return hash_stream_224(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_224(buf[:]), ok
}
}
return [DIGEST_SIZE_224]byte{}, false
}
hash_224 :: proc {
hash_stream_224,
hash_file_224,
hash_bytes_224,
hash_string_224,
hash_bytes_to_buffer_224,
hash_string_to_buffer_224,
}
// hash_string_256 will hash the given input and return the
// computed hash
hash_string_256 :: proc "contextless" (data: string) -> [DIGEST_SIZE_256]byte {
return hash_bytes_256(transmute([]byte)(data))
}
// hash_bytes_256 will hash the given input and return the
// computed hash
hash_bytes_256 :: proc "contextless" (data: []byte) -> [DIGEST_SIZE_256]byte {
hash: [DIGEST_SIZE_256]byte
ctx: Blake256_Context
ctx.is224 = false
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_256 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_256 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
ctx: Blake256_Context
ctx.is224 = false
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream_256 will read the stream in chunks and compute a
// hash from its contents
hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
hash: [DIGEST_SIZE_256]byte
ctx: Blake256_Context
ctx.is224 = false
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_256 will read the file provided by the given handle
// and compute a hash
hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
if !load_at_once {
return hash_stream_256(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_256(buf[:]), ok
}
}
return [DIGEST_SIZE_256]byte{}, false
}
hash_256 :: proc {
hash_stream_256,
hash_file_256,
hash_bytes_256,
hash_string_256,
hash_bytes_to_buffer_256,
hash_string_to_buffer_256,
}
// hash_string_384 will hash the given input and return the
// computed hash
hash_string_384 :: proc "contextless" (data: string) -> [DIGEST_SIZE_384]byte {
return hash_bytes_384(transmute([]byte)(data))
}
// hash_bytes_384 will hash the given input and return the
// computed hash
hash_bytes_384 :: proc "contextless" (data: []byte) -> [DIGEST_SIZE_384]byte {
hash: [DIGEST_SIZE_384]byte
ctx: Blake512_Context
ctx.is384 = true
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_384 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_384 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_384, "Size of destination buffer is smaller than the digest size")
ctx: Blake512_Context
ctx.is384 = true
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream_384 will read the stream in chunks and compute a
// hash from its contents
hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
hash: [DIGEST_SIZE_384]byte
ctx: Blake512_Context
ctx.is384 = true
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_384 will read the file provided by the given handle
// and compute a hash
hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
if !load_at_once {
return hash_stream_384(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_384(buf[:]), ok
}
}
return [DIGEST_SIZE_384]byte{}, false
}
hash_384 :: proc {
hash_stream_384,
hash_file_384,
hash_bytes_384,
hash_string_384,
hash_bytes_to_buffer_384,
hash_string_to_buffer_384,
}
// hash_string_512 will hash the given input and return the
// computed hash
hash_string_512 :: proc "contextless" (data: string) -> [DIGEST_SIZE_512]byte {
return hash_bytes_512(transmute([]byte)(data))
}
// hash_bytes_512 will hash the given input and return the
// computed hash
hash_bytes_512 :: proc "contextless" (data: []byte) -> [DIGEST_SIZE_512]byte {
hash: [DIGEST_SIZE_512]byte
ctx: Blake512_Context
ctx.is384 = false
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_512 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_512 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
ctx: Blake512_Context
ctx.is384 = false
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream_512 will read the stream in chunks and compute a
// hash from its contents
hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
hash: [DIGEST_SIZE_512]byte
ctx: Blake512_Context
ctx.is384 = false
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_512 will read the file provided by the given handle
// and compute a hash
hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
if !load_at_once {
return hash_stream_512(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_512(buf[:]), ok
}
}
return [DIGEST_SIZE_512]byte{}, false
}
hash_512 :: proc {
hash_stream_512,
hash_file_512,
hash_bytes_512,
hash_string_512,
hash_bytes_to_buffer_512,
hash_string_to_buffer_512,
}
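A minimal sketch of calling the convenience groups above (illustrative only; note that this commit removes the whole file):
example_blake :: proc() {
	digest256 := hash_string_256("abc")       // [DIGEST_SIZE_256]byte
	digest512 := hash_bytes_512(digest256[:]) // any []byte works
	_ = digest512
}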
/*
Low level API
*/
init :: proc "contextless" (ctx: ^$T) {
when T == Blake256_Context {
if ctx.is224 {
ctx.h[0] = 0xc1059ed8
ctx.h[1] = 0x367cd507
ctx.h[2] = 0x3070dd17
ctx.h[3] = 0xf70e5939
ctx.h[4] = 0xffc00b31
ctx.h[5] = 0x68581511
ctx.h[6] = 0x64f98fa7
ctx.h[7] = 0xbefa4fa4
} else {
ctx.h[0] = 0x6a09e667
ctx.h[1] = 0xbb67ae85
ctx.h[2] = 0x3c6ef372
ctx.h[3] = 0xa54ff53a
ctx.h[4] = 0x510e527f
ctx.h[5] = 0x9b05688c
ctx.h[6] = 0x1f83d9ab
ctx.h[7] = 0x5be0cd19
}
} else when T == Blake512_Context {
if ctx.is384 {
ctx.h[0] = 0xcbbb9d5dc1059ed8
ctx.h[1] = 0x629a292a367cd507
ctx.h[2] = 0x9159015a3070dd17
ctx.h[3] = 0x152fecd8f70e5939
ctx.h[4] = 0x67332667ffc00b31
ctx.h[5] = 0x8eb44a8768581511
ctx.h[6] = 0xdb0c2e0d64f98fa7
ctx.h[7] = 0x47b5481dbefa4fa4
} else {
ctx.h[0] = 0x6a09e667f3bcc908
ctx.h[1] = 0xbb67ae8584caa73b
ctx.h[2] = 0x3c6ef372fe94f82b
ctx.h[3] = 0xa54ff53a5f1d36f1
ctx.h[4] = 0x510e527fade682d1
ctx.h[5] = 0x9b05688c2b3e6c1f
ctx.h[6] = 0x1f83d9abfb41bd6b
ctx.h[7] = 0x5be0cd19137e2179
}
}
}
update :: proc "contextless" (ctx: ^$T, data: []byte) {
data := data
when T == Blake256_Context {
if ctx.nx > 0 {
n := copy(ctx.x[ctx.nx:], data)
ctx.nx += n
if ctx.nx == BLOCKSIZE_256 {
block256(ctx, ctx.x[:])
ctx.nx = 0
}
data = data[n:]
}
if len(data) >= BLOCKSIZE_256 {
n := len(data) &~ (BLOCKSIZE_256 - 1)
block256(ctx, data[:n])
data = data[n:]
}
if len(data) > 0 {
ctx.nx = copy(ctx.x[:], data)
}
} else when T == Blake512_Context {
if ctx.nx > 0 {
n := copy(ctx.x[ctx.nx:], data)
ctx.nx += n
if ctx.nx == BLOCKSIZE_512 {
block512(ctx, ctx.x[:])
ctx.nx = 0
}
data = data[n:]
}
if len(data) >= BLOCKSIZE_512 {
n := len(data) &~ (BLOCKSIZE_512 - 1)
block512(ctx, data[:n])
data = data[n:]
}
if len(data) > 0 {
ctx.nx = copy(ctx.x[:], data)
}
}
}
final :: proc "contextless" (ctx: ^$T, hash: []byte) {
when T == Blake256_Context {
tmp: [65]byte
} else when T == Blake512_Context {
tmp: [129]byte
}
nx := u64(ctx.nx)
tmp[0] = 0x80
length := ctx.t + (nx << 3) // total message bits: t already counts bits of compressed blocks, nx is leftover bytes
when T == Blake256_Context {
if nx == 55 {
if ctx.is224 {
write_additional(ctx, {0x80})
} else {
write_additional(ctx, {0x81})
}
} else {
if nx < 55 {
if nx == 0 {
ctx.nullt = true
}
write_additional(ctx, tmp[0 : 55 - nx])
} else {
write_additional(ctx, tmp[0 : 64 - nx])
write_additional(ctx, tmp[1:56])
ctx.nullt = true
}
if ctx.is224 {
write_additional(ctx, {0x00})
} else {
write_additional(ctx, {0x01})
}
}
for i : uint = 0; i < 8; i += 1 {
tmp[i] = byte(length >> (56 - 8 * i))
}
write_additional(ctx, tmp[0:8])
h := ctx.h[:]
if ctx.is224 {
h = h[0:7]
}
for s, i in h {
hash[i * 4] = byte(s >> 24)
hash[i * 4 + 1] = byte(s >> 16)
hash[i * 4 + 2] = byte(s >> 8)
hash[i * 4 + 3] = byte(s)
}
} else when T == Blake512_Context {
if nx == 111 {
if ctx.is384 {
write_additional(ctx, {0x80})
} else {
write_additional(ctx, {0x81})
}
} else {
if nx < 111 {
if nx == 0 {
ctx.nullt = true
}
write_additional(ctx, tmp[0 : 111 - nx])
} else {
write_additional(ctx, tmp[0 : 128 - nx])
write_additional(ctx, tmp[1:112])
ctx.nullt = true
}
if ctx.is384 {
write_additional(ctx, {0x00})
} else {
write_additional(ctx, {0x01})
}
}
for i : uint = 0; i < 16; i += 1 {
tmp[i] = byte(length >> (120 - 8 * i))
}
write_additional(ctx, tmp[0:16])
h := ctx.h[:]
if ctx.is384 {
h = h[0:6]
}
for s, i in h {
hash[i * 8] = byte(s >> 56)
hash[i * 8 + 1] = byte(s >> 48)
hash[i * 8 + 2] = byte(s >> 40)
hash[i * 8 + 3] = byte(s >> 32)
hash[i * 8 + 4] = byte(s >> 24)
hash[i * 8 + 5] = byte(s >> 16)
hash[i * 8 + 6] = byte(s >> 8)
hash[i * 8 + 7] = byte(s)
}
}
}
SIZE_224 :: 28
SIZE_256 :: 32
SIZE_384 :: 48
SIZE_512 :: 64
BLOCKSIZE_256 :: 64
BLOCKSIZE_512 :: 128
Blake256_Context :: struct {
h: [8]u32,
s: [4]u32,
t: u64,
x: [64]byte,
nx: int,
is224: bool,
nullt: bool,
}
Blake512_Context :: struct {
h: [8]u64,
s: [4]u64,
t: u64,
x: [128]byte,
nx: int,
is384: bool,
nullt: bool,
}
SIGMA := [?]int {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3,
11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4,
7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8,
9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13,
2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9,
12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11,
13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10,
6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5,
10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0,
}
U256 := [16]u32 {
0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344,
0xa4093822, 0x299f31d0, 0x082efa98, 0xec4e6c89,
0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c,
0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917,
}
U512 := [16]u64 {
0x243f6a8885a308d3, 0x13198a2e03707344, 0xa4093822299f31d0, 0x082efa98ec4e6c89,
0x452821e638d01377, 0xbe5466cf34e90c6c, 0xc0ac29b7c97c50dd, 0x3f84d5b5b5470917,
0x9216d5d98979fb1b, 0xd1310ba698dfb5ac, 0x2ffd72dbd01adfb7, 0xb8e1afed6a267e96,
0xba7c9045f12c7f99, 0x24a19947b3916cf7, 0x0801f2e2858efc16, 0x636920d871574e69,
}
G256 :: #force_inline proc "contextless" (a, b, c, d: u32, m: [16]u32, i, j: int) -> (u32, u32, u32, u32) {
a, b, c, d := a, b, c, d
a += m[SIGMA[(i % 10) * 16 + (2 * j)]] ~ U256[SIGMA[(i % 10) * 16 + (2 * j + 1)]]
a += b
d ~= a
d = d << (32 - 16) | d >> 16
c += d
b ~= c
b = b << (32 - 12) | b >> 12
a += m[SIGMA[(i % 10) * 16 + (2 * j + 1)]] ~ U256[SIGMA[(i % 10) * 16 + (2 * j)]]
a += b
d ~= a
d = d << (32 - 8) | d >> 8
c += d
b ~= c
b = b << (32 - 7) | b >> 7
return a, b, c, d
}
G512 :: #force_inline proc "contextless" (a, b, c, d: u64, m: [16]u64, i, j: int) -> (u64, u64, u64, u64) {
a, b, c, d := a, b, c, d
a += m[SIGMA[(i % 10) * 16 + (2 * j)]] ~ U512[SIGMA[(i % 10) * 16 + (2 * j + 1)]]
a += b
d ~= a
d = d << (64 - 32) | d >> 32
c += d
b ~= c
b = b << (64 - 25) | b >> 25
a += m[SIGMA[(i % 10) * 16 + (2 * j + 1)]] ~ U512[SIGMA[(i % 10) * 16 + (2 * j)]]
a += b
d ~= a
d = d << (64 - 16) | d >> 16
c += d
b ~= c
b = b << (64 - 11) | b >> 11
return a, b, c, d
}
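The shift pairs inside G256/G512, e.g. `d << (32 - 16) | d >> 16`, are ordinary right-rotations; an equivalent helper, had one been factored out, would look like this (a sketch, not part of the file):
rotr32 :: #force_inline proc "contextless" (x: u32, n: uint) -> u32 {
	return x >> n | x << (32 - n)
}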
block256 :: proc "contextless" (ctx: ^Blake256_Context, p: []byte) #no_bounds_check {
i, j: int = ---, ---
v, m: [16]u32 = ---, ---
p := p
for len(p) >= BLOCKSIZE_256 {
v[0] = ctx.h[0]
v[1] = ctx.h[1]
v[2] = ctx.h[2]
v[3] = ctx.h[3]
v[4] = ctx.h[4]
v[5] = ctx.h[5]
v[6] = ctx.h[6]
v[7] = ctx.h[7]
v[8] = ctx.s[0] ~ U256[0]
v[9] = ctx.s[1] ~ U256[1]
v[10] = ctx.s[2] ~ U256[2]
v[11] = ctx.s[3] ~ U256[3]
v[12] = U256[4]
v[13] = U256[5]
v[14] = U256[6]
v[15] = U256[7]
ctx.t += 512
if !ctx.nullt {
v[12] ~= u32(ctx.t)
v[13] ~= u32(ctx.t)
v[14] ~= u32(ctx.t >> 32)
v[15] ~= u32(ctx.t >> 32)
}
for i, j = 0, 0; i < 16; i, j = i+1, j+4 {
m[i] = u32(p[j]) << 24 | u32(p[j + 1]) << 16 | u32(p[j + 2]) << 8 | u32(p[j + 3])
}
for i = 0; i < 14; i += 1 {
v[0], v[4], v[8], v[12] = G256(v[0], v[4], v[8], v[12], m, i, 0)
v[1], v[5], v[9], v[13] = G256(v[1], v[5], v[9], v[13], m, i, 1)
v[2], v[6], v[10], v[14] = G256(v[2], v[6], v[10], v[14], m, i, 2)
v[3], v[7], v[11], v[15] = G256(v[3], v[7], v[11], v[15], m, i, 3)
v[0], v[5], v[10], v[15] = G256(v[0], v[5], v[10], v[15], m, i, 4)
v[1], v[6], v[11], v[12] = G256(v[1], v[6], v[11], v[12], m, i, 5)
v[2], v[7], v[8], v[13] = G256(v[2], v[7], v[8], v[13], m, i, 6)
v[3], v[4], v[9], v[14] = G256(v[3], v[4], v[9], v[14], m, i, 7)
}
for i = 0; i < 8; i += 1 {
ctx.h[i] ~= ctx.s[i % 4] ~ v[i] ~ v[i + 8]
}
p = p[BLOCKSIZE_256:]
}
}
block512 :: proc "contextless" (ctx: ^Blake512_Context, p: []byte) #no_bounds_check {
i, j: int = ---, ---
v, m: [16]u64 = ---, ---
p := p
for len(p) >= BLOCKSIZE_512 {
v[0] = ctx.h[0]
v[1] = ctx.h[1]
v[2] = ctx.h[2]
v[3] = ctx.h[3]
v[4] = ctx.h[4]
v[5] = ctx.h[5]
v[6] = ctx.h[6]
v[7] = ctx.h[7]
v[8] = ctx.s[0] ~ U512[0]
v[9] = ctx.s[1] ~ U512[1]
v[10] = ctx.s[2] ~ U512[2]
v[11] = ctx.s[3] ~ U512[3]
v[12] = U512[4]
v[13] = U512[5]
v[14] = U512[6]
v[15] = U512[7]
ctx.t += 1024
if !ctx.nullt {
v[12] ~= ctx.t
v[13] ~= ctx.t
v[14] ~= 0
v[15] ~= 0
}
for i, j = 0, 0; i < 16; i, j = i + 1, j + 8 {
m[i] = u64(p[j]) << 56 | u64(p[j + 1]) << 48 | u64(p[j + 2]) << 40 | u64(p[j + 3]) << 32 |
u64(p[j + 4]) << 24 | u64(p[j + 5]) << 16 | u64(p[j + 6]) << 8 | u64(p[j + 7])
}
for i = 0; i < 16; i += 1 {
v[0], v[4], v[8], v[12] = G512(v[0], v[4], v[8], v[12], m, i, 0)
v[1], v[5], v[9], v[13] = G512(v[1], v[5], v[9], v[13], m, i, 1)
v[2], v[6], v[10], v[14] = G512(v[2], v[6], v[10], v[14], m, i, 2)
v[3], v[7], v[11], v[15] = G512(v[3], v[7], v[11], v[15], m, i, 3)
v[0], v[5], v[10], v[15] = G512(v[0], v[5], v[10], v[15], m, i, 4)
v[1], v[6], v[11], v[12] = G512(v[1], v[6], v[11], v[12], m, i, 5)
v[2], v[7], v[8], v[13] = G512(v[2], v[7], v[8], v[13], m, i, 6)
v[3], v[4], v[9], v[14] = G512(v[3], v[4], v[9], v[14], m, i, 7)
}
for i = 0; i < 8; i += 1 {
ctx.h[i] ~= ctx.s[i % 4] ~ v[i] ~ v[i + 8]
}
p = p[BLOCKSIZE_512:]
}
}
write_additional :: proc "contextless" (ctx: ^$T, data: []byte) {
ctx.t -= u64(len(data)) << 3
update(ctx, data)
}
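The subtraction in write_additional keeps ctx.t honest: block256/block512 unconditionally add 512 or 1024 bits of counter per compressed block, so padding bytes must be pre-deducted. A worked example, assuming a 5-byte BLAKE-256 message: final feeds 50 + 1 + 8 = 59 padding and length bytes, write_additional subtracts 59 * 8 = 472 from t, the single resulting block256 call adds 512, and the counter mixed into v[12..15] lands on 512 - 472 = 40, which is exactly the 40 message bits.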

View File

@@ -7,12 +7,12 @@ package blake2b
List of contributors:
zhibog, dotbmp: Initial implementation.
Interface for the BLAKE2B hashing algorithm.
BLAKE2B and BLAKE2B share the implementation in the _blake2 package.
Interface for the BLAKE2b hashing algorithm.
BLAKE2b and BLAKE2s share the implementation in the _blake2 package.
*/
import "core:os"
import "core:io"
import "core:os"
import "../_blake2"
@@ -25,103 +25,103 @@ DIGEST_SIZE :: 64
// hash_string will hash the given input and return the
// computed hash
hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
return hash_bytes(transmute([]byte)(data))
return hash_bytes(transmute([]byte)(data))
}
// hash_bytes will hash the given input and return the
// computed hash
hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
hash: [DIGEST_SIZE]byte
ctx: _blake2.Blake2b_Context
cfg: _blake2.Blake2_Config
cfg.size = _blake2.BLAKE2B_SIZE
ctx.cfg = cfg
_blake2.init(&ctx)
_blake2.update(&ctx, data)
_blake2.final(&ctx, hash[:])
return hash
hash: [DIGEST_SIZE]byte
ctx: Context
cfg: _blake2.Blake2_Config
cfg.size = _blake2.BLAKE2B_SIZE
ctx.cfg = cfg
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer(transmute([]byte)(data), hash)
hash_bytes_to_buffer(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
ctx: _blake2.Blake2b_Context
cfg: _blake2.Blake2_Config
cfg.size = _blake2.BLAKE2B_SIZE
ctx.cfg = cfg
_blake2.init(&ctx)
_blake2.update(&ctx, data)
_blake2.final(&ctx, hash)
ctx: Context
cfg: _blake2.Blake2_Config
cfg.size = _blake2.BLAKE2B_SIZE
ctx.cfg = cfg
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream will read the stream in chunks and compute a
// hash from its contents
hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
hash: [DIGEST_SIZE]byte
ctx: _blake2.Blake2b_Context
cfg: _blake2.Blake2_Config
cfg.size = _blake2.BLAKE2B_SIZE
ctx.cfg = cfg
_blake2.init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
_blake2.update(&ctx, buf[:read])
}
}
_blake2.final(&ctx, hash[:])
return hash, true
hash: [DIGEST_SIZE]byte
ctx: Context
cfg: _blake2.Blake2_Config
cfg.size = _blake2.BLAKE2B_SIZE
ctx.cfg = cfg
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file will read the file provided by the given handle
// and compute a hash
hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes(buf[:]), ok
}
}
return [DIGEST_SIZE]byte{}, false
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes(buf[:]), ok
}
}
return [DIGEST_SIZE]byte{}, false
}
hash :: proc {
hash_stream,
hash_file,
hash_bytes,
hash_string,
hash_bytes_to_buffer,
hash_string_to_buffer,
hash_stream,
hash_file,
hash_bytes,
hash_string,
hash_bytes_to_buffer,
hash_string_to_buffer,
}
/*
Low level API
*/
Blake2b_Context :: _blake2.Blake2b_Context
Context :: _blake2.Blake2b_Context
init :: proc(ctx: ^_blake2.Blake2b_Context) {
_blake2.init(ctx)
init :: proc(ctx: ^Context) {
_blake2.init(ctx)
}
update :: proc "contextless" (ctx: ^_blake2.Blake2b_Context, data: []byte) {
_blake2.update(ctx, data)
update :: proc(ctx: ^Context, data: []byte) {
_blake2.update(ctx, data)
}
final :: proc "contextless" (ctx: ^_blake2.Blake2b_Context, hash: []byte) {
_blake2.final(ctx, hash)
final :: proc(ctx: ^Context, hash: []byte) {
_blake2.final(ctx, hash)
}
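A sketch of driving the new Context alias directly (illustrative; it mirrors what hash_bytes above does, and assumes Blake2_Config only needs its size field set for the default configuration):
example_blake2b :: proc() {
	ctx: Context
	ctx.cfg.size = _blake2.BLAKE2B_SIZE
	init(&ctx)
	msg := "Hellope"
	update(&ctx, transmute([]byte)msg)
	digest: [DIGEST_SIZE]byte
	final(&ctx, digest[:])
}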

View File

@@ -7,12 +7,12 @@ package blake2s
List of contributors:
zhibog, dotbmp: Initial implementation.
Interface for the BLAKE2S hashing algorithm.
BLAKE2B and BLAKE2B share the implementation in the _blake2 package.
Interface for the BLAKE2s hashing algorithm.
BLAKE2s and BLAKE2b share the implementation in the _blake2 package.
*/
import "core:os"
import "core:io"
import "core:os"
import "../_blake2"
@@ -25,103 +25,103 @@ DIGEST_SIZE :: 32
// hash_string will hash the given input and return the
// computed hash
hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
return hash_bytes(transmute([]byte)(data))
return hash_bytes(transmute([]byte)(data))
}
// hash_bytes will hash the given input and return the
// computed hash
hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
hash: [DIGEST_SIZE]byte
ctx: _blake2.Blake2s_Context
cfg: _blake2.Blake2_Config
cfg.size = _blake2.BLAKE2S_SIZE
ctx.cfg = cfg
_blake2.init(&ctx)
_blake2.update(&ctx, data)
_blake2.final(&ctx, hash[:])
return hash
hash: [DIGEST_SIZE]byte
ctx: Context
cfg: _blake2.Blake2_Config
cfg.size = _blake2.BLAKE2S_SIZE
ctx.cfg = cfg
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer(transmute([]byte)(data), hash)
hash_bytes_to_buffer(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
ctx: _blake2.Blake2s_Context
cfg: _blake2.Blake2_Config
cfg.size = _blake2.BLAKE2S_SIZE
ctx.cfg = cfg
_blake2.init(&ctx)
_blake2.update(&ctx, data)
_blake2.final(&ctx, hash)
ctx: Context
cfg: _blake2.Blake2_Config
cfg.size = _blake2.BLAKE2S_SIZE
ctx.cfg = cfg
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream will read the stream in chunks and compute a
// hash from its contents
hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
hash: [DIGEST_SIZE]byte
ctx: _blake2.Blake2s_Context
cfg: _blake2.Blake2_Config
cfg.size = _blake2.BLAKE2S_SIZE
ctx.cfg = cfg
_blake2.init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
_blake2.update(&ctx, buf[:read])
}
}
_blake2.final(&ctx, hash[:])
return hash, true
hash: [DIGEST_SIZE]byte
ctx: Context
cfg: _blake2.Blake2_Config
cfg.size = _blake2.BLAKE2S_SIZE
ctx.cfg = cfg
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file will read the file provided by the given handle
// and compute a hash
hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes(buf[:]), ok
}
}
return [DIGEST_SIZE]byte{}, false
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes(buf[:]), ok
}
}
return [DIGEST_SIZE]byte{}, false
}
hash :: proc {
hash_stream,
hash_file,
hash_bytes,
hash_string,
hash_bytes_to_buffer,
hash_string_to_buffer,
hash_stream,
hash_file,
hash_bytes,
hash_string,
hash_bytes_to_buffer,
hash_string_to_buffer,
}
/*
Low level API
*/
Blake2s_Context :: _blake2.Blake2b_Context
Context :: _blake2.Blake2s_Context
init :: proc(ctx: ^_blake2.Blake2s_Context) {
_blake2.init(ctx)
init :: proc(ctx: ^Context) {
_blake2.init(ctx)
}
update :: proc "contextless" (ctx: ^_blake2.Blake2s_Context, data: []byte) {
_blake2.update(ctx, data)
update :: proc(ctx: ^Context, data: []byte) {
_blake2.update(ctx, data)
}
final :: proc "contextless" (ctx: ^_blake2.Blake2s_Context, hash: []byte) {
_blake2.final(ctx, hash)
final :: proc(ctx: ^Context, hash: []byte) {
_blake2.final(ctx, hash)
}

View File

@@ -1,6 +1,6 @@
package chacha20
import "core:crypto/util"
import "core:encoding/endian"
import "core:math/bits"
import "core:mem"
@@ -60,23 +60,23 @@ init :: proc (ctx: ^Context, key, nonce: []byte) {
ctx._s[1] = _SIGMA_1
ctx._s[2] = _SIGMA_2
ctx._s[3] = _SIGMA_3
ctx._s[4] = util.U32_LE(k[0:4])
ctx._s[5] = util.U32_LE(k[4:8])
ctx._s[6] = util.U32_LE(k[8:12])
ctx._s[7] = util.U32_LE(k[12:16])
ctx._s[8] = util.U32_LE(k[16:20])
ctx._s[9] = util.U32_LE(k[20:24])
ctx._s[10] = util.U32_LE(k[24:28])
ctx._s[11] = util.U32_LE(k[28:32])
ctx._s[4] = endian.unchecked_get_u32le(k[0:4])
ctx._s[5] = endian.unchecked_get_u32le(k[4:8])
ctx._s[6] = endian.unchecked_get_u32le(k[8:12])
ctx._s[7] = endian.unchecked_get_u32le(k[12:16])
ctx._s[8] = endian.unchecked_get_u32le(k[16:20])
ctx._s[9] = endian.unchecked_get_u32le(k[20:24])
ctx._s[10] = endian.unchecked_get_u32le(k[24:28])
ctx._s[11] = endian.unchecked_get_u32le(k[28:32])
ctx._s[12] = 0
if !is_xchacha {
ctx._s[13] = util.U32_LE(n[0:4])
ctx._s[14] = util.U32_LE(n[4:8])
ctx._s[15] = util.U32_LE(n[8:12])
ctx._s[13] = endian.unchecked_get_u32le(n[0:4])
ctx._s[14] = endian.unchecked_get_u32le(n[4:8])
ctx._s[15] = endian.unchecked_get_u32le(n[8:12])
} else {
ctx._s[13] = 0
ctx._s[14] = util.U32_LE(n[0:4])
ctx._s[15] = util.U32_LE(n[4:8])
ctx._s[14] = endian.unchecked_get_u32le(n[0:4])
ctx._s[15] = endian.unchecked_get_u32le(n[4:8])
// The sub-key is stored in the keystream buffer. While
// this will be overwritten in most circumstances, explicitly
@@ -221,114 +221,114 @@ _do_blocks :: proc (ctx: ^Context, dst, src: []byte, nr_blocks: int) {
// quarterround(x, 0, 4, 8, 12)
x0 += x4
x12 ~= x0
x12 = util.ROTL32(x12, 16)
x12 = bits.rotate_left32(x12, 16)
x8 += x12
x4 ~= x8
x4 = util.ROTL32(x4, 12)
x4 = bits.rotate_left32(x4, 12)
x0 += x4
x12 ~= x0
x12 = util.ROTL32(x12, 8)
x12 = bits.rotate_left32(x12, 8)
x8 += x12
x4 ~= x8
x4 = util.ROTL32(x4, 7)
x4 = bits.rotate_left32(x4, 7)
// quarterround(x, 1, 5, 9, 13)
x1 += x5
x13 ~= x1
x13 = util.ROTL32(x13, 16)
x13 = bits.rotate_left32(x13, 16)
x9 += x13
x5 ~= x9
x5 = util.ROTL32(x5, 12)
x5 = bits.rotate_left32(x5, 12)
x1 += x5
x13 ~= x1
x13 = util.ROTL32(x13, 8)
x13 = bits.rotate_left32(x13, 8)
x9 += x13
x5 ~= x9
x5 = util.ROTL32(x5, 7)
x5 = bits.rotate_left32(x5, 7)
// quarterround(x, 2, 6, 10, 14)
x2 += x6
x14 ~= x2
x14 = util.ROTL32(x14, 16)
x14 = bits.rotate_left32(x14, 16)
x10 += x14
x6 ~= x10
x6 = util.ROTL32(x6, 12)
x6 = bits.rotate_left32(x6, 12)
x2 += x6
x14 ~= x2
x14 = util.ROTL32(x14, 8)
x14 = bits.rotate_left32(x14, 8)
x10 += x14
x6 ~= x10
x6 = util.ROTL32(x6, 7)
x6 = bits.rotate_left32(x6, 7)
// quarterround(x, 3, 7, 11, 15)
x3 += x7
x15 ~= x3
x15 = util.ROTL32(x15, 16)
x15 = bits.rotate_left32(x15, 16)
x11 += x15
x7 ~= x11
x7 = util.ROTL32(x7, 12)
x7 = bits.rotate_left32(x7, 12)
x3 += x7
x15 ~= x3
x15 = util.ROTL32(x15, 8)
x15 = bits.rotate_left32(x15, 8)
x11 += x15
x7 ~= x11
x7 = util.ROTL32(x7, 7)
x7 = bits.rotate_left32(x7, 7)
// quarterround(x, 0, 5, 10, 15)
x0 += x5
x15 ~= x0
x15 = util.ROTL32(x15, 16)
x15 = bits.rotate_left32(x15, 16)
x10 += x15
x5 ~= x10
x5 = util.ROTL32(x5, 12)
x5 = bits.rotate_left32(x5, 12)
x0 += x5
x15 ~= x0
x15 = util.ROTL32(x15, 8)
x15 = bits.rotate_left32(x15, 8)
x10 += x15
x5 ~= x10
x5 = util.ROTL32(x5, 7)
x5 = bits.rotate_left32(x5, 7)
// quarterround(x, 1, 6, 11, 12)
x1 += x6
x12 ~= x1
x12 = util.ROTL32(x12, 16)
x12 = bits.rotate_left32(x12, 16)
x11 += x12
x6 ~= x11
x6 = util.ROTL32(x6, 12)
x6 = bits.rotate_left32(x6, 12)
x1 += x6
x12 ~= x1
x12 = util.ROTL32(x12, 8)
x12 = bits.rotate_left32(x12, 8)
x11 += x12
x6 ~= x11
x6 = util.ROTL32(x6, 7)
x6 = bits.rotate_left32(x6, 7)
// quarterround(x, 2, 7, 8, 13)
x2 += x7
x13 ~= x2
x13 = util.ROTL32(x13, 16)
x13 = bits.rotate_left32(x13, 16)
x8 += x13
x7 ~= x8
x7 = util.ROTL32(x7, 12)
x7 = bits.rotate_left32(x7, 12)
x2 += x7
x13 ~= x2
x13 = util.ROTL32(x13, 8)
x13 = bits.rotate_left32(x13, 8)
x8 += x13
x7 ~= x8
x7 = util.ROTL32(x7, 7)
x7 = bits.rotate_left32(x7, 7)
// quarterround(x, 3, 4, 9, 14)
x3 += x4
x14 ~= x3
x14 = util.ROTL32(x14, 16)
x14 = bits.rotate_left32(x14, 16)
x9 += x14
x4 ~= x9
x4 = util.ROTL32(x4, 12)
x4 = bits.rotate_left32(x4, 12)
x3 += x4
x14 ~= x3
x14 = util.ROTL32(x14, 8)
x14 = bits.rotate_left32(x14, 8)
x9 += x14
x4 ~= x9
x4 = util.ROTL32(x4, 7)
x4 = bits.rotate_left32(x4, 7)
}
x0 += _SIGMA_0
@@ -352,93 +352,48 @@ _do_blocks :: proc (ctx: ^Context, dst, src: []byte, nr_blocks: int) {
// this is "use vector operations", support for that is currently
// a work in progress/to be designed.
//
// Until dedicated assembly can be written leverage the fact that
// the callers of this routine ensure that src/dst are valid.
// In the meantime:
// - The caller(s) ensure that src/dst are valid.
// - The compiler knows if the target is picky about alignment.
when ODIN_ARCH == .i386 || ODIN_ARCH == .amd64 {
// util.PUT_U32_LE/util.U32_LE are not required on little-endian
// systems that also happen to not be strict about aligned
// memory access.
dst_p := transmute(^[16]u32)(&dst[0])
#no_bounds_check {
if src != nil {
src_p := transmute(^[16]u32)(&src[0])
dst_p[0] = src_p[0] ~ x0
dst_p[1] = src_p[1] ~ x1
dst_p[2] = src_p[2] ~ x2
dst_p[3] = src_p[3] ~ x3
dst_p[4] = src_p[4] ~ x4
dst_p[5] = src_p[5] ~ x5
dst_p[6] = src_p[6] ~ x6
dst_p[7] = src_p[7] ~ x7
dst_p[8] = src_p[8] ~ x8
dst_p[9] = src_p[9] ~ x9
dst_p[10] = src_p[10] ~ x10
dst_p[11] = src_p[11] ~ x11
dst_p[12] = src_p[12] ~ x12
dst_p[13] = src_p[13] ~ x13
dst_p[14] = src_p[14] ~ x14
dst_p[15] = src_p[15] ~ x15
endian.unchecked_put_u32le(dst[0:4], endian.unchecked_get_u32le(src[0:4]) ~ x0)
endian.unchecked_put_u32le(dst[4:8], endian.unchecked_get_u32le(src[4:8]) ~ x1)
endian.unchecked_put_u32le(dst[8:12], endian.unchecked_get_u32le(src[8:12]) ~ x2)
endian.unchecked_put_u32le(dst[12:16], endian.unchecked_get_u32le(src[12:16]) ~ x3)
endian.unchecked_put_u32le(dst[16:20], endian.unchecked_get_u32le(src[16:20]) ~ x4)
endian.unchecked_put_u32le(dst[20:24], endian.unchecked_get_u32le(src[20:24]) ~ x5)
endian.unchecked_put_u32le(dst[24:28], endian.unchecked_get_u32le(src[24:28]) ~ x6)
endian.unchecked_put_u32le(dst[28:32], endian.unchecked_get_u32le(src[28:32]) ~ x7)
endian.unchecked_put_u32le(dst[32:36], endian.unchecked_get_u32le(src[32:36]) ~ x8)
endian.unchecked_put_u32le(dst[36:40], endian.unchecked_get_u32le(src[36:40]) ~ x9)
endian.unchecked_put_u32le(dst[40:44], endian.unchecked_get_u32le(src[40:44]) ~ x10)
endian.unchecked_put_u32le(dst[44:48], endian.unchecked_get_u32le(src[44:48]) ~ x11)
endian.unchecked_put_u32le(dst[48:52], endian.unchecked_get_u32le(src[48:52]) ~ x12)
endian.unchecked_put_u32le(dst[52:56], endian.unchecked_get_u32le(src[52:56]) ~ x13)
endian.unchecked_put_u32le(dst[56:60], endian.unchecked_get_u32le(src[56:60]) ~ x14)
endian.unchecked_put_u32le(dst[60:64], endian.unchecked_get_u32le(src[60:64]) ~ x15)
src = src[_BLOCK_SIZE:]
} else {
dst_p[0] = x0
dst_p[1] = x1
dst_p[2] = x2
dst_p[3] = x3
dst_p[4] = x4
dst_p[5] = x5
dst_p[6] = x6
dst_p[7] = x7
dst_p[8] = x8
dst_p[9] = x9
dst_p[10] = x10
dst_p[11] = x11
dst_p[12] = x12
dst_p[13] = x13
dst_p[14] = x14
dst_p[15] = x15
endian.unchecked_put_u32le(dst[0:4], x0)
endian.unchecked_put_u32le(dst[4:8], x1)
endian.unchecked_put_u32le(dst[8:12], x2)
endian.unchecked_put_u32le(dst[12:16], x3)
endian.unchecked_put_u32le(dst[16:20], x4)
endian.unchecked_put_u32le(dst[20:24], x5)
endian.unchecked_put_u32le(dst[24:28], x6)
endian.unchecked_put_u32le(dst[28:32], x7)
endian.unchecked_put_u32le(dst[32:36], x8)
endian.unchecked_put_u32le(dst[36:40], x9)
endian.unchecked_put_u32le(dst[40:44], x10)
endian.unchecked_put_u32le(dst[44:48], x11)
endian.unchecked_put_u32le(dst[48:52], x12)
endian.unchecked_put_u32le(dst[52:56], x13)
endian.unchecked_put_u32le(dst[56:60], x14)
endian.unchecked_put_u32le(dst[60:64], x15)
}
dst = dst[_BLOCK_SIZE:]
} else {
#no_bounds_check {
if src != nil {
util.PUT_U32_LE(dst[0:4], util.U32_LE(src[0:4]) ~ x0)
util.PUT_U32_LE(dst[4:8], util.U32_LE(src[4:8]) ~ x1)
util.PUT_U32_LE(dst[8:12], util.U32_LE(src[8:12]) ~ x2)
util.PUT_U32_LE(dst[12:16], util.U32_LE(src[12:16]) ~ x3)
util.PUT_U32_LE(dst[16:20], util.U32_LE(src[16:20]) ~ x4)
util.PUT_U32_LE(dst[20:24], util.U32_LE(src[20:24]) ~ x5)
util.PUT_U32_LE(dst[24:28], util.U32_LE(src[24:28]) ~ x6)
util.PUT_U32_LE(dst[28:32], util.U32_LE(src[28:32]) ~ x7)
util.PUT_U32_LE(dst[32:36], util.U32_LE(src[32:36]) ~ x8)
util.PUT_U32_LE(dst[36:40], util.U32_LE(src[36:40]) ~ x9)
util.PUT_U32_LE(dst[40:44], util.U32_LE(src[40:44]) ~ x10)
util.PUT_U32_LE(dst[44:48], util.U32_LE(src[44:48]) ~ x11)
util.PUT_U32_LE(dst[48:52], util.U32_LE(src[48:52]) ~ x12)
util.PUT_U32_LE(dst[52:56], util.U32_LE(src[52:56]) ~ x13)
util.PUT_U32_LE(dst[56:60], util.U32_LE(src[56:60]) ~ x14)
util.PUT_U32_LE(dst[60:64], util.U32_LE(src[60:64]) ~ x15)
src = src[_BLOCK_SIZE:]
} else {
util.PUT_U32_LE(dst[0:4], x0)
util.PUT_U32_LE(dst[4:8], x1)
util.PUT_U32_LE(dst[8:12], x2)
util.PUT_U32_LE(dst[12:16], x3)
util.PUT_U32_LE(dst[16:20], x4)
util.PUT_U32_LE(dst[20:24], x5)
util.PUT_U32_LE(dst[24:28], x6)
util.PUT_U32_LE(dst[28:32], x7)
util.PUT_U32_LE(dst[32:36], x8)
util.PUT_U32_LE(dst[36:40], x9)
util.PUT_U32_LE(dst[40:44], x10)
util.PUT_U32_LE(dst[44:48], x11)
util.PUT_U32_LE(dst[48:52], x12)
util.PUT_U32_LE(dst[52:56], x13)
util.PUT_U32_LE(dst[56:60], x14)
util.PUT_U32_LE(dst[60:64], x15)
}
dst = dst[_BLOCK_SIZE:]
}
}
// Increment the counter. Overflow checking is done upon
@@ -451,141 +406,141 @@ _do_blocks :: proc (ctx: ^Context, dst, src: []byte, nr_blocks: int) {
}
@(private)
_hchacha20 :: proc (dst, key, nonce: []byte) {
_hchacha20 :: proc "contextless" (dst, key, nonce: []byte) {
x0, x1, x2, x3 := _SIGMA_0, _SIGMA_1, _SIGMA_2, _SIGMA_3
x4 := util.U32_LE(key[0:4])
x5 := util.U32_LE(key[4:8])
x6 := util.U32_LE(key[8:12])
x7 := util.U32_LE(key[12:16])
x8 := util.U32_LE(key[16:20])
x9 := util.U32_LE(key[20:24])
x10 := util.U32_LE(key[24:28])
x11 := util.U32_LE(key[28:32])
x12 := util.U32_LE(nonce[0:4])
x13 := util.U32_LE(nonce[4:8])
x14 := util.U32_LE(nonce[8:12])
x15 := util.U32_LE(nonce[12:16])
x4 := endian.unchecked_get_u32le(key[0:4])
x5 := endian.unchecked_get_u32le(key[4:8])
x6 := endian.unchecked_get_u32le(key[8:12])
x7 := endian.unchecked_get_u32le(key[12:16])
x8 := endian.unchecked_get_u32le(key[16:20])
x9 := endian.unchecked_get_u32le(key[20:24])
x10 := endian.unchecked_get_u32le(key[24:28])
x11 := endian.unchecked_get_u32le(key[28:32])
x12 := endian.unchecked_get_u32le(nonce[0:4])
x13 := endian.unchecked_get_u32le(nonce[4:8])
x14 := endian.unchecked_get_u32le(nonce[8:12])
x15 := endian.unchecked_get_u32le(nonce[12:16])
for i := _ROUNDS; i > 0; i = i - 2 {
// quarterround(x, 0, 4, 8, 12)
x0 += x4
x12 ~= x0
x12 = util.ROTL32(x12, 16)
x12 = bits.rotate_left32(x12, 16)
x8 += x12
x4 ~= x8
x4 = util.ROTL32(x4, 12)
x4 = bits.rotate_left32(x4, 12)
x0 += x4
x12 ~= x0
x12 = util.ROTL32(x12, 8)
x12 = bits.rotate_left32(x12, 8)
x8 += x12
x4 ~= x8
x4 = util.ROTL32(x4, 7)
x4 = bits.rotate_left32(x4, 7)
// quarterround(x, 1, 5, 9, 13)
x1 += x5
x13 ~= x1
x13 = util.ROTL32(x13, 16)
x13 = bits.rotate_left32(x13, 16)
x9 += x13
x5 ~= x9
x5 = util.ROTL32(x5, 12)
x5 = bits.rotate_left32(x5, 12)
x1 += x5
x13 ~= x1
x13 = util.ROTL32(x13, 8)
x13 = bits.rotate_left32(x13, 8)
x9 += x13
x5 ~= x9
x5 = util.ROTL32(x5, 7)
x5 = bits.rotate_left32(x5, 7)
// quarterround(x, 2, 6, 10, 14)
x2 += x6
x14 ~= x2
x14 = util.ROTL32(x14, 16)
x14 = bits.rotate_left32(x14, 16)
x10 += x14
x6 ~= x10
x6 = util.ROTL32(x6, 12)
x6 = bits.rotate_left32(x6, 12)
x2 += x6
x14 ~= x2
x14 = util.ROTL32(x14, 8)
x14 = bits.rotate_left32(x14, 8)
x10 += x14
x6 ~= x10
x6 = util.ROTL32(x6, 7)
x6 = bits.rotate_left32(x6, 7)
// quarterround(x, 3, 7, 11, 15)
x3 += x7
x15 ~= x3
x15 = util.ROTL32(x15, 16)
x15 = bits.rotate_left32(x15, 16)
x11 += x15
x7 ~= x11
x7 = util.ROTL32(x7, 12)
x7 = bits.rotate_left32(x7, 12)
x3 += x7
x15 ~= x3
x15 = util.ROTL32(x15, 8)
x15 = bits.rotate_left32(x15, 8)
x11 += x15
x7 ~= x11
x7 = util.ROTL32(x7, 7)
x7 = bits.rotate_left32(x7, 7)
// quarterround(x, 0, 5, 10, 15)
x0 += x5
x15 ~= x0
x15 = util.ROTL32(x15, 16)
x15 = bits.rotate_left32(x15, 16)
x10 += x15
x5 ~= x10
x5 = util.ROTL32(x5, 12)
x5 = bits.rotate_left32(x5, 12)
x0 += x5
x15 ~= x0
x15 = util.ROTL32(x15, 8)
x15 = bits.rotate_left32(x15, 8)
x10 += x15
x5 ~= x10
x5 = util.ROTL32(x5, 7)
x5 = bits.rotate_left32(x5, 7)
// quarterround(x, 1, 6, 11, 12)
x1 += x6
x12 ~= x1
x12 = util.ROTL32(x12, 16)
x12 = bits.rotate_left32(x12, 16)
x11 += x12
x6 ~= x11
x6 = util.ROTL32(x6, 12)
x6 = bits.rotate_left32(x6, 12)
x1 += x6
x12 ~= x1
x12 = util.ROTL32(x12, 8)
x12 = bits.rotate_left32(x12, 8)
x11 += x12
x6 ~= x11
x6 = util.ROTL32(x6, 7)
x6 = bits.rotate_left32(x6, 7)
// quarterround(x, 2, 7, 8, 13)
x2 += x7
x13 ~= x2
x13 = util.ROTL32(x13, 16)
x13 = bits.rotate_left32(x13, 16)
x8 += x13
x7 ~= x8
x7 = util.ROTL32(x7, 12)
x7 = bits.rotate_left32(x7, 12)
x2 += x7
x13 ~= x2
x13 = util.ROTL32(x13, 8)
x13 = bits.rotate_left32(x13, 8)
x8 += x13
x7 ~= x8
x7 = util.ROTL32(x7, 7)
x7 = bits.rotate_left32(x7, 7)
// quarterround(x, 3, 4, 9, 14)
x3 += x4
x14 ~= x3
x14 = util.ROTL32(x14, 16)
x14 = bits.rotate_left32(x14, 16)
x9 += x14
x4 ~= x9
x4 = util.ROTL32(x4, 12)
x4 = bits.rotate_left32(x4, 12)
x3 += x4
x14 ~= x3
x14 = util.ROTL32(x14, 8)
x14 = bits.rotate_left32(x14, 8)
x9 += x14
x4 ~= x9
x4 = util.ROTL32(x4, 7)
x4 = bits.rotate_left32(x4, 7)
}
util.PUT_U32_LE(dst[0:4], x0)
util.PUT_U32_LE(dst[4:8], x1)
util.PUT_U32_LE(dst[8:12], x2)
util.PUT_U32_LE(dst[12:16], x3)
util.PUT_U32_LE(dst[16:20], x12)
util.PUT_U32_LE(dst[20:24], x13)
util.PUT_U32_LE(dst[24:28], x14)
util.PUT_U32_LE(dst[28:32], x15)
endian.unchecked_put_u32le(dst[0:4], x0)
endian.unchecked_put_u32le(dst[4:8], x1)
endian.unchecked_put_u32le(dst[8:12], x2)
endian.unchecked_put_u32le(dst[12:16], x3)
endian.unchecked_put_u32le(dst[16:20], x12)
endian.unchecked_put_u32le(dst[20:24], x13)
endian.unchecked_put_u32le(dst[24:28], x14)
endian.unchecked_put_u32le(dst[28:32], x15)
}
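All of the unrolled arithmetic above is the standard ChaCha quarter-round; written once as a helper, an illustrative equivalent (not part of the diff) would be:
quarterround :: #force_inline proc "contextless" (a, b, c, d: u32) -> (u32, u32, u32, u32) {
	a, b, c, d := a, b, c, d
	a += b; d ~= a; d = bits.rotate_left32(d, 16)
	c += d; b ~= c; b = bits.rotate_left32(b, 12)
	a += b; d ~= a; d = bits.rotate_left32(d, 8)
	c += d; b ~= c; b = bits.rotate_left32(b, 7)
	return a, b, c, d
}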

View File

@@ -3,7 +3,7 @@ package chacha20poly1305
import "core:crypto"
import "core:crypto/chacha20"
import "core:crypto/poly1305"
import "core:crypto/util"
import "core:encoding/endian"
import "core:mem"
KEY_SIZE :: chacha20.KEY_SIZE
@@ -87,8 +87,8 @@ encrypt :: proc (ciphertext, tag, key, nonce, aad, plaintext: []byte) {
// mac_data |= num_to_8_le_bytes(aad.length)
// mac_data |= num_to_8_le_bytes(ciphertext.length)
l_buf := otk[0:16] // Reuse the scratch buffer.
util.PUT_U64_LE(l_buf[0:8], u64(aad_len))
util.PUT_U64_LE(l_buf[8:16], u64(ciphertext_len))
endian.unchecked_put_u64le(l_buf[0:8], u64(aad_len))
endian.unchecked_put_u64le(l_buf[8:16], u64(ciphertext_len))
poly1305.update(&mac_ctx, l_buf)
// tag = poly1305_mac(mac_data, otk)
@@ -128,8 +128,8 @@ decrypt :: proc (plaintext, tag, key, nonce, aad, ciphertext: []byte) -> bool {
poly1305.update(&mac_ctx, ciphertext)
_update_mac_pad16(&mac_ctx, ciphertext_len)
l_buf := otk[0:16] // Reuse the scratch buffer.
util.PUT_U64_LE(l_buf[0:8], u64(aad_len))
util.PUT_U64_LE(l_buf[8:16], u64(ciphertext_len))
endian.unchecked_put_u64le(l_buf[0:8], u64(aad_len))
endian.unchecked_put_u64le(l_buf[8:16], u64(ciphertext_len))
poly1305.update(&mac_ctx, l_buf)
// tag = poly1305_mac(mac_data, otk)
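Per the `mac_data |= ...` comments above, the Poly1305 input is aad, pad16(aad), ciphertext, pad16(ciphertext), then le64(len(aad)) and le64(len(ciphertext)); the two unchecked_put_u64le calls build that final 16-byte length block. The pad16 step (presumably what _update_mac_pad16 implements) just zero-fills to a 16-byte boundary; a sketch of the pad-length arithmetic:
pad16_len :: proc "contextless" (n: int) -> int {
	return (16 - (n & 15)) & 15 // 0 when n is already a multiple of 16
}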

View File

@@ -1,382 +0,0 @@
package gost
/*
Copyright 2021 zhibog
Made available under the BSD-3 license.
List of contributors:
zhibog, dotbmp: Initial implementation.
Implementation of the GOST hashing algorithm, as defined in RFC 5831 <https://datatracker.ietf.org/doc/html/rfc5831>
*/
import "core:mem"
import "core:os"
import "core:io"
/*
High level API
*/
DIGEST_SIZE :: 32
// hash_string will hash the given input and return the
// computed hash
hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
return hash_bytes(transmute([]byte)(data))
}
// hash_bytes will hash the given input and return the
// computed hash
hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
hash: [DIGEST_SIZE]byte
ctx: Gost_Context
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
ctx: Gost_Context
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream will read the stream in chunks and compute a
// hash from its contents
hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
hash: [DIGEST_SIZE]byte
ctx: Gost_Context
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file will read the file provided by the given handle
// and compute a hash
hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes(buf[:]), ok
}
}
return [DIGEST_SIZE]byte{}, false
}
hash :: proc {
hash_stream,
hash_file,
hash_bytes,
hash_string,
hash_bytes_to_buffer,
hash_string_to_buffer,
}
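A one-call usage sketch of the group above (illustrative only):
example_gost :: proc() {
	digest := hash_string("Hellope") // 32-byte GOST R 34.11-94 digest
	_ = digest
}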
/*
Low level API
*/
init :: proc "contextless" (ctx: ^Gost_Context) {
sbox: [8][16]u32 = {
{ 10, 4, 5, 6, 8, 1, 3, 7, 13, 12, 14, 0, 9, 2, 11, 15 },
{ 5, 15, 4, 0, 2, 13, 11, 9, 1, 7, 6, 3, 12, 14, 10, 8 },
{ 7, 15, 12, 14, 9, 4, 1, 0, 3, 11, 5, 2, 6, 10, 8, 13 },
{ 4, 10, 7, 12, 0, 15, 2, 8, 14, 1, 6, 5, 13, 11, 9, 3 },
{ 7, 6, 4, 11, 9, 12, 2, 10, 1, 8, 0, 14, 15, 13, 3, 5 },
{ 7, 6, 2, 4, 13, 9, 15, 0, 10, 1, 5, 11, 8, 14, 12, 3 },
{ 13, 14, 4, 1, 7, 0, 5, 10, 3, 12, 8, 15, 6, 2, 9, 11 },
{ 1, 3, 10, 9, 5, 11, 4, 15, 8, 6, 7, 14, 13, 0, 2, 12 },
}
i := 0
for a := 0; a < 16; a += 1 {
ax := sbox[1][a] << 15
bx := sbox[3][a] << 23
cx := sbox[5][a]
cx = (cx >> 1) | (cx << 31)
dx := sbox[7][a] << 7
for b := 0; b < 16; b, i = b + 1, i + 1 {
SBOX_1[i] = ax | (sbox[0][b] << 11)
SBOX_2[i] = bx | (sbox[2][b] << 19)
SBOX_3[i] = cx | (sbox[4][b] << 27)
SBOX_4[i] = dx | (sbox[6][b] << 3)
}
}
}
update :: proc(ctx: ^Gost_Context, data: []byte) {
// track lengths as int: the previous byte-sized counters silently
// truncated any input longer than 255 bytes
length := len(data)
j: int
i := int(ctx.partial_bytes)
for i < 32 && j < length {
ctx.partial[i] = data[j]
i, j = i + 1, j + 1
}
if i < 32 {
ctx.partial_bytes = byte(i)
return
}
bytes(ctx, ctx.partial[:], 256)
for j + 32 < length {
bytes(ctx, data[j:], 256)
j += 32
}
i = 0
for j < length {
ctx.partial[i] = data[j]
i, j = i + 1, j + 1
}
ctx.partial_bytes = byte(i)
}
final :: proc(ctx: ^Gost_Context, hash: []byte) {
if ctx.partial_bytes > 0 {
mem.set(&ctx.partial[ctx.partial_bytes], 0, 32 - int(ctx.partial_bytes))
bytes(ctx, ctx.partial[:], u32(ctx.partial_bytes) << 3)
}
compress(ctx.hash[:], ctx.len[:])
compress(ctx.hash[:], ctx.sum[:])
for i, j := 0, 0; i < 8; i, j = i + 1, j + 4 {
hash[j] = byte(ctx.hash[i])
hash[j + 1] = byte(ctx.hash[i] >> 8)
hash[j + 2] = byte(ctx.hash[i] >> 16)
hash[j + 3] = byte(ctx.hash[i] >> 24)
}
}
/*
GOST implementation
*/
Gost_Context :: struct {
sum: [8]u32,
hash: [8]u32,
len: [8]u32,
partial: [32]byte,
partial_bytes: byte,
}
SBOX_1: [256]u32
SBOX_2: [256]u32
SBOX_3: [256]u32
SBOX_4: [256]u32
ENCRYPT_ROUND :: #force_inline proc "contextless" (l, r, t, k1, k2: u32) -> (u32, u32, u32) {
l, r, t := l, r, t
t = (k1) + r
l ~= SBOX_1[t & 0xff] ~ SBOX_2[(t >> 8) & 0xff] ~ SBOX_3[(t >> 16) & 0xff] ~ SBOX_4[t >> 24]
t = (k2) + l
r ~= SBOX_1[t & 0xff] ~ SBOX_2[(t >> 8) & 0xff] ~ SBOX_3[(t >> 16) & 0xff] ~ SBOX_4[t >> 24]
return l, r, t
}
ENCRYPT :: #force_inline proc "contextless" (a, b, c: u32, key: []u32) -> (l, r, t: u32) {
l, r, t = ENCRYPT_ROUND(a, b, c, key[0], key[1])
l, r, t = ENCRYPT_ROUND(l, r, t, key[2], key[3])
l, r, t = ENCRYPT_ROUND(l, r, t, key[4], key[5])
l, r, t = ENCRYPT_ROUND(l, r, t, key[6], key[7])
l, r, t = ENCRYPT_ROUND(l, r, t, key[0], key[1])
l, r, t = ENCRYPT_ROUND(l, r, t, key[2], key[3])
l, r, t = ENCRYPT_ROUND(l, r, t, key[4], key[5])
l, r, t = ENCRYPT_ROUND(l, r, t, key[6], key[7])
l, r, t = ENCRYPT_ROUND(l, r, t, key[0], key[1])
l, r, t = ENCRYPT_ROUND(l, r, t, key[2], key[3])
l, r, t = ENCRYPT_ROUND(l, r, t, key[4], key[5])
l, r, t = ENCRYPT_ROUND(l, r, t, key[6], key[7])
l, r, t = ENCRYPT_ROUND(l, r, t, key[7], key[6])
l, r, t = ENCRYPT_ROUND(l, r, t, key[5], key[4])
l, r, t = ENCRYPT_ROUND(l, r, t, key[3], key[2])
l, r, t = ENCRYPT_ROUND(l, r, t, key[1], key[0])
t = r
r = l
l = t
return
}
bytes :: proc(ctx: ^Gost_Context, buf: []byte, bits: u32) {
a, c: u32
m: [8]u32
for i, j := 0, 0; i < 8; i += 1 {
a = u32(buf[j]) | u32(buf[j + 1]) << 8 | u32(buf[j + 2]) << 16 | u32(buf[j + 3]) << 24
j += 4
m[i] = a
c = a + c + ctx.sum[i]
ctx.sum[i] = c
c = c < a ? 1 : 0
}
compress(ctx.hash[:], m[:])
ctx.len[0] += bits
if ctx.len[0] < bits {
ctx.len[1] += 1
}
}
compress :: proc(h, m: []u32) {
key, u, v, w, s: [8]u32
copy(u[:], h)
copy(v[:], m)
for i := 0; i < 8; i += 2 {
w[0] = u[0] ~ v[0]
w[1] = u[1] ~ v[1]
w[2] = u[2] ~ v[2]
w[3] = u[3] ~ v[3]
w[4] = u[4] ~ v[4]
w[5] = u[5] ~ v[5]
w[6] = u[6] ~ v[6]
w[7] = u[7] ~ v[7]
key[0] = (w[0] & 0x000000ff) | (w[2] & 0x000000ff) << 8 | (w[4] & 0x000000ff) << 16 | (w[6] & 0x000000ff) << 24
key[1] = (w[0] & 0x0000ff00) >> 8 | (w[2] & 0x0000ff00) | (w[4] & 0x0000ff00) << 8 | (w[6] & 0x0000ff00) << 16
key[2] = (w[0] & 0x00ff0000) >> 16 | (w[2] & 0x00ff0000) >> 8 | (w[4] & 0x00ff0000) | (w[6] & 0x00ff0000) << 8
key[3] = (w[0] & 0xff000000) >> 24 | (w[2] & 0xff000000) >> 16 | (w[4] & 0xff000000) >> 8 | (w[6] & 0xff000000)
key[4] = (w[1] & 0x000000ff) | (w[3] & 0x000000ff) << 8 | (w[5] & 0x000000ff) << 16 | (w[7] & 0x000000ff) << 24
key[5] = (w[1] & 0x0000ff00) >> 8 | (w[3] & 0x0000ff00) | (w[5] & 0x0000ff00) << 8 | (w[7] & 0x0000ff00) << 16
key[6] = (w[1] & 0x00ff0000) >> 16 | (w[3] & 0x00ff0000) >> 8 | (w[5] & 0x00ff0000) | (w[7] & 0x00ff0000) << 8
key[7] = (w[1] & 0xff000000) >> 24 | (w[3] & 0xff000000) >> 16 | (w[5] & 0xff000000) >> 8 | (w[7] & 0xff000000)
r := h[i]
l := h[i + 1]
t: u32
l, r, t = ENCRYPT(l, r, 0, key[:])
s[i] = r
s[i + 1] = l
if i == 6 {
break
}
l = u[0] ~ u[2]
r = u[1] ~ u[3]
u[0] = u[2]
u[1] = u[3]
u[2] = u[4]
u[3] = u[5]
u[4] = u[6]
u[5] = u[7]
u[6] = l
u[7] = r
if i == 2 {
u[0] ~= 0xff00ff00
u[1] ~= 0xff00ff00
u[2] ~= 0x00ff00ff
u[3] ~= 0x00ff00ff
u[4] ~= 0x00ffff00
u[5] ~= 0xff0000ff
u[6] ~= 0x000000ff
u[7] ~= 0xff00ffff
}
l = v[0]
r = v[2]
v[0] = v[4]
v[2] = v[6]
v[4] = l ~ r
v[6] = v[0] ~ r
l = v[1]
r = v[3]
v[1] = v[5]
v[3] = v[7]
v[5] = l ~ r
v[7] = v[1] ~ r
}
u[0] = m[0] ~ s[6]
u[1] = m[1] ~ s[7]
u[2] = m[2] ~ (s[0] << 16) ~ (s[0] >> 16) ~ (s[0] & 0xffff) ~
(s[1] & 0xffff) ~ (s[1] >> 16) ~ (s[2] << 16) ~ s[6] ~ (s[6] << 16) ~
(s[7] & 0xffff0000) ~ (s[7] >> 16)
u[3] = m[3] ~ (s[0] & 0xffff) ~ (s[0] << 16) ~ (s[1] & 0xffff) ~
(s[1] << 16) ~ (s[1] >> 16) ~ (s[2] << 16) ~ (s[2] >> 16) ~
(s[3] << 16) ~ s[6] ~ (s[6] << 16) ~ (s[6] >> 16) ~ (s[7] & 0xffff) ~
(s[7] << 16) ~ (s[7] >> 16)
u[4] = m[4] ~
(s[0] & 0xffff0000) ~ (s[0] << 16) ~ (s[0] >> 16) ~
(s[1] & 0xffff0000) ~ (s[1] >> 16) ~ (s[2] << 16) ~ (s[2] >> 16) ~
(s[3] << 16) ~ (s[3] >> 16) ~ (s[4] << 16) ~ (s[6] << 16) ~
(s[6] >> 16) ~ (s[7] & 0xffff) ~ (s[7] << 16) ~ (s[7] >> 16)
u[5] = m[5] ~ (s[0] << 16) ~ (s[0] >> 16) ~ (s[0] & 0xffff0000) ~
(s[1] & 0xffff) ~ s[2] ~ (s[2] >> 16) ~ (s[3] << 16) ~ (s[3] >> 16) ~
(s[4] << 16) ~ (s[4] >> 16) ~ (s[5] << 16) ~ (s[6] << 16) ~
(s[6] >> 16) ~ (s[7] & 0xffff0000) ~ (s[7] << 16) ~ (s[7] >> 16)
u[6] = m[6] ~ s[0] ~ (s[1] >> 16) ~ (s[2] << 16) ~ s[3] ~ (s[3] >> 16) ~
(s[4] << 16) ~ (s[4] >> 16) ~ (s[5] << 16) ~ (s[5] >> 16) ~ s[6] ~
(s[6] << 16) ~ (s[6] >> 16) ~ (s[7] << 16)
u[7] = m[7] ~ (s[0] & 0xffff0000) ~ (s[0] << 16) ~ (s[1] & 0xffff) ~
(s[1] << 16) ~ (s[2] >> 16) ~ (s[3] << 16) ~ s[4] ~ (s[4] >> 16) ~
(s[5] << 16) ~ (s[5] >> 16) ~ (s[6] >> 16) ~ (s[7] & 0xffff) ~
(s[7] << 16) ~ (s[7] >> 16)
v[0] = h[0] ~ (u[1] << 16) ~ (u[0] >> 16)
v[1] = h[1] ~ (u[2] << 16) ~ (u[1] >> 16)
v[2] = h[2] ~ (u[3] << 16) ~ (u[2] >> 16)
v[3] = h[3] ~ (u[4] << 16) ~ (u[3] >> 16)
v[4] = h[4] ~ (u[5] << 16) ~ (u[4] >> 16)
v[5] = h[5] ~ (u[6] << 16) ~ (u[5] >> 16)
v[6] = h[6] ~ (u[7] << 16) ~ (u[6] >> 16)
v[7] = h[7] ~ (u[0] & 0xffff0000) ~ (u[0] << 16) ~ (u[7] >> 16) ~ (u[1] & 0xffff0000) ~ (u[1] << 16) ~ (u[6] << 16) ~ (u[7] & 0xffff0000)
h[0] = (v[0] & 0xffff0000) ~ (v[0] << 16) ~ (v[0] >> 16) ~ (v[1] >> 16) ~
(v[1] & 0xffff0000) ~ (v[2] << 16) ~ (v[3] >> 16) ~ (v[4] << 16) ~
(v[5] >> 16) ~ v[5] ~ (v[6] >> 16) ~ (v[7] << 16) ~ (v[7] >> 16) ~
(v[7] & 0xffff)
h[1] = (v[0] << 16) ~ (v[0] >> 16) ~ (v[0] & 0xffff0000) ~ (v[1] & 0xffff) ~
v[2] ~ (v[2] >> 16) ~ (v[3] << 16) ~ (v[4] >> 16) ~ (v[5] << 16) ~
(v[6] << 16) ~ v[6] ~ (v[7] & 0xffff0000) ~ (v[7] >> 16)
h[2] = (v[0] & 0xffff) ~ (v[0] << 16) ~ (v[1] << 16) ~ (v[1] >> 16) ~
(v[1] & 0xffff0000) ~ (v[2] << 16) ~ (v[3] >> 16) ~ v[3] ~ (v[4] << 16) ~
(v[5] >> 16) ~ v[6] ~ (v[6] >> 16) ~ (v[7] & 0xffff) ~ (v[7] << 16) ~
(v[7] >> 16)
h[3] = (v[0] << 16) ~ (v[0] >> 16) ~ (v[0] & 0xffff0000) ~
(v[1] & 0xffff0000) ~ (v[1] >> 16) ~ (v[2] << 16) ~ (v[2] >> 16) ~ v[2] ~
(v[3] << 16) ~ (v[4] >> 16) ~ v[4] ~ (v[5] << 16) ~ (v[6] << 16) ~
(v[7] & 0xffff) ~ (v[7] >> 16)
h[4] = (v[0] >> 16) ~ (v[1] << 16) ~ v[1] ~ (v[2] >> 16) ~ v[2] ~
(v[3] << 16) ~ (v[3] >> 16) ~ v[3] ~ (v[4] << 16) ~ (v[5] >> 16) ~
v[5] ~ (v[6] << 16) ~ (v[6] >> 16) ~ (v[7] << 16)
h[5] = (v[0] << 16) ~ (v[0] & 0xffff0000) ~ (v[1] << 16) ~ (v[1] >> 16) ~
(v[1] & 0xffff0000) ~ (v[2] << 16) ~ v[2] ~ (v[3] >> 16) ~ v[3] ~
(v[4] << 16) ~ (v[4] >> 16) ~ v[4] ~ (v[5] << 16) ~ (v[6] << 16) ~
(v[6] >> 16) ~ v[6] ~ (v[7] << 16) ~ (v[7] >> 16) ~ (v[7] & 0xffff0000)
h[6] = v[0] ~ v[2] ~ (v[2] >> 16) ~ v[3] ~ (v[3] << 16) ~ v[4] ~
(v[4] >> 16) ~ (v[5] << 16) ~ (v[5] >> 16) ~ v[5] ~ (v[6] << 16) ~
(v[6] >> 16) ~ v[6] ~ (v[7] << 16) ~ v[7]
h[7] = v[0] ~ (v[0] >> 16) ~ (v[1] << 16) ~ (v[1] >> 16) ~ (v[2] << 16) ~
(v[3] >> 16) ~ v[3] ~ (v[4] << 16) ~ v[4] ~ (v[5] >> 16) ~ v[5] ~
(v[6] << 16) ~ (v[6] >> 16) ~ (v[7] << 16) ~ v[7]
}

View File

@@ -1,653 +0,0 @@
package groestl
/*
Copyright 2021 zhibog
Made available under the BSD-3 license.
List of contributors:
zhibog, dotbmp: Initial implementation.
Implementation of the GROESTL hashing algorithm, as defined in <http://www.groestl.info/Groestl.zip>
*/
import "core:os"
import "core:io"
/*
High level API
*/
DIGEST_SIZE_224 :: 28
DIGEST_SIZE_256 :: 32
DIGEST_SIZE_384 :: 48
DIGEST_SIZE_512 :: 64
// hash_string_224 will hash the given input and return the
// computed hash
hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
return hash_bytes_224(transmute([]byte)(data))
}
// hash_bytes_224 will hash the given input and return the
// computed hash
hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
hash: [DIGEST_SIZE_224]byte
ctx: Groestl_Context
ctx.hashbitlen = 224
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_224 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_224 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
ctx: Groestl_Context
ctx.hashbitlen = 224
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream_224 will read the stream in chunks and compute a
// hash from its contents
hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
hash: [DIGEST_SIZE_224]byte
ctx: Groestl_Context
ctx.hashbitlen = 224
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_224 will read the file provided by the given handle
// and compute a hash
hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
if !load_at_once {
return hash_stream_224(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_224(buf[:]), ok
}
}
return [DIGEST_SIZE_224]byte{}, false
}
hash_224 :: proc {
hash_stream_224,
hash_file_224,
hash_bytes_224,
hash_string_224,
hash_bytes_to_buffer_224,
hash_string_to_buffer_224,
}
// hash_string_256 will hash the given input and return the
// computed hash
hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
return hash_bytes_256(transmute([]byte)(data))
}
// hash_bytes_256 will hash the given input and return the
// computed hash
hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
hash: [DIGEST_SIZE_256]byte
ctx: Groestl_Context
ctx.hashbitlen = 256
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_256 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_256 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
ctx: Groestl_Context
ctx.hashbitlen = 256
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream_256 will read the stream in chunks and compute a
// hash from its contents
hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
hash: [DIGEST_SIZE_256]byte
ctx: Groestl_Context
ctx.hashbitlen = 256
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_256 will read the file provided by the given handle
// and compute a hash
hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
if !load_at_once {
return hash_stream_256(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_256(buf[:]), ok
}
}
return [DIGEST_SIZE_256]byte{}, false
}
hash_256 :: proc {
hash_stream_256,
hash_file_256,
hash_bytes_256,
hash_string_256,
hash_bytes_to_buffer_256,
hash_string_to_buffer_256,
}
// hash_string_384 will hash the given input and return the
// computed hash
hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte {
return hash_bytes_384(transmute([]byte)(data))
}
// hash_bytes_384 will hash the given input and return the
// computed hash
hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
hash: [DIGEST_SIZE_384]byte
ctx: Groestl_Context
ctx.hashbitlen = 384
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_384 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_384 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_384, "Size of destination buffer is smaller than the digest size")
ctx: Groestl_Context
ctx.hashbitlen = 384
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream_384 will read the stream in chunks and compute a
// hash from its contents
hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
hash: [DIGEST_SIZE_384]byte
ctx: Groestl_Context
ctx.hashbitlen = 384
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_384 will read the file provided by the given handle
// and compute a hash
hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
if !load_at_once {
return hash_stream_384(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_384(buf[:]), ok
}
}
return [DIGEST_SIZE_384]byte{}, false
}
hash_384 :: proc {
hash_stream_384,
hash_file_384,
hash_bytes_384,
hash_string_384,
hash_bytes_to_buffer_384,
hash_string_to_buffer_384,
}
// hash_string_512 will hash the given input and return the
// computed hash
hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
return hash_bytes_512(transmute([]byte)(data))
}
// hash_bytes_512 will hash the given input and return the
// computed hash
hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
hash: [DIGEST_SIZE_512]byte
ctx: Groestl_Context
ctx.hashbitlen = 512
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_512 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_512 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
ctx: Groestl_Context
ctx.hashbitlen = 512
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream_512 will read the stream in chunks and compute a
// hash from its contents
hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
hash: [DIGEST_SIZE_512]byte
ctx: Groestl_Context
ctx.hashbitlen = 512
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_512 will read the file provided by the given handle
// and compute a hash
hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
if !load_at_once {
return hash_stream_512(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_512(buf[:]), ok
}
}
return [DIGEST_SIZE_512]byte{}, false
}
hash_512 :: proc {
hash_stream_512,
hash_file_512,
hash_bytes_512,
hash_string_512,
hash_bytes_to_buffer_512,
hash_string_to_buffer_512,
}
/*
Low level API
*/
init :: proc(ctx: ^Groestl_Context) {
assert(ctx.hashbitlen == 224 || ctx.hashbitlen == 256 || ctx.hashbitlen == 384 || ctx.hashbitlen == 512, "hashbitlen must be set to 224, 256, 384 or 512")
if ctx.hashbitlen <= 256 {
ctx.rounds = 10
ctx.columns = 8
ctx.statesize = 64
} else {
ctx.rounds = 14
ctx.columns = 16
ctx.statesize = 128
}
for i := 8 - size_of(i32); i < 8; i += 1 {
ctx.chaining[i][ctx.columns - 1] = byte(ctx.hashbitlen >> (8 * (7 - uint(i))))
}
}
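// For illustration, a minimal low-level usage sketch (hypothetical caller
// code; `data` and `digest` are assumed to be supplied by the caller):
//
//	ctx: Groestl_Context
//	ctx.hashbitlen = 256 // must be set before init, per the assert above
//	init(&ctx)
//	update(&ctx, data)
//	final(&ctx, digest[:])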
update :: proc(ctx: ^Groestl_Context, data: []byte) {
databitlen := len(data) * 8
msglen := databitlen / 8
rem := databitlen % 8
i: int
assert(ctx.bits_in_last_byte == 0)
if ctx.buf_ptr != 0 {
for i = 0; ctx.buf_ptr < ctx.statesize && i < msglen; i, ctx.buf_ptr = i + 1, ctx.buf_ptr + 1 {
ctx.buffer[ctx.buf_ptr] = data[i]
}
if ctx.buf_ptr < ctx.statesize {
if rem != 0 {
ctx.bits_in_last_byte = rem
ctx.buffer[ctx.buf_ptr] = data[i]
ctx.buf_ptr += 1
}
return
}
ctx.buf_ptr = 0
transform(ctx, ctx.buffer[:], u32(ctx.statesize))
}
transform(ctx, data[i:], u32(msglen - i))
i += ((msglen - i) / ctx.statesize) * ctx.statesize
for i < msglen {
ctx.buffer[ctx.buf_ptr] = data[i]
i, ctx.buf_ptr = i + 1, ctx.buf_ptr + 1
}
if rem != 0 {
ctx.bits_in_last_byte = rem
ctx.buffer[ctx.buf_ptr] = data[i]
ctx.buf_ptr += 1
}
}
final :: proc(ctx: ^Groestl_Context, hash: []byte) {
hashbytelen := ctx.hashbitlen / 8
if ctx.bits_in_last_byte != 0 {
ctx.buffer[ctx.buf_ptr - 1] &= ((1 << uint(ctx.bits_in_last_byte)) - 1) << (8 - uint(ctx.bits_in_last_byte))
ctx.buffer[ctx.buf_ptr - 1] ~= 0x1 << (7 - uint(ctx.bits_in_last_byte))
} else {
ctx.buffer[ctx.buf_ptr] = 0x80
ctx.buf_ptr += 1
}
if ctx.buf_ptr > ctx.statesize - 8 {
for ctx.buf_ptr < ctx.statesize {
ctx.buffer[ctx.buf_ptr] = 0
ctx.buf_ptr += 1
}
transform(ctx, ctx.buffer[:], u32(ctx.statesize))
ctx.buf_ptr = 0
}
for ctx.buf_ptr < ctx.statesize - 8 {
ctx.buffer[ctx.buf_ptr] = 0
ctx.buf_ptr += 1
}
ctx.block_counter += 1
ctx.buf_ptr = ctx.statesize
for ctx.buf_ptr > ctx.statesize - 8 {
ctx.buf_ptr -= 1
ctx.buffer[ctx.buf_ptr] = byte(ctx.block_counter)
ctx.block_counter >>= 8
}
transform(ctx, ctx.buffer[:], u32(ctx.statesize))
output_transformation(ctx)
for i, j := ctx.statesize - hashbytelen , 0; i < ctx.statesize; i, j = i + 1, j + 1 {
hash[j] = ctx.chaining[i % 8][i / 8]
}
}
/*
GROESTL implementation
*/
SBOX := [256]byte {
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16,
}
SHIFT := [2][2][8]int {
{{0, 1, 2, 3, 4, 5, 6, 7}, {1, 3, 5, 7, 0, 2, 4, 6}},
{{0, 1, 2, 3, 4, 5, 6, 11}, {1, 3, 5, 11, 0, 2, 4, 6}},
}
Groestl_Context :: struct {
chaining: [8][16]byte,
block_counter: u64,
hashbitlen: int,
buffer: [128]byte,
buf_ptr: int,
bits_in_last_byte: int,
columns: int,
rounds: int,
statesize: int,
}
Groestl_Variant :: enum {
P512 = 0,
Q512 = 1,
P1024 = 2,
Q1024 = 3,
}
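// The MUL* helpers below multiply by small constants in GF(2^8) modulo the
// AES polynomial x^8 + x^4 + x^3 + x + 1, hence the conditional reduction
// by 0x1b on overflow; larger factors are composed from repeated doubling.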
MUL2 :: #force_inline proc "contextless"(b: byte) -> byte {
return (b >> 7) != 0 ? (b << 1) ~ 0x1b : (b << 1)
}
MUL3 :: #force_inline proc "contextless"(b: byte) -> byte {
return MUL2(b) ~ b
}
MUL4 :: #force_inline proc "contextless"(b: byte) -> byte {
return MUL2(MUL2(b))
}
MUL5 :: #force_inline proc "contextless"(b: byte) -> byte {
return MUL4(b) ~ b
}
MUL6 :: #force_inline proc "contextless"(b: byte) -> byte {
return MUL4(b) ~ MUL2(b)
}
MUL7 :: #force_inline proc "contextless"(b: byte) -> byte {
return MUL4(b) ~ MUL2(b) ~ b
}
sub_bytes :: #force_inline proc (x: [][16]byte, columns: int) {
for i := 0; i < 8; i += 1 {
for j := 0; j < columns; j += 1 {
x[i][j] = SBOX[x[i][j]]
}
}
}
shift_bytes :: #force_inline proc (x: [][16]byte, columns: int, v: Groestl_Variant) {
temp: [16]byte
R := &SHIFT[int(v) / 2][int(v) & 1]
for i := 0; i < 8; i += 1 {
for j := 0; j < columns; j += 1 {
temp[j] = x[i][(j + R[i]) % columns]
}
for j := 0; j < columns; j += 1 {
x[i][j] = temp[j]
}
}
}
mix_bytes :: #force_inline proc (x: [][16]byte, columns: int) {
temp: [8]byte
for i := 0; i < columns; i += 1 {
for j := 0; j < 8; j += 1 {
temp[j] = MUL2(x[(j + 0) % 8][i]) ~
MUL2(x[(j + 1) % 8][i]) ~
MUL3(x[(j + 2) % 8][i]) ~
MUL4(x[(j + 3) % 8][i]) ~
MUL5(x[(j + 4) % 8][i]) ~
MUL3(x[(j + 5) % 8][i]) ~
MUL5(x[(j + 6) % 8][i]) ~
MUL7(x[(j + 7) % 8][i])
}
for j := 0; j < 8; j += 1 {
x[j][i] = temp[j]
}
}
}
p :: #force_inline proc (ctx: ^Groestl_Context, x: [][16]byte) {
v := ctx.columns == 8 ? Groestl_Variant.P512 : Groestl_Variant.P1024
for i := 0; i < ctx.rounds; i += 1 {
add_roundconstant(x, ctx.columns, byte(i), v)
sub_bytes(x, ctx.columns)
shift_bytes(x, ctx.columns, v)
mix_bytes(x, ctx.columns)
}
}
q :: #force_inline proc (ctx: ^Groestl_Context, x: [][16]byte) {
v := ctx.columns == 8 ? Groestl_Variant.Q512 : Groestl_Variant.Q1024
for i := 0; i < ctx.rounds; i += 1 {
add_roundconstant(x, ctx.columns, byte(i), v)
sub_bytes(x, ctx.columns)
shift_bytes(x, ctx.columns, v)
mix_bytes(x, ctx.columns)
}
}
transform :: proc(ctx: ^Groestl_Context, input: []byte, msglen: u32) {
tmp1, tmp2: [8][16]byte
input, msglen := input, msglen
for msglen >= u32(ctx.statesize) {
for i := 0; i < 8; i += 1 {
for j := 0; j < ctx.columns; j += 1 {
tmp1[i][j] = ctx.chaining[i][j] ~ input[j * 8 + i]
tmp2[i][j] = input[j * 8 + i]
}
}
p(ctx, tmp1[:])
q(ctx, tmp2[:])
for i := 0; i < 8; i += 1 {
for j := 0; j < ctx.columns; j += 1 {
ctx.chaining[i][j] ~= tmp1[i][j] ~ tmp2[i][j]
}
}
ctx.block_counter += 1
msglen -= u32(ctx.statesize)
input = input[ctx.statesize:]
}
}
output_transformation :: proc(ctx: ^Groestl_Context) {
temp: [8][16]byte
for i := 0; i < 8; i += 1 {
for j := 0; j < ctx.columns; j += 1 {
temp[i][j] = ctx.chaining[i][j]
}
}
p(ctx, temp[:])
for i := 0; i < 8; i += 1 {
for j := 0; j < ctx.columns; j += 1 {
ctx.chaining[i][j] ~= temp[i][j]
}
}
}
add_roundconstant :: proc(x: [][16]byte, columns: int, round: byte, v: Groestl_Variant) {
switch (i32(v) & 1) {
case 0:
for i := 0; i < columns; i += 1 {
x[0][i] ~= byte(i << 4) ~ round
}
case 1:
for i := 0; i < columns; i += 1 {
for j := 0; j < 7; j += 1 {
x[j][i] ~= 0xff
}
}
for i := 0; i < columns; i += 1 {
x[7][i] ~= byte(i << 4) ~ 0xff ~ round
}
}
}

File diff suppressed because it is too large

View File

@@ -1,584 +0,0 @@
package jh
/*
Copyright 2021 zhibog
Made available under the BSD-3 license.
List of contributors:
zhibog, dotbmp: Initial implementation.
Implementation of the JH hashing algorithm, as defined in <https://www3.ntu.edu.sg/home/wuhj/research/jh/index.html>
*/
import "core:os"
import "core:io"
/*
High level API
*/
DIGEST_SIZE_224 :: 28
DIGEST_SIZE_256 :: 32
DIGEST_SIZE_384 :: 48
DIGEST_SIZE_512 :: 64
// hash_string_224 will hash the given input and return the
// computed hash
hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
return hash_bytes_224(transmute([]byte)(data))
}
// hash_bytes_224 will hash the given input and return the
// computed hash
hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
hash: [DIGEST_SIZE_224]byte
ctx: Jh_Context
ctx.hashbitlen = 224
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_224 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_224 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
ctx: Jh_Context
ctx.hashbitlen = 224
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream_224 will read the stream in chunks and compute a
// hash from its contents
hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
hash: [DIGEST_SIZE_224]byte
ctx: Jh_Context
ctx.hashbitlen = 224
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_224 will read the file provided by the given handle
// and compute a hash
hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
if !load_at_once {
return hash_stream_224(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_224(buf[:]), ok
}
}
return [DIGEST_SIZE_224]byte{}, false
}
hash_224 :: proc {
hash_stream_224,
hash_file_224,
hash_bytes_224,
hash_string_224,
hash_bytes_to_buffer_224,
hash_string_to_buffer_224,
}
// hash_string_256 will hash the given input and return the
// computed hash
hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
return hash_bytes_256(transmute([]byte)(data))
}
// hash_bytes_256 will hash the given input and return the
// computed hash
hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
hash: [DIGEST_SIZE_256]byte
ctx: Jh_Context
ctx.hashbitlen = 256
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_256 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_256 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
ctx: Jh_Context
ctx.hashbitlen = 256
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream_256 will read the stream in chunks and compute a
// hash from its contents
hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
hash: [DIGEST_SIZE_256]byte
ctx: Jh_Context
ctx.hashbitlen = 256
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_256 will read the file provided by the given handle
// and compute a hash
hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
if !load_at_once {
return hash_stream_256(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_256(buf[:]), ok
}
}
return [DIGEST_SIZE_256]byte{}, false
}
hash_256 :: proc {
hash_stream_256,
hash_file_256,
hash_bytes_256,
hash_string_256,
hash_bytes_to_buffer_256,
hash_string_to_buffer_256,
}
// hash_string_384 will hash the given input and return the
// computed hash
hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte {
return hash_bytes_384(transmute([]byte)(data))
}
// hash_bytes_384 will hash the given input and return the
// computed hash
hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
hash: [DIGEST_SIZE_384]byte
ctx: Jh_Context
ctx.hashbitlen = 384
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_384 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_384 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_384, "Size of destination buffer is smaller than the digest size")
ctx: Jh_Context
ctx.hashbitlen = 384
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream_384 will read the stream in chunks and compute a
// hash from its contents
hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
hash: [DIGEST_SIZE_384]byte
ctx: Jh_Context
ctx.hashbitlen = 384
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_384 will read the file provided by the given handle
// and compute a hash
hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
if !load_at_once {
return hash_stream_384(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_384(buf[:]), ok
}
}
return [DIGEST_SIZE_384]byte{}, false
}
hash_384 :: proc {
hash_stream_384,
hash_file_384,
hash_bytes_384,
hash_string_384,
hash_bytes_to_buffer_384,
hash_string_to_buffer_384,
}
// hash_string_512 will hash the given input and return the
// computed hash
hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
return hash_bytes_512(transmute([]byte)(data))
}
// hash_bytes_512 will hash the given input and return the
// computed hash
hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
hash: [DIGEST_SIZE_512]byte
ctx: Jh_Context
ctx.hashbitlen = 512
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_512 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_512 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
ctx: Jh_Context
ctx.hashbitlen = 512
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream_512 will read the stream in chunks and compute a
// hash from its contents
hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
hash: [DIGEST_SIZE_512]byte
ctx: Jh_Context
ctx.hashbitlen = 512
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_512 will read the file provided by the given handle
// and compute a hash
hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
if !load_at_once {
return hash_stream_512(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_512(buf[:]), ok
}
}
return [DIGEST_SIZE_512]byte{}, false
}
hash_512 :: proc {
hash_stream_512,
hash_file_512,
hash_bytes_512,
hash_string_512,
hash_bytes_to_buffer_512,
hash_string_to_buffer_512,
}
/*
Low level API
*/
init :: proc(ctx: ^Jh_Context) {
assert(ctx.hashbitlen == 224 || ctx.hashbitlen == 256 || ctx.hashbitlen == 384 || ctx.hashbitlen == 512, "hashbitlen must be set to 224, 256, 384 or 512")
ctx.H[1] = byte(ctx.hashbitlen) & 0xff
ctx.H[0] = byte(ctx.hashbitlen >> 8) & 0xff
F8(ctx)
}
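// For illustration, a minimal low-level usage sketch (hypothetical caller
// code; `data` and `digest` are assumed to be supplied by the caller):
//
//	ctx: Jh_Context
//	ctx.hashbitlen = 256 // must be set before init, per the assert above
//	init(&ctx)
//	update(&ctx, data)
//	final(&ctx, digest[:])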
update :: proc(ctx: ^Jh_Context, data: []byte) {
databitlen := u64(len(data)) * 8
ctx.databitlen += databitlen
i := u64(0)
if (ctx.buffer_size > 0) && ((ctx.buffer_size + databitlen) < 512) {
if (databitlen & 7) == 0 {
copy(ctx.buffer[ctx.buffer_size >> 3:], data[:64 - (ctx.buffer_size >> 3)])
} else {
copy(ctx.buffer[ctx.buffer_size >> 3:], data[:64 - (ctx.buffer_size >> 3) + 1])
}
ctx.buffer_size += databitlen
databitlen = 0
}
if (ctx.buffer_size > 0 ) && ((ctx.buffer_size + databitlen) >= 512) {
copy(ctx.buffer[ctx.buffer_size >> 3:], data[:64 - (ctx.buffer_size >> 3)])
i = 64 - (ctx.buffer_size >> 3)
databitlen = databitlen - (512 - ctx.buffer_size)
F8(ctx)
ctx.buffer_size = 0
}
for databitlen >= 512 {
copy(ctx.buffer[:], data[i:i + 64])
F8(ctx)
i += 64
databitlen -= 512
}
if databitlen > 0 {
if (databitlen & 7) == 0 {
copy(ctx.buffer[:], data[i:i + ((databitlen & 0x1ff) >> 3)])
} else {
copy(ctx.buffer[:], data[i:i + ((databitlen & 0x1ff) >> 3) + 1])
}
ctx.buffer_size = databitlen
}
}
final :: proc(ctx: ^Jh_Context, hash: []byte) {
if ctx.databitlen & 0x1ff == 0 {
for i := 0; i < 64; i += 1 {
ctx.buffer[i] = 0
}
ctx.buffer[0] = 0x80
ctx.buffer[63] = byte(ctx.databitlen) & 0xff
ctx.buffer[62] = byte(ctx.databitlen >> 8) & 0xff
ctx.buffer[61] = byte(ctx.databitlen >> 16) & 0xff
ctx.buffer[60] = byte(ctx.databitlen >> 24) & 0xff
ctx.buffer[59] = byte(ctx.databitlen >> 32) & 0xff
ctx.buffer[58] = byte(ctx.databitlen >> 40) & 0xff
ctx.buffer[57] = byte(ctx.databitlen >> 48) & 0xff
ctx.buffer[56] = byte(ctx.databitlen >> 56) & 0xff
F8(ctx)
} else {
if ctx.buffer_size & 7 == 0 {
for i := (ctx.databitlen & 0x1ff) >> 3; i < 64; i += 1 {
ctx.buffer[i] = 0
}
} else {
for i := ((ctx.databitlen & 0x1ff) >> 3) + 1; i < 64; i += 1 {
ctx.buffer[i] = 0
}
}
ctx.buffer[(ctx.databitlen & 0x1ff) >> 3] |= 1 << (7 - (ctx.databitlen & 7))
F8(ctx)
for i := 0; i < 64; i += 1 {
ctx.buffer[i] = 0
}
ctx.buffer[63] = byte(ctx.databitlen) & 0xff
ctx.buffer[62] = byte(ctx.databitlen >> 8) & 0xff
ctx.buffer[61] = byte(ctx.databitlen >> 16) & 0xff
ctx.buffer[60] = byte(ctx.databitlen >> 24) & 0xff
ctx.buffer[59] = byte(ctx.databitlen >> 32) & 0xff
ctx.buffer[58] = byte(ctx.databitlen >> 40) & 0xff
ctx.buffer[57] = byte(ctx.databitlen >> 48) & 0xff
ctx.buffer[56] = byte(ctx.databitlen >> 56) & 0xff
F8(ctx)
}
switch ctx.hashbitlen {
case 224: copy(hash[:], ctx.H[100:128])
case 256: copy(hash[:], ctx.H[96:128])
case 384: copy(hash[:], ctx.H[80:128])
case 512: copy(hash[:], ctx.H[64:128])
}
}
/*
JH implementation
*/
ROUNDCONSTANT_ZERO := [64]byte {
0x6, 0xa, 0x0, 0x9, 0xe, 0x6, 0x6, 0x7,
0xf, 0x3, 0xb, 0xc, 0xc, 0x9, 0x0, 0x8,
0xb, 0x2, 0xf, 0xb, 0x1, 0x3, 0x6, 0x6,
0xe, 0xa, 0x9, 0x5, 0x7, 0xd, 0x3, 0xe,
0x3, 0xa, 0xd, 0xe, 0xc, 0x1, 0x7, 0x5,
0x1, 0x2, 0x7, 0x7, 0x5, 0x0, 0x9, 0x9,
0xd, 0xa, 0x2, 0xf, 0x5, 0x9, 0x0, 0xb,
0x0, 0x6, 0x6, 0x7, 0x3, 0x2, 0x2, 0xa,
}
SBOX := [2][16]byte {
{9, 0, 4, 11, 13, 12, 3, 15, 1, 10, 2, 6, 7, 5, 8, 14},
{3, 12, 6, 13, 5, 7, 1, 9, 15, 2, 0, 4, 11, 10, 14, 8},
}
Jh_Context :: struct {
hashbitlen: int,
databitlen: u64,
buffer_size: u64,
H: [128]byte,
A: [256]byte,
roundconstant: [64]byte,
buffer: [64]byte,
}
E8_finaldegroup :: proc(ctx: ^Jh_Context) {
t0,t1,t2,t3: byte
tem: [256]byte
for i := 0; i < 128; i += 1 {
tem[i] = ctx.A[i << 1]
tem[i + 128] = ctx.A[(i << 1) + 1]
}
for i := 0; i < 128; i += 1 {
ctx.H[i] = 0
}
for i := 0; i < 256; i += 1 {
t0 = (tem[i] >> 3) & 1
t1 = (tem[i] >> 2) & 1
t2 = (tem[i] >> 1) & 1
t3 = (tem[i] >> 0) & 1
ctx.H[uint(i) >> 3] |= t0 << (7 - (uint(i) & 7))
ctx.H[(uint(i) + 256) >> 3] |= t1 << (7 - (uint(i) & 7))
ctx.H[(uint(i) + 512) >> 3] |= t2 << (7 - (uint(i) & 7))
ctx.H[(uint(i) + 768) >> 3] |= t3 << (7 - (uint(i) & 7))
}
}
update_roundconstant :: proc(ctx: ^Jh_Context) {
tem: [64]byte
t: byte
for i := 0; i < 64; i += 1 {
tem[i] = SBOX[0][ctx.roundconstant[i]]
}
for i := 0; i < 64; i += 2 {
tem[i + 1] ~= ((tem[i] << 1) ~ (tem[i] >> 3) ~ ((tem[i] >> 2) & 2)) & 0xf
tem[i] ~= ((tem[i + 1] << 1) ~ (tem[i + 1] >> 3) ~ ((tem[i + 1] >> 2) & 2)) & 0xf
}
for i := 0; i < 64; i += 4 {
t = tem[i + 2]
tem[i + 2] = tem[i + 3]
tem[i + 3] = t
}
for i := 0; i < 32; i += 1 {
ctx.roundconstant[i] = tem[i << 1]
ctx.roundconstant[i + 32] = tem[(i << 1) + 1]
}
for i := 32; i < 64; i += 2 {
t = ctx.roundconstant[i]
ctx.roundconstant[i] = ctx.roundconstant[i + 1]
ctx.roundconstant[i + 1] = t
}
}
R8 :: proc(ctx: ^Jh_Context) {
t: byte
tem, roundconstant_expanded: [256]byte
for i := u32(0); i < 256; i += 1 {
roundconstant_expanded[i] = (ctx.roundconstant[i >> 2] >> (3 - (i & 3)) ) & 1
}
for i := 0; i < 256; i += 1 {
tem[i] = SBOX[roundconstant_expanded[i]][ctx.A[i]]
}
for i := 0; i < 256; i += 2 {
tem[i+1] ~= ((tem[i] << 1) ~ (tem[i] >> 3) ~ ((tem[i] >> 2) & 2)) & 0xf
tem[i] ~= ((tem[i + 1] << 1) ~ (tem[i + 1] >> 3) ~ ((tem[i + 1] >> 2) & 2)) & 0xf
}
for i := 0; i < 256; i += 4 {
t = tem[i + 2]
tem[i+2] = tem[i + 3]
tem[i+3] = t
}
for i := 0; i < 128; i += 1 {
ctx.A[i] = tem[i << 1]
ctx.A[i + 128] = tem[(i << 1) + 1]
}
for i := 128; i < 256; i += 2 {
t = ctx.A[i]
ctx.A[i] = ctx.A[i + 1]
ctx.A[i + 1] = t
}
}
E8_initialgroup :: proc(ctx: ^Jh_Context) {
t0, t1, t2, t3: byte
tem: [256]byte
for i := u32(0); i < 256; i += 1 {
t0 = (ctx.H[i >> 3] >> (7 - (i & 7))) & 1
t1 = (ctx.H[(i + 256) >> 3] >> (7 - (i & 7))) & 1
t2 = (ctx.H[(i + 512) >> 3] >> (7 - (i & 7))) & 1
t3 = (ctx.H[(i + 768) >> 3] >> (7 - (i & 7))) & 1
tem[i] = (t0 << 3) | (t1 << 2) | (t2 << 1) | (t3 << 0)
}
for i := 0; i < 128; i += 1 {
ctx.A[i << 1] = tem[i]
ctx.A[(i << 1) + 1] = tem[i + 128]
}
}
E8 :: proc(ctx: ^Jh_Context) {
for i := 0; i < 64; i += 1 {
ctx.roundconstant[i] = ROUNDCONSTANT_ZERO[i]
}
E8_initialgroup(ctx)
for i := 0; i < 42; i += 1 {
R8(ctx)
update_roundconstant(ctx)
}
E8_finaldegroup(ctx)
}
F8 :: proc(ctx: ^Jh_Context) {
for i := 0; i < 64; i += 1 {
ctx.H[i] ~= ctx.buffer[i]
}
E8(ctx)
for i := 0; i < 64; i += 1 {
ctx.H[i + 64] ~= ctx.buffer[i]
}
}

View File

@@ -1,374 +0,0 @@
package keccak
/*
Copyright 2021 zhibog
Made available under the BSD-3 license.
List of contributors:
zhibog, dotbmp: Initial implementation.
Interface for the Keccak hashing algorithm.
A separate interface is provided because NIST changed the padding when standardizing SHA-3, so Keccak and SHA-3 produce different digests for the same input.
*/
import "core:os"
import "core:io"
import "../_sha3"
/*
High level API
*/
DIGEST_SIZE_224 :: 28
DIGEST_SIZE_256 :: 32
DIGEST_SIZE_384 :: 48
DIGEST_SIZE_512 :: 64
// hash_string_224 will hash the given input and return the
// computed hash
hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
return hash_bytes_224(transmute([]byte)(data))
}
// hash_bytes_224 will hash the given input and return the
// computed hash
hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
hash: [DIGEST_SIZE_224]byte
ctx: _sha3.Sha3_Context
ctx.mdlen = DIGEST_SIZE_224
ctx.is_keccak = true
_sha3.init(&ctx)
_sha3.update(&ctx, data)
_sha3.final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_224 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_224 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
ctx: _sha3.Sha3_Context
ctx.mdlen = DIGEST_SIZE_224
ctx.is_keccak = true
_sha3.init(&ctx)
_sha3.update(&ctx, data)
_sha3.final(&ctx, hash)
}
// hash_stream_224 will read the stream in chunks and compute a
// hash from its contents
hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
hash: [DIGEST_SIZE_224]byte
ctx: _sha3.Sha3_Context
ctx.mdlen = DIGEST_SIZE_224
ctx.is_keccak = true
_sha3.init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
_sha3.update(&ctx, buf[:read])
}
}
_sha3.final(&ctx, hash[:])
return hash, true
}
// hash_file_224 will read the file provided by the given handle
// and compute a hash
hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
if !load_at_once {
return hash_stream_224(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_224(buf[:]), ok
}
}
return [DIGEST_SIZE_224]byte{}, false
}
hash_224 :: proc {
hash_stream_224,
hash_file_224,
hash_bytes_224,
hash_string_224,
hash_bytes_to_buffer_224,
hash_string_to_buffer_224,
}
// hash_string_256 will hash the given input and return the
// computed hash
hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
return hash_bytes_256(transmute([]byte)(data))
}
// hash_bytes_256 will hash the given input and return the
// computed hash
hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
hash: [DIGEST_SIZE_256]byte
ctx: _sha3.Sha3_Context
ctx.mdlen = DIGEST_SIZE_256
ctx.is_keccak = true
_sha3.init(&ctx)
_sha3.update(&ctx, data)
_sha3.final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_256 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_256 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
ctx: _sha3.Sha3_Context
ctx.mdlen = DIGEST_SIZE_256
ctx.is_keccak = true
_sha3.init(&ctx)
_sha3.update(&ctx, data)
_sha3.final(&ctx, hash)
}
// hash_stream_256 will read the stream in chunks and compute a
// hash from its contents
hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
hash: [DIGEST_SIZE_256]byte
ctx: _sha3.Sha3_Context
ctx.mdlen = DIGEST_SIZE_256
ctx.is_keccak = true
_sha3.init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
_sha3.update(&ctx, buf[:read])
}
}
_sha3.final(&ctx, hash[:])
return hash, true
}
// hash_file_256 will read the file provided by the given handle
// and compute a hash
hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
if !load_at_once {
return hash_stream_256(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_256(buf[:]), ok
}
}
return [DIGEST_SIZE_256]byte{}, false
}
hash_256 :: proc {
hash_stream_256,
hash_file_256,
hash_bytes_256,
hash_string_256,
hash_bytes_to_buffer_256,
hash_string_to_buffer_256,
}
// hash_string_384 will hash the given input and return the
// computed hash
hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte {
return hash_bytes_384(transmute([]byte)(data))
}
// hash_bytes_384 will hash the given input and return the
// computed hash
hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
hash: [DIGEST_SIZE_384]byte
ctx: _sha3.Sha3_Context
ctx.mdlen = DIGEST_SIZE_384
ctx.is_keccak = true
_sha3.init(&ctx)
_sha3.update(&ctx, data)
_sha3.final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_384 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_384 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_384, "Size of destination buffer is smaller than the digest size")
ctx: _sha3.Sha3_Context
ctx.mdlen = DIGEST_SIZE_384
ctx.is_keccak = true
_sha3.init(&ctx)
_sha3.update(&ctx, data)
_sha3.final(&ctx, hash)
}
// hash_stream_384 will read the stream in chunks and compute a
// hash from its contents
hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
hash: [DIGEST_SIZE_384]byte
ctx: _sha3.Sha3_Context
ctx.mdlen = DIGEST_SIZE_384
ctx.is_keccak = true
_sha3.init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
_sha3.update(&ctx, buf[:read])
}
}
_sha3.final(&ctx, hash[:])
return hash, true
}
// hash_file_384 will read the file provided by the given handle
// and compute a hash
hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
if !load_at_once {
return hash_stream_384(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_384(buf[:]), ok
}
}
return [DIGEST_SIZE_384]byte{}, false
}
hash_384 :: proc {
hash_stream_384,
hash_file_384,
hash_bytes_384,
hash_string_384,
hash_bytes_to_buffer_384,
hash_string_to_buffer_384,
}
// hash_string_512 will hash the given input and return the
// computed hash
hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
return hash_bytes_512(transmute([]byte)(data))
}
// hash_bytes_512 will hash the given input and return the
// computed hash
hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
hash: [DIGEST_SIZE_512]byte
ctx: _sha3.Sha3_Context
ctx.mdlen = DIGEST_SIZE_512
ctx.is_keccak = true
_sha3.init(&ctx)
_sha3.update(&ctx, data)
_sha3.final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_512 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_512 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
ctx: _sha3.Sha3_Context
ctx.mdlen = DIGEST_SIZE_512
ctx.is_keccak = true
_sha3.init(&ctx)
_sha3.update(&ctx, data)
_sha3.final(&ctx, hash)
}
// hash_stream_512 will read the stream in chunks and compute a
// hash from its contents
hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
hash: [DIGEST_SIZE_512]byte
ctx: _sha3.Sha3_Context
ctx.mdlen = DIGEST_SIZE_512
ctx.is_keccak = true
_sha3.init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
_sha3.update(&ctx, buf[:read])
}
}
_sha3.final(&ctx, hash[:])
return hash, true
}
// hash_file_512 will read the file provided by the given handle
// and compute a hash
hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
if !load_at_once {
return hash_stream_512(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_512(buf[:]), ok
}
}
return [DIGEST_SIZE_512]byte{}, false
}
hash_512 :: proc {
hash_stream_512,
hash_file_512,
hash_bytes_512,
hash_string_512,
hash_bytes_to_buffer_512,
hash_string_to_buffer_512,
}
/*
Low level API
*/
Keccak_Context :: _sha3.Sha3_Context
init :: proc(ctx: ^_sha3.Sha3_Context) {
ctx.is_keccak = true
_sha3.init(ctx)
}
update :: proc "contextless" (ctx: ^_sha3.Sha3_Context, data: []byte) {
_sha3.update(ctx, data)
}
final :: proc "contextless" (ctx: ^_sha3.Sha3_Context, hash: []byte) {
_sha3.final(ctx, hash)
}

View File

@@ -0,0 +1,10 @@
# crypto/legacy
These algorithms are shipped solely for interoperability with legacy
systems. Their use in any other capacity is discouraged, especially
for those that are known to be broken.
- keccak - The draft version of the algorithm that became SHA-3
- MD5 - Broken (https://eprint.iacr.org/2005/075)
- SHA-1 - Broken (https://eprint.iacr.org/2017/190)
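
For example, one-shot hashing with the keccak package might look like this
(a sketch; the `core:crypto/legacy/keccak` import path is an assumption
based on this directory's layout, while `hash_string_256` comes from the
package itself):

```odin
package main

import "core:fmt"
import "core:crypto/legacy/keccak"

main :: proc() {
	// One-shot hashing through the high-level API.
	digest := keccak.hash_string_256("hellope")
	for b in digest {
		fmt.printf("%02x", b)
	}
	fmt.println()
}
```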

View File

@@ -0,0 +1,377 @@
package keccak
/*
Copyright 2021 zhibog
Made available under the BSD-3 license.
List of contributors:
zhibog, dotbmp: Initial implementation.
Interface for the Keccak hashing algorithm.
A separate interface is provided because NIST changed the padding when standardizing SHA-3, so Keccak and SHA-3 produce different digests for the same input.
*/
import "core:io"
import "core:os"
import "../../_sha3"
/*
High level API
*/
DIGEST_SIZE_224 :: 28
DIGEST_SIZE_256 :: 32
DIGEST_SIZE_384 :: 48
DIGEST_SIZE_512 :: 64
// hash_string_224 will hash the given input and return the
// computed hash
hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
return hash_bytes_224(transmute([]byte)(data))
}
// hash_bytes_224 will hash the given input and return the
// computed hash
hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
hash: [DIGEST_SIZE_224]byte
ctx: Context
ctx.mdlen = DIGEST_SIZE_224
ctx.is_keccak = true
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_224 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_224 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
ctx: Context
ctx.mdlen = DIGEST_SIZE_224
ctx.is_keccak = true
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream_224 will read the stream in chunks and compute a
// hash from its contents
hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
hash: [DIGEST_SIZE_224]byte
ctx: Context
ctx.mdlen = DIGEST_SIZE_224
ctx.is_keccak = true
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_224 will read the file provided by the given handle
// and compute a hash
hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
if !load_at_once {
return hash_stream_224(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_224(buf[:]), ok
}
}
return [DIGEST_SIZE_224]byte{}, false
}
hash_224 :: proc {
hash_stream_224,
hash_file_224,
hash_bytes_224,
hash_string_224,
hash_bytes_to_buffer_224,
hash_string_to_buffer_224,
}
// hash_string_256 will hash the given input and return the
// computed hash
hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
return hash_bytes_256(transmute([]byte)(data))
}
// hash_bytes_256 will hash the given input and return the
// computed hash
hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
hash: [DIGEST_SIZE_256]byte
ctx: Context
ctx.mdlen = DIGEST_SIZE_256
ctx.is_keccak = true
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_256 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_256 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
ctx: Context
ctx.mdlen = DIGEST_SIZE_256
ctx.is_keccak = true
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream_256 will read the stream in chunks and compute a
// hash from its contents
hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
hash: [DIGEST_SIZE_256]byte
ctx: Context
ctx.mdlen = DIGEST_SIZE_256
ctx.is_keccak = true
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_256 will read the file provided by the given handle
// and compute a hash
hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
if !load_at_once {
return hash_stream_256(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_256(buf[:]), ok
}
}
return [DIGEST_SIZE_256]byte{}, false
}
hash_256 :: proc {
hash_stream_256,
hash_file_256,
hash_bytes_256,
hash_string_256,
hash_bytes_to_buffer_256,
hash_string_to_buffer_256,
}
// hash_string_384 will hash the given input and return the
// computed hash
hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte {
return hash_bytes_384(transmute([]byte)(data))
}
// hash_bytes_384 will hash the given input and return the
// computed hash
hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
hash: [DIGEST_SIZE_384]byte
ctx: Context
ctx.mdlen = DIGEST_SIZE_384
ctx.is_keccak = true
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_384 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_384 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
ctx: Context
ctx.mdlen = DIGEST_SIZE_384
ctx.is_keccak = true
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream_384 will read the stream in chunks and compute a
// hash from its contents
hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
hash: [DIGEST_SIZE_384]byte
ctx: Context
ctx.mdlen = DIGEST_SIZE_384
ctx.is_keccak = true
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_384 will read the file provided by the given handle
// and compute a hash
hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
if !load_at_once {
return hash_stream_384(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_384(buf[:]), ok
}
}
return [DIGEST_SIZE_384]byte{}, false
}
hash_384 :: proc {
hash_stream_384,
hash_file_384,
hash_bytes_384,
hash_string_384,
hash_bytes_to_buffer_384,
hash_string_to_buffer_384,
}
// hash_string_512 will hash the given input and return the
// computed hash
hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
return hash_bytes_512(transmute([]byte)(data))
}
// hash_bytes_512 will hash the given input and return the
// computed hash
hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
hash: [DIGEST_SIZE_512]byte
ctx: Context
ctx.mdlen = DIGEST_SIZE_512
ctx.is_keccak = true
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_512 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_512 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
ctx: Context
ctx.mdlen = DIGEST_SIZE_512
ctx.is_keccak = true
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream_512 will read the stream in chunks and compute a
// hash from its contents
hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
hash: [DIGEST_SIZE_512]byte
ctx: Context
ctx.mdlen = DIGEST_SIZE_512
ctx.is_keccak = true
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_512 will read the file provided by the given handle
// and compute a hash
hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
if !load_at_once {
return hash_stream_512(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_512(buf[:]), ok
}
}
return [DIGEST_SIZE_512]byte{}, false
}
hash_512 :: proc {
hash_stream_512,
hash_file_512,
hash_bytes_512,
hash_string_512,
hash_bytes_to_buffer_512,
hash_string_to_buffer_512,
}
/*
Low level API
*/
Context :: _sha3.Sha3_Context
init :: proc(ctx: ^Context) {
ctx.is_keccak = true
_sha3.init(ctx)
}
update :: proc(ctx: ^Context, data: []byte) {
_sha3.update(ctx, data)
}
final :: proc(ctx: ^Context, hash: []byte) {
_sha3.final(ctx, hash)
}
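// For illustration, a minimal low-level streaming sketch (hypothetical
// caller code; `chunk1`, `chunk2` and `digest` are assumed to exist):
//
//	ctx: Context
//	ctx.mdlen = DIGEST_SIZE_256 // select the digest size before init
//	init(&ctx)                  // init marks the context as Keccak
//	update(&ctx, chunk1)
//	update(&ctx, chunk2)
//	final(&ctx, digest[:])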

View File

@@ -0,0 +1,295 @@
package md5
/*
Copyright 2021 zhibog
Made available under the BSD-3 license.
List of contributors:
zhibog, dotbmp: Initial implementation.
Implementation of the MD5 hashing algorithm, as defined in RFC 1321 <https://datatracker.ietf.org/doc/html/rfc1321>
*/
import "core:encoding/endian"
import "core:io"
import "core:math/bits"
import "core:mem"
import "core:os"
/*
High level API
*/
DIGEST_SIZE :: 16
// hash_string will hash the given input and return the
// computed hash
hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
return hash_bytes(transmute([]byte)(data))
}
// hash_bytes will hash the given input and return the
// computed hash
hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
hash: [DIGEST_SIZE]byte
ctx: Context
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer :: proc(data, hash: []byte) {
ctx: Context
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream will read the stream in chunks and compute a
// hash from its contents
hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
hash: [DIGEST_SIZE]byte
ctx: Context
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file will read the file provided by the given handle
// and compute a hash
hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes(buf[:]), ok
}
}
return [DIGEST_SIZE]byte{}, false
}
hash :: proc {
hash_stream,
hash_file,
hash_bytes,
hash_string,
hash_bytes_to_buffer,
hash_string_to_buffer,
}
/*
Low level API
*/
init :: proc(ctx: ^Context) {
ctx.state[0] = 0x67452301
ctx.state[1] = 0xefcdab89
ctx.state[2] = 0x98badcfe
ctx.state[3] = 0x10325476
ctx.bitlen = 0
ctx.datalen = 0
ctx.is_initialized = true
}
update :: proc(ctx: ^Context, data: []byte) {
assert(ctx.is_initialized)
for i := 0; i < len(data); i += 1 {
ctx.data[ctx.datalen] = data[i]
ctx.datalen += 1
if (ctx.datalen == BLOCK_SIZE) {
transform(ctx, ctx.data[:])
ctx.bitlen += 512
ctx.datalen = 0
}
}
}
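// final applies MD5's padding: a 0x80 byte, zeros up to the last 8 bytes of
// the block, then the total bit length as a 64-bit little-endian integer;
// the state words are emitted little-endian.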
final :: proc(ctx: ^Context, hash: []byte) {
assert(ctx.is_initialized)
if len(hash) < DIGEST_SIZE {
panic("crypto/md5: invalid destination digest size")
}
i := ctx.datalen
if ctx.datalen < 56 {
ctx.data[i] = 0x80
i += 1
for i < 56 {
ctx.data[i] = 0x00
i += 1
}
} else {
ctx.data[i] = 0x80
i += 1
for i < BLOCK_SIZE {
ctx.data[i] = 0x00
i += 1
}
transform(ctx, ctx.data[:])
mem.set(&ctx.data, 0, 56)
}
ctx.bitlen += u64(ctx.datalen * 8)
endian.unchecked_put_u64le(ctx.data[56:], ctx.bitlen)
transform(ctx, ctx.data[:])
for i = 0; i < DIGEST_SIZE / 4; i += 1 {
endian.unchecked_put_u32le(hash[i * 4:], ctx.state[i])
}
ctx.is_initialized = false
}
/*
MD5 implementation
*/
BLOCK_SIZE :: 64
Context :: struct {
data: [BLOCK_SIZE]byte,
state: [4]u32,
bitlen: u64,
datalen: u32,
is_initialized: bool,
}
/*
@note(zh): F, G, H and I, as mentioned in the RFC, have been inlined into FF, GG, HH
and II respectively, instead of declaring them separately.
*/
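// For reference, the RFC 1321 auxiliary functions that the expressions in
// FF, GG, HH and II below correspond to:
//
//	F(b, c, d) = (b & c) | (~b & d)
//	G(b, c, d) = (b & d) | (c & ~d)
//	H(b, c, d) = b ~ c ~ d
//	I(b, c, d) = c ~ (b | ~d)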
@(private)
FF :: #force_inline proc "contextless" (a, b, c, d, m: u32, s: int, t: u32) -> u32 {
return b + bits.rotate_left32(a + ((b & c) | (~b & d)) + m + t, s)
}
@(private)
GG :: #force_inline proc "contextless" (a, b, c, d, m: u32, s: int, t: u32) -> u32 {
return b + bits.rotate_left32(a + ((b & d) | (c & ~d)) + m + t, s)
}
@(private)
HH :: #force_inline proc "contextless" (a, b, c, d, m: u32, s: int, t: u32) -> u32 {
return b + bits.rotate_left32(a + (b ~ c ~ d) + m + t, s)
}
@(private)
II :: #force_inline proc "contextless" (a, b, c, d, m: u32, s: int, t: u32) -> u32 {
return b + bits.rotate_left32(a + (c ~ (b | ~d)) + m + t, s)
}
@(private)
transform :: proc "contextless" (ctx: ^Context, data: []byte) {
m: [DIGEST_SIZE]u32
for i := 0; i < DIGEST_SIZE; i += 1 {
m[i] = endian.unchecked_get_u32le(data[i * 4:])
}
a := ctx.state[0]
b := ctx.state[1]
c := ctx.state[2]
d := ctx.state[3]
a = FF(a, b, c, d, m[0], 7, 0xd76aa478)
d = FF(d, a, b, c, m[1], 12, 0xe8c7b756)
c = FF(c, d, a, b, m[2], 17, 0x242070db)
b = FF(b, c, d, a, m[3], 22, 0xc1bdceee)
a = FF(a, b, c, d, m[4], 7, 0xf57c0faf)
d = FF(d, a, b, c, m[5], 12, 0x4787c62a)
c = FF(c, d, a, b, m[6], 17, 0xa8304613)
b = FF(b, c, d, a, m[7], 22, 0xfd469501)
a = FF(a, b, c, d, m[8], 7, 0x698098d8)
d = FF(d, a, b, c, m[9], 12, 0x8b44f7af)
c = FF(c, d, a, b, m[10], 17, 0xffff5bb1)
b = FF(b, c, d, a, m[11], 22, 0x895cd7be)
a = FF(a, b, c, d, m[12], 7, 0x6b901122)
d = FF(d, a, b, c, m[13], 12, 0xfd987193)
c = FF(c, d, a, b, m[14], 17, 0xa679438e)
b = FF(b, c, d, a, m[15], 22, 0x49b40821)
a = GG(a, b, c, d, m[1], 5, 0xf61e2562)
d = GG(d, a, b, c, m[6], 9, 0xc040b340)
c = GG(c, d, a, b, m[11], 14, 0x265e5a51)
b = GG(b, c, d, a, m[0], 20, 0xe9b6c7aa)
a = GG(a, b, c, d, m[5], 5, 0xd62f105d)
d = GG(d, a, b, c, m[10], 9, 0x02441453)
c = GG(c, d, a, b, m[15], 14, 0xd8a1e681)
b = GG(b, c, d, a, m[4], 20, 0xe7d3fbc8)
a = GG(a, b, c, d, m[9], 5, 0x21e1cde6)
d = GG(d, a, b, c, m[14], 9, 0xc33707d6)
c = GG(c, d, a, b, m[3], 14, 0xf4d50d87)
b = GG(b, c, d, a, m[8], 20, 0x455a14ed)
a = GG(a, b, c, d, m[13], 5, 0xa9e3e905)
d = GG(d, a, b, c, m[2], 9, 0xfcefa3f8)
c = GG(c, d, a, b, m[7], 14, 0x676f02d9)
b = GG(b, c, d, a, m[12], 20, 0x8d2a4c8a)
a = HH(a, b, c, d, m[5], 4, 0xfffa3942)
d = HH(d, a, b, c, m[8], 11, 0x8771f681)
c = HH(c, d, a, b, m[11], 16, 0x6d9d6122)
b = HH(b, c, d, a, m[14], 23, 0xfde5380c)
a = HH(a, b, c, d, m[1], 4, 0xa4beea44)
d = HH(d, a, b, c, m[4], 11, 0x4bdecfa9)
c = HH(c, d, a, b, m[7], 16, 0xf6bb4b60)
b = HH(b, c, d, a, m[10], 23, 0xbebfbc70)
a = HH(a, b, c, d, m[13], 4, 0x289b7ec6)
d = HH(d, a, b, c, m[0], 11, 0xeaa127fa)
c = HH(c, d, a, b, m[3], 16, 0xd4ef3085)
b = HH(b, c, d, a, m[6], 23, 0x04881d05)
a = HH(a, b, c, d, m[9], 4, 0xd9d4d039)
d = HH(d, a, b, c, m[12], 11, 0xe6db99e5)
c = HH(c, d, a, b, m[15], 16, 0x1fa27cf8)
b = HH(b, c, d, a, m[2], 23, 0xc4ac5665)
a = II(a, b, c, d, m[0], 6, 0xf4292244)
d = II(d, a, b, c, m[7], 10, 0x432aff97)
c = II(c, d, a, b, m[14], 15, 0xab9423a7)
b = II(b, c, d, a, m[5], 21, 0xfc93a039)
a = II(a, b, c, d, m[12], 6, 0x655b59c3)
d = II(d, a, b, c, m[3], 10, 0x8f0ccc92)
c = II(c, d, a, b, m[10], 15, 0xffeff47d)
b = II(b, c, d, a, m[1], 21, 0x85845dd1)
a = II(a, b, c, d, m[8], 6, 0x6fa87e4f)
d = II(d, a, b, c, m[15], 10, 0xfe2ce6e0)
c = II(c, d, a, b, m[6], 15, 0xa3014314)
b = II(b, c, d, a, m[13], 21, 0x4e0811a1)
a = II(a, b, c, d, m[4], 6, 0xf7537e82)
d = II(d, a, b, c, m[11], 10, 0xbd3af235)
c = II(c, d, a, b, m[2], 15, 0x2ad7d2bb)
b = II(b, c, d, a, m[9], 21, 0xeb86d391)
ctx.state[0] += a
ctx.state[1] += b
ctx.state[2] += c
ctx.state[3] += d
}

View File

@@ -0,0 +1,252 @@
package sha1
/*
Copyright 2021 zhibog
Made available under the BSD-3 license.
List of contributors:
zhibog, dotbmp: Initial implementation.
Implementation of the SHA1 hashing algorithm, as defined in RFC 3174 <https://datatracker.ietf.org/doc/html/rfc3174>
*/
import "core:encoding/endian"
import "core:io"
import "core:math/bits"
import "core:mem"
import "core:os"
/*
High level API
*/
DIGEST_SIZE :: 20
// hash_string will hash the given input and return the
// computed hash
hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
return hash_bytes(transmute([]byte)(data))
}
// hash_bytes will hash the given input and return the
// computed hash
hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
hash: [DIGEST_SIZE]byte
ctx: Context
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer :: proc(data, hash: []byte) {
ctx: Context
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream will read the stream in chunks and compute a
// hash from its contents
hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
hash: [DIGEST_SIZE]byte
ctx: Context
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file will read the file provided by the given handle
// and compute a hash
hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes(buf[:]), ok
}
}
return [DIGEST_SIZE]byte{}, false
}
hash :: proc {
hash_stream,
hash_file,
hash_bytes,
hash_string,
hash_bytes_to_buffer,
hash_string_to_buffer,
}
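// Usage sketch for the high level API; the import path is an assumption and
// may differ in your tree:
//
// import "core:fmt"
// import "core:crypto/sha1"
//
// main :: proc() {
//     digest := sha1.hash_string("Hellope")
//     for b in digest { fmt.printf("%02x", b) }
//     fmt.println()
// }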
/*
Low level API
*/
init :: proc(ctx: ^Context) {
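// Standard SHA-1 initial state and the four round constants, per RFC 3174.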
ctx.state[0] = 0x67452301
ctx.state[1] = 0xefcdab89
ctx.state[2] = 0x98badcfe
ctx.state[3] = 0x10325476
ctx.state[4] = 0xc3d2e1f0
ctx.k[0] = 0x5a827999
ctx.k[1] = 0x6ed9eba1
ctx.k[2] = 0x8f1bbcdc
ctx.k[3] = 0xca62c1d6
ctx.datalen = 0
ctx.bitlen = 0
ctx.is_initialized = true
}
update :: proc(ctx: ^Context, data: []byte) {
assert(ctx.is_initialized)
for i := 0; i < len(data); i += 1 {
ctx.data[ctx.datalen] = data[i]
ctx.datalen += 1
if (ctx.datalen == BLOCK_SIZE) {
transform(ctx, ctx.data[:])
ctx.bitlen += 512
ctx.datalen = 0
}
}
}
final :: proc(ctx: ^Context, hash: []byte) {
assert(ctx.is_initialized)
if len(hash) < DIGEST_SIZE {
panic("crypto/sha1: invalid destination digest size")
}
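// Padding: append the 0x80 terminator, zero-fill to byte 56 (processing an
// extra block if the terminator does not fit), then append the 64-bit
// big-endian message length in bits.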
i := ctx.datalen
if ctx.datalen < 56 {
ctx.data[i] = 0x80
i += 1
for i < 56 {
ctx.data[i] = 0x00
i += 1
}
} else {
ctx.data[i] = 0x80
i += 1
for i < BLOCK_SIZE {
ctx.data[i] = 0x00
i += 1
}
transform(ctx, ctx.data[:])
mem.set(&ctx.data, 0, 56)
}
ctx.bitlen += u64(ctx.datalen * 8)
endian.unchecked_put_u64be(ctx.data[56:], ctx.bitlen)
transform(ctx, ctx.data[:])
for i = 0; i < DIGEST_SIZE / 4; i += 1 {
endian.unchecked_put_u32be(hash[i * 4:], ctx.state[i])
}
ctx.is_initialized = false
}
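// Incremental hashing sketch using the low level API directly; the message
// split below is arbitrary:
//
// msg := "Hellope"
// ctx: Context
// init(&ctx)
// update(&ctx, transmute([]byte)(msg[:4])) // data may arrive in pieces
// update(&ctx, transmute([]byte)(msg[4:]))
// digest: [DIGEST_SIZE]byte
// final(&ctx, digest[:])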
/*
SHA1 implementation
*/
BLOCK_SIZE :: 64
Context :: struct {
data: [BLOCK_SIZE]byte,
datalen: u32,
bitlen: u64,
state: [5]u32,
k: [4]u32,
is_initialized: bool,
}
@(private)
transform :: proc "contextless" (ctx: ^Context, data: []byte) {
a, b, c, d, e, i, t: u32
m: [80]u32
for i = 0; i < 16; i += 1 {
m[i] = endian.unchecked_get_u32be(data[i * 4:])
}
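// Expand the 16 message words to an 80-word schedule: each new word is the
// 1-bit left rotation of the XOR of four earlier words (RFC 3174).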
for i < 80 {
m[i] = (m[i - 3] ~ m[i - 8] ~ m[i - 14] ~ m[i - 16])
m[i] = (m[i] << 1) | (m[i] >> 31)
i += 1
}
a = ctx.state[0]
b = ctx.state[1]
c = ctx.state[2]
d = ctx.state[3]
e = ctx.state[4]
for i = 0; i < 20; i += 1 {
t = bits.rotate_left32(a, 5) + ((b & c) ~ (~b & d)) + e + ctx.k[0] + m[i]
e = d
d = c
c = bits.rotate_left32(b, 30)
b = a
a = t
}
for i < 40 {
t = bits.rotate_left32(a, 5) + (b ~ c ~ d) + e + ctx.k[1] + m[i]
e = d
d = c
c = bits.rotate_left32(b, 30)
b = a
a = t
i += 1
}
for i < 60 {
t = bits.rotate_left32(a, 5) + ((b & c) ~ (b & d) ~ (c & d)) + e + ctx.k[2] + m[i]
e = d
d = c
c = bits.rotate_left32(b, 30)
b = a
a = t
i += 1
}
for i < 80 {
t = bits.rotate_left32(a, 5) + (b ~ c ~ d) + e + ctx.k[3] + m[i]
e = d
d = c
c = bits.rotate_left32(b, 30)
b = a
a = t
i += 1
}
ctx.state[0] += a
ctx.state[1] += b
ctx.state[2] += c
ctx.state[3] += d
ctx.state[4] += e
}

View File

@@ -1,182 +0,0 @@
package md2
/*
Copyright 2021 zhibog
Made available under the BSD-3 license.
List of contributors:
zhibog, dotbmp: Initial implementation.
Implementation of the MD2 hashing algorithm, as defined in RFC 1319 <https://datatracker.ietf.org/doc/html/rfc1319>
*/
import "core:os"
import "core:io"
/*
High level API
*/
DIGEST_SIZE :: 16
// hash_string will hash the given input and return the
// computed hash
hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
return hash_bytes(transmute([]byte)(data))
}
// hash_bytes will hash the given input and return the
// computed hash
hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
hash: [DIGEST_SIZE]byte
ctx: Md2_Context
// init(&ctx) No-op
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
ctx: Md2_Context
// init(&ctx) No-op
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream will read the stream in chunks and compute a
// hash from its contents
hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
hash: [DIGEST_SIZE]byte
ctx: Md2_Context
// init(&ctx) No-op
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file will read the file provided by the given handle
// and compute a hash
hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes(buf[:]), ok
}
}
return [DIGEST_SIZE]byte{}, false
}
hash :: proc {
hash_stream,
hash_file,
hash_bytes,
hash_string,
hash_bytes_to_buffer,
hash_string_to_buffer,
}
/*
Low level API
*/
@(warning="Init is a no-op for MD2")
init :: proc(ctx: ^Md2_Context) {
// No action needed here
}
update :: proc(ctx: ^Md2_Context, data: []byte) {
for i := 0; i < len(data); i += 1 {
ctx.data[ctx.datalen] = data[i]
ctx.datalen += 1
if (ctx.datalen == DIGEST_SIZE) {
transform(ctx, ctx.data[:])
ctx.datalen = 0
}
}
}
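// MD2 padding appends N bytes of value N, where N is the count needed to
// fill the 16-byte block; the running checksum is then folded in as a final
// block (RFC 1319).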
final :: proc(ctx: ^Md2_Context, hash: []byte) {
to_pad := byte(DIGEST_SIZE - ctx.datalen)
for ctx.datalen < DIGEST_SIZE {
ctx.data[ctx.datalen] = to_pad
ctx.datalen += 1
}
transform(ctx, ctx.data[:])
transform(ctx, ctx.checksum[:])
for i := 0; i < DIGEST_SIZE; i += 1 {
hash[i] = ctx.state[i]
}
}
/*
MD2 implementation
*/
Md2_Context :: struct {
data: [DIGEST_SIZE]byte,
state: [DIGEST_SIZE * 3]byte,
checksum: [DIGEST_SIZE]byte,
datalen: int,
}
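// MD2 S-box: a fixed permutation of 0..255 derived from the digits of pi
// (RFC 1319).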
PI_TABLE := [?]byte {
41, 46, 67, 201, 162, 216, 124, 1, 61, 54, 84, 161, 236, 240, 6,
19, 98, 167, 5, 243, 192, 199, 115, 140, 152, 147, 43, 217, 188, 76,
130, 202, 30, 155, 87, 60, 253, 212, 224, 22, 103, 66, 111, 24, 138,
23, 229, 18, 190, 78, 196, 214, 218, 158, 222, 73, 160, 251, 245, 142,
187, 47, 238, 122, 169, 104, 121, 145, 21, 178, 7, 63, 148, 194, 16,
137, 11, 34, 95, 33, 128, 127, 93, 154, 90, 144, 50, 39, 53, 62,
204, 231, 191, 247, 151, 3, 255, 25, 48, 179, 72, 165, 181, 209, 215,
94, 146, 42, 172, 86, 170, 198, 79, 184, 56, 210, 150, 164, 125, 182,
118, 252, 107, 226, 156, 116, 4, 241, 69, 157, 112, 89, 100, 113, 135,
32, 134, 91, 207, 101, 230, 45, 168, 2, 27, 96, 37, 173, 174, 176,
185, 246, 28, 70, 97, 105, 52, 64, 126, 15, 85, 71, 163, 35, 221,
81, 175, 58, 195, 92, 249, 206, 186, 197, 234, 38, 44, 83, 13, 110,
133, 40, 132, 9, 211, 223, 205, 244, 65, 129, 77, 82, 106, 220, 55,
200, 108, 193, 171, 250, 36, 225, 123, 8, 12, 189, 177, 74, 120, 136,
149, 139, 227, 99, 232, 109, 233, 203, 213, 254, 59, 0, 29, 57, 242,
239, 183, 14, 102, 88, 208, 228, 166, 119, 114, 248, 235, 117, 75, 10,
49, 68, 80, 180, 143, 237, 31, 26, 219, 153, 141, 51, 159, 17, 131,
20,
}
transform :: proc(ctx: ^Md2_Context, data: []byte) {
j, k, t: byte
for j = 0; j < DIGEST_SIZE; j += 1 {
ctx.state[j + DIGEST_SIZE] = data[j]
ctx.state[j + DIGEST_SIZE * 2] = (ctx.state[j + DIGEST_SIZE] ~ ctx.state[j])
}
t = 0
for j = 0; j < DIGEST_SIZE + 2; j += 1 {
for k = 0; k < DIGEST_SIZE * 3; k += 1 {
ctx.state[k] ~= PI_TABLE[t]
t = ctx.state[k]
}
t = (t + j) & 0xff
}
t = ctx.checksum[DIGEST_SIZE - 1]
for j = 0; j < DIGEST_SIZE; j += 1 {
ctx.checksum[j] ~= PI_TABLE[data[j] ~ t]
t = ctx.checksum[j]
}
}

View File

@@ -1,263 +0,0 @@
package md4
/*
Copyright 2021 zhibog
Made available under the BSD-3 license.
List of contributors:
zhibog, dotbmp: Initial implementation.
Jeroen van Rijn: Context design to be able to change from Odin implementation to bindings.
Implementation of the MD4 hashing algorithm, as defined in RFC 1320 <https://datatracker.ietf.org/doc/html/rfc1320>
*/
import "core:mem"
import "core:os"
import "core:io"
import "../util"
/*
High level API
*/
DIGEST_SIZE :: 16
// hash_string will hash the given input and return the
// computed hash
hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
return hash_bytes(transmute([]byte)(data))
}
// hash_bytes will hash the given input and return the
// computed hash
hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
hash: [DIGEST_SIZE]byte
ctx: Md4_Context
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
ctx: Md4_Context
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream will read the stream in chunks and compute a
// hash from its contents
hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
hash: [DIGEST_SIZE]byte
ctx: Md4_Context
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file will read the file provided by the given handle
// and compute a hash
hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes(buf[:]), ok
}
}
return [DIGEST_SIZE]byte{}, false
}
hash :: proc {
hash_stream,
hash_file,
hash_bytes,
hash_string,
hash_bytes_to_buffer,
hash_string_to_buffer,
}
/*
Low level API
*/
init :: proc(ctx: ^Md4_Context) {
ctx.state[0] = 0x67452301
ctx.state[1] = 0xefcdab89
ctx.state[2] = 0x98badcfe
ctx.state[3] = 0x10325476
}
update :: proc(ctx: ^Md4_Context, data: []byte) {
for i := 0; i < len(data); i += 1 {
ctx.data[ctx.datalen] = data[i]
ctx.datalen += 1
if ctx.datalen == BLOCK_SIZE {
transform(ctx, ctx.data[:])
ctx.bitlen += 512
ctx.datalen = 0
}
}
}
final :: proc(ctx: ^Md4_Context, hash: []byte) {
i := ctx.datalen
if ctx.datalen < 56 {
ctx.data[i] = 0x80
i += 1
for i < 56 {
ctx.data[i] = 0x00
i += 1
}
} else {
ctx.data[i] = 0x80
i += 1
for i < BLOCK_SIZE {
ctx.data[i] = 0x00
i += 1
}
transform(ctx, ctx.data[:])
mem.set(&ctx.data, 0, 56)
}
ctx.bitlen += u64(ctx.datalen * 8)
ctx.data[56] = byte(ctx.bitlen)
ctx.data[57] = byte(ctx.bitlen >> 8)
ctx.data[58] = byte(ctx.bitlen >> 16)
ctx.data[59] = byte(ctx.bitlen >> 24)
ctx.data[60] = byte(ctx.bitlen >> 32)
ctx.data[61] = byte(ctx.bitlen >> 40)
ctx.data[62] = byte(ctx.bitlen >> 48)
ctx.data[63] = byte(ctx.bitlen >> 56)
transform(ctx, ctx.data[:])
for i = 0; i < 4; i += 1 {
hash[i] = byte(ctx.state[0] >> (i * 8)) & 0x000000ff
hash[i + 4] = byte(ctx.state[1] >> (i * 8)) & 0x000000ff
hash[i + 8] = byte(ctx.state[2] >> (i * 8)) & 0x000000ff
hash[i + 12] = byte(ctx.state[3] >> (i * 8)) & 0x000000ff
}
}
/*
MD4 implementation
*/
BLOCK_SIZE :: 64
Md4_Context :: struct {
data: [64]byte,
state: [4]u32,
bitlen: u64,
datalen: u32,
}
/*
@note(zh): F, G and H, as mentioned in the RFC, have been inlined into FF, GG
and HH respectively, instead of declaring them separately.
*/
FF :: #force_inline proc "contextless"(a, b, c, d, x: u32, s : int) -> u32 {
return util.ROTL32(a + ((b & c) | (~b & d)) + x, s)
}
GG :: #force_inline proc "contextless"(a, b, c, d, x: u32, s : int) -> u32 {
return util.ROTL32(a + ((b & c) | (b & d) | (c & d)) + x + 0x5a827999, s)
}
HH :: #force_inline proc "contextless"(a, b, c, d, x: u32, s : int) -> u32 {
return util.ROTL32(a + (b ~ c ~ d) + x + 0x6ed9eba1, s)
}
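// Round constants: 0x5a827999 = floor(2^30 * sqrt(2)) for round 2 and
// 0x6ed9eba1 = floor(2^30 * sqrt(3)) for round 3; round 1 adds no constant
// (RFC 1320).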
transform :: proc(ctx: ^Md4_Context, data: []byte) {
a, b, c, d, i, j: u32
m: [DIGEST_SIZE]u32
for i, j = 0, 0; i < DIGEST_SIZE; i += 1 {
m[i] = u32(data[j]) | (u32(data[j + 1]) << 8) | (u32(data[j + 2]) << 16) | (u32(data[j + 3]) << 24)
j += 4
}
a = ctx.state[0]
b = ctx.state[1]
c = ctx.state[2]
d = ctx.state[3]
a = FF(a, b, c, d, m[0], 3)
d = FF(d, a, b, c, m[1], 7)
c = FF(c, d, a, b, m[2], 11)
b = FF(b, c, d, a, m[3], 19)
a = FF(a, b, c, d, m[4], 3)
d = FF(d, a, b, c, m[5], 7)
c = FF(c, d, a, b, m[6], 11)
b = FF(b, c, d, a, m[7], 19)
a = FF(a, b, c, d, m[8], 3)
d = FF(d, a, b, c, m[9], 7)
c = FF(c, d, a, b, m[10], 11)
b = FF(b, c, d, a, m[11], 19)
a = FF(a, b, c, d, m[12], 3)
d = FF(d, a, b, c, m[13], 7)
c = FF(c, d, a, b, m[14], 11)
b = FF(b, c, d, a, m[15], 19)
a = GG(a, b, c, d, m[0], 3)
d = GG(d, a, b, c, m[4], 5)
c = GG(c, d, a, b, m[8], 9)
b = GG(b, c, d, a, m[12], 13)
a = GG(a, b, c, d, m[1], 3)
d = GG(d, a, b, c, m[5], 5)
c = GG(c, d, a, b, m[9], 9)
b = GG(b, c, d, a, m[13], 13)
a = GG(a, b, c, d, m[2], 3)
d = GG(d, a, b, c, m[6], 5)
c = GG(c, d, a, b, m[10], 9)
b = GG(b, c, d, a, m[14], 13)
a = GG(a, b, c, d, m[3], 3)
d = GG(d, a, b, c, m[7], 5)
c = GG(c, d, a, b, m[11], 9)
b = GG(b, c, d, a, m[15], 13)
a = HH(a, b, c, d, m[0], 3)
d = HH(d, a, b, c, m[8], 9)
c = HH(c, d, a, b, m[4], 11)
b = HH(b, c, d, a, m[12], 15)
a = HH(a, b, c, d, m[2], 3)
d = HH(d, a, b, c, m[10], 9)
c = HH(c, d, a, b, m[6], 11)
b = HH(b, c, d, a, m[14], 15)
a = HH(a, b, c, d, m[1], 3)
d = HH(d, a, b, c, m[9], 9)
c = HH(c, d, a, b, m[5], 11)
b = HH(b, c, d, a, m[13], 15)
a = HH(a, b, c, d, m[3], 3)
d = HH(d, a, b, c, m[11], 9)
c = HH(c, d, a, b, m[7], 11)
b = HH(b, c, d, a, m[15], 15)
ctx.state[0] += a
ctx.state[1] += b
ctx.state[2] += c
ctx.state[3] += d
}

View File

@@ -1,285 +0,0 @@
package md5
/*
Copyright 2021 zhibog
Made available under the BSD-3 license.
List of contributors:
zhibog, dotbmp: Initial implementation.
Implementation of the MD5 hashing algorithm, as defined in RFC 1321 <https://datatracker.ietf.org/doc/html/rfc1321>
*/
import "core:mem"
import "core:os"
import "core:io"
import "../util"
/*
High level API
*/
DIGEST_SIZE :: 16
// hash_string will hash the given input and return the
// computed hash
hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
return hash_bytes(transmute([]byte)(data))
}
// hash_bytes will hash the given input and return the
// computed hash
hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
hash: [DIGEST_SIZE]byte
ctx: Md5_Context
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
ctx: Md5_Context
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream will read the stream in chunks and compute a
// hash from its contents
hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
hash: [DIGEST_SIZE]byte
ctx: Md5_Context
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file will read the file provided by the given handle
// and compute a hash
hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes(buf[:]), ok
}
}
return [DIGEST_SIZE]byte{}, false
}
hash :: proc {
hash_stream,
hash_file,
hash_bytes,
hash_string,
hash_bytes_to_buffer,
hash_string_to_buffer,
}
/*
Low level API
*/
init :: proc(ctx: ^Md5_Context) {
ctx.state[0] = 0x67452301
ctx.state[1] = 0xefcdab89
ctx.state[2] = 0x98badcfe
ctx.state[3] = 0x10325476
}
update :: proc(ctx: ^Md5_Context, data: []byte) {
for i := 0; i < len(data); i += 1 {
ctx.data[ctx.datalen] = data[i]
ctx.datalen += 1
if ctx.datalen == BLOCK_SIZE {
transform(ctx, ctx.data[:])
ctx.bitlen += 512
ctx.datalen = 0
}
}
}
final :: proc(ctx: ^Md5_Context, hash: []byte) {
i := ctx.datalen
if ctx.datalen < 56 {
ctx.data[i] = 0x80
i += 1
for i < 56 {
ctx.data[i] = 0x00
i += 1
}
} else {
ctx.data[i] = 0x80
i += 1
for i < BLOCK_SIZE {
ctx.data[i] = 0x00
i += 1
}
transform(ctx, ctx.data[:])
mem.set(&ctx.data, 0, 56)
}
ctx.bitlen += u64(ctx.datalen * 8)
ctx.data[56] = byte(ctx.bitlen)
ctx.data[57] = byte(ctx.bitlen >> 8)
ctx.data[58] = byte(ctx.bitlen >> 16)
ctx.data[59] = byte(ctx.bitlen >> 24)
ctx.data[60] = byte(ctx.bitlen >> 32)
ctx.data[61] = byte(ctx.bitlen >> 40)
ctx.data[62] = byte(ctx.bitlen >> 48)
ctx.data[63] = byte(ctx.bitlen >> 56)
transform(ctx, ctx.data[:])
for i = 0; i < 4; i += 1 {
hash[i] = byte(ctx.state[0] >> (i * 8)) & 0x000000ff
hash[i + 4] = byte(ctx.state[1] >> (i * 8)) & 0x000000ff
hash[i + 8] = byte(ctx.state[2] >> (i * 8)) & 0x000000ff
hash[i + 12] = byte(ctx.state[3] >> (i * 8)) & 0x000000ff
}
}
/*
MD5 implementation
*/
BLOCK_SIZE :: 64
Md5_Context :: struct {
data: [BLOCK_SIZE]byte,
state: [4]u32,
bitlen: u64,
datalen: u32,
}
/*
@note(zh): F, G, H and I, as mentioned in the RFC, have been inlined into FF, GG, HH
and II respectively, instead of declaring them separately.
*/
FF :: #force_inline proc "contextless" (a, b, c, d, m: u32, s: int, t: u32) -> u32 {
return b + util.ROTL32(a + ((b & c) | (~b & d)) + m + t, s)
}
GG :: #force_inline proc "contextless" (a, b, c, d, m: u32, s: int, t: u32) -> u32 {
return b + util.ROTL32(a + ((b & d) | (c & ~d)) + m + t, s)
}
HH :: #force_inline proc "contextless" (a, b, c, d, m: u32, s: int, t: u32) -> u32 {
return b + util.ROTL32(a + (b ~ c ~ d) + m + t, s)
}
II :: #force_inline proc "contextless" (a, b, c, d, m: u32, s: int, t: u32) -> u32 {
return b + util.ROTL32(a + (c ~ (b | ~d)) + m + t, s)
}
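// Unlike MD4, every MD5 step adds its own constant t[i] = floor(2^32 *
// abs(sin(i))), i = 1..64 (RFC 1321); the values are inlined at the call
// sites in transform below.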
transform :: proc(ctx: ^Md5_Context, data: []byte) {
i, j: u32
m: [DIGEST_SIZE]u32
for i, j = 0, 0; i < DIGEST_SIZE; i += 1 {
m[i] = u32(data[j]) + u32(data[j + 1]) << 8 + u32(data[j + 2]) << 16 + u32(data[j + 3]) << 24
j += 4
}
a := ctx.state[0]
b := ctx.state[1]
c := ctx.state[2]
d := ctx.state[3]
a = FF(a, b, c, d, m[0], 7, 0xd76aa478)
d = FF(d, a, b, c, m[1], 12, 0xe8c7b756)
c = FF(c, d, a, b, m[2], 17, 0x242070db)
b = FF(b, c, d, a, m[3], 22, 0xc1bdceee)
a = FF(a, b, c, d, m[4], 7, 0xf57c0faf)
d = FF(d, a, b, c, m[5], 12, 0x4787c62a)
c = FF(c, d, a, b, m[6], 17, 0xa8304613)
b = FF(b, c, d, a, m[7], 22, 0xfd469501)
a = FF(a, b, c, d, m[8], 7, 0x698098d8)
d = FF(d, a, b, c, m[9], 12, 0x8b44f7af)
c = FF(c, d, a, b, m[10], 17, 0xffff5bb1)
b = FF(b, c, d, a, m[11], 22, 0x895cd7be)
a = FF(a, b, c, d, m[12], 7, 0x6b901122)
d = FF(d, a, b, c, m[13], 12, 0xfd987193)
c = FF(c, d, a, b, m[14], 17, 0xa679438e)
b = FF(b, c, d, a, m[15], 22, 0x49b40821)
a = GG(a, b, c, d, m[1], 5, 0xf61e2562)
d = GG(d, a, b, c, m[6], 9, 0xc040b340)
c = GG(c, d, a, b, m[11], 14, 0x265e5a51)
b = GG(b, c, d, a, m[0], 20, 0xe9b6c7aa)
a = GG(a, b, c, d, m[5], 5, 0xd62f105d)
d = GG(d, a, b, c, m[10], 9, 0x02441453)
c = GG(c, d, a, b, m[15], 14, 0xd8a1e681)
b = GG(b, c, d, a, m[4], 20, 0xe7d3fbc8)
a = GG(a, b, c, d, m[9], 5, 0x21e1cde6)
d = GG(d, a, b, c, m[14], 9, 0xc33707d6)
c = GG(c, d, a, b, m[3], 14, 0xf4d50d87)
b = GG(b, c, d, a, m[8], 20, 0x455a14ed)
a = GG(a, b, c, d, m[13], 5, 0xa9e3e905)
d = GG(d, a, b, c, m[2], 9, 0xfcefa3f8)
c = GG(c, d, a, b, m[7], 14, 0x676f02d9)
b = GG(b, c, d, a, m[12], 20, 0x8d2a4c8a)
a = HH(a, b, c, d, m[5], 4, 0xfffa3942)
d = HH(d, a, b, c, m[8], 11, 0x8771f681)
c = HH(c, d, a, b, m[11], 16, 0x6d9d6122)
b = HH(b, c, d, a, m[14], 23, 0xfde5380c)
a = HH(a, b, c, d, m[1], 4, 0xa4beea44)
d = HH(d, a, b, c, m[4], 11, 0x4bdecfa9)
c = HH(c, d, a, b, m[7], 16, 0xf6bb4b60)
b = HH(b, c, d, a, m[10], 23, 0xbebfbc70)
a = HH(a, b, c, d, m[13], 4, 0x289b7ec6)
d = HH(d, a, b, c, m[0], 11, 0xeaa127fa)
c = HH(c, d, a, b, m[3], 16, 0xd4ef3085)
b = HH(b, c, d, a, m[6], 23, 0x04881d05)
a = HH(a, b, c, d, m[9], 4, 0xd9d4d039)
d = HH(d, a, b, c, m[12], 11, 0xe6db99e5)
c = HH(c, d, a, b, m[15], 16, 0x1fa27cf8)
b = HH(b, c, d, a, m[2], 23, 0xc4ac5665)
a = II(a, b, c, d, m[0], 6, 0xf4292244)
d = II(d, a, b, c, m[7], 10, 0x432aff97)
c = II(c, d, a, b, m[14], 15, 0xab9423a7)
b = II(b, c, d, a, m[5], 21, 0xfc93a039)
a = II(a, b, c, d, m[12], 6, 0x655b59c3)
d = II(d, a, b, c, m[3], 10, 0x8f0ccc92)
c = II(c, d, a, b, m[10], 15, 0xffeff47d)
b = II(b, c, d, a, m[1], 21, 0x85845dd1)
a = II(a, b, c, d, m[8], 6, 0x6fa87e4f)
d = II(d, a, b, c, m[15], 10, 0xfe2ce6e0)
c = II(c, d, a, b, m[6], 15, 0xa3014314)
b = II(b, c, d, a, m[13], 21, 0x4e0811a1)
a = II(a, b, c, d, m[4], 6, 0xf7537e82)
d = II(d, a, b, c, m[11], 10, 0xbd3af235)
c = II(c, d, a, b, m[2], 15, 0x2ad7d2bb)
b = II(b, c, d, a, m[9], 21, 0xeb86d391)
ctx.state[0] += a
ctx.state[1] += b
ctx.state[2] += c
ctx.state[3] += d
}

View File

@@ -1,8 +1,8 @@
package poly1305
import "core:crypto"
import "core:crypto/util"
import field "core:crypto/_fiat/field_poly1305"
import "core:encoding/endian"
import "core:mem"
KEY_SIZE :: 32
@@ -52,8 +52,8 @@ init :: proc (ctx: ^Context, key: []byte) {
// r = le_bytes_to_num(key[0..15])
// r = clamp(r) (r &= 0xffffffc0ffffffc0ffffffc0fffffff)
tmp_lo := util.U64_LE(key[0:8]) & 0x0ffffffc0fffffff
tmp_hi := util.U64_LE(key[8:16]) & 0xffffffc0ffffffc
tmp_lo := endian.unchecked_get_u64le(key[0:]) & 0x0ffffffc0fffffff
tmp_hi := endian.unchecked_get_u64le(key[8:]) & 0xffffffc0ffffffc
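// (The 128-bit clamp mask 0x0ffffffc0ffffffc0ffffffc0fffffff is applied as
// two 64-bit halves, split at bit 64.)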
field.fe_from_u64s(&ctx._r, tmp_lo, tmp_hi)
// s = le_bytes_to_num(key[16..31])
@@ -151,7 +151,7 @@ _blocks :: proc (ctx: ^Context, msg: []byte, final := false) {
data_len := len(data)
for data_len >= _BLOCK_SIZE {
// n = le_bytes_to_num(msg[((i-1)*16)..*i*16] | [0x01])
field.fe_from_bytes(&n, data[:_BLOCK_SIZE], final_byte, false)
field.fe_from_bytes(&n, data[:_BLOCK_SIZE], final_byte)
// a += n
field.fe_add(field.fe_relax_cast(&ctx._a), &ctx._a, &n) // _a unreduced

View File

@@ -1,919 +0,0 @@
package ripemd
/*
Copyright 2021 zhibog
Made available under the BSD-3 license.
List of contributors:
zhibog, dotbmp: Initial implementation.
Implementation of the RIPEMD hashing algorithm, as defined in <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
*/
import "core:os"
import "core:io"
import "../util"
/*
High level API
*/
DIGEST_SIZE_128 :: 16
DIGEST_SIZE_160 :: 20
DIGEST_SIZE_256 :: 32
DIGEST_SIZE_320 :: 40
// hash_string_128 will hash the given input and return the
// computed hash
hash_string_128 :: proc(data: string) -> [DIGEST_SIZE_128]byte {
return hash_bytes_128(transmute([]byte)(data))
}
// hash_bytes_128 will hash the given input and return the
// computed hash
hash_bytes_128 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
hash: [DIGEST_SIZE_128]byte
ctx: Ripemd128_Context
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_128 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_128 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_128(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_128 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_128 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_128, "Size of destination buffer is smaller than the digest size")
ctx: Ripemd128_Context
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream_128 will read the stream in chunks and compute a
// hash from its contents
hash_stream_128 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
hash: [DIGEST_SIZE_128]byte
ctx: Ripemd128_Context
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_128 will read the file provided by the given handle
// and compute a hash
hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_128]byte, bool) {
if !load_at_once {
return hash_stream_128(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_128(buf[:]), ok
}
}
return [DIGEST_SIZE_128]byte{}, false
}
hash_128 :: proc {
hash_stream_128,
hash_file_128,
hash_bytes_128,
hash_string_128,
hash_bytes_to_buffer_128,
hash_string_to_buffer_128,
}
// hash_string_160 will hash the given input and return the
// computed hash
hash_string_160 :: proc(data: string) -> [DIGEST_SIZE_160]byte {
return hash_bytes_160(transmute([]byte)(data))
}
// hash_bytes_160 will hash the given input and return the
// computed hash
hash_bytes_160 :: proc(data: []byte) -> [DIGEST_SIZE_160]byte {
hash: [DIGEST_SIZE_160]byte
ctx: Ripemd160_Context
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_160 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_160 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_160(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_160 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_160 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_160, "Size of destination buffer is smaller than the digest size")
ctx: Ripemd160_Context
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream_160 will read the stream in chunks and compute a
// hash from its contents
hash_stream_160 :: proc(s: io.Stream) -> ([DIGEST_SIZE_160]byte, bool) {
hash: [DIGEST_SIZE_160]byte
ctx: Ripemd160_Context
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_160 will read the file provided by the given handle
// and compute a hash
hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_160]byte, bool) {
if !load_at_once {
return hash_stream_160(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_160(buf[:]), ok
}
}
return [DIGEST_SIZE_160]byte{}, false
}
hash_160 :: proc {
hash_stream_160,
hash_file_160,
hash_bytes_160,
hash_string_160,
hash_bytes_to_buffer_160,
hash_string_to_buffer_160,
}
// hash_string_256 will hash the given input and return the
// computed hash
hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
return hash_bytes_256(transmute([]byte)(data))
}
// hash_bytes_256 will hash the given input and return the
// computed hash
hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
hash: [DIGEST_SIZE_256]byte
ctx: Ripemd256_Context
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_256 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_256 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
ctx: Ripemd256_Context
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream_256 will read the stream in chunks and compute a
// hash from its contents
hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
hash: [DIGEST_SIZE_256]byte
ctx: Ripemd256_Context
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_256 will read the file provided by the given handle
// and compute a hash
hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
if !load_at_once {
return hash_stream_256(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_256(buf[:]), ok
}
}
return [DIGEST_SIZE_256]byte{}, false
}
hash_256 :: proc {
hash_stream_256,
hash_file_256,
hash_bytes_256,
hash_string_256,
hash_bytes_to_buffer_256,
hash_string_to_buffer_256,
}
// hash_string_320 will hash the given input and return the
// computed hash
hash_string_320 :: proc(data: string) -> [DIGEST_SIZE_320]byte {
return hash_bytes_320(transmute([]byte)(data))
}
// hash_bytes_320 will hash the given input and return the
// computed hash
hash_bytes_320 :: proc(data: []byte) -> [DIGEST_SIZE_320]byte {
hash: [DIGEST_SIZE_320]byte
ctx: Ripemd320_Context
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_320 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_320 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_320(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_320 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_320 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_320, "Size of destination buffer is smaller than the digest size")
ctx: Ripemd320_Context
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream_320 will read the stream in chunks and compute a
// hash from its contents
hash_stream_320 :: proc(s: io.Stream) -> ([DIGEST_SIZE_320]byte, bool) {
hash: [DIGEST_SIZE_320]byte
ctx: Ripemd320_Context
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_320 will read the file provided by the given handle
// and compute a hash
hash_file_320 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_320]byte, bool) {
if !load_at_once {
return hash_stream_320(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_320(buf[:]), ok
}
}
return [DIGEST_SIZE_320]byte{}, false
}
hash_320 :: proc {
hash_stream_320,
hash_file_320,
hash_bytes_320,
hash_string_320,
hash_bytes_to_buffer_320,
hash_string_to_buffer_320,
}
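// Usage sketch (assuming this package is imported as "ripemd"):
//
// digest := ripemd.hash_string_160("Hellope") // [DIGEST_SIZE_160]byte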
/*
Low level API
*/
init :: proc(ctx: ^$T) {
when T == Ripemd128_Context {
ctx.s[0], ctx.s[1], ctx.s[2], ctx.s[3] = S0, S1, S2, S3
} else when T == Ripemd160_Context {
ctx.s[0], ctx.s[1], ctx.s[2], ctx.s[3], ctx.s[4] = S0, S1, S2, S3, S4
} else when T == Ripemd256_Context {
ctx.s[0], ctx.s[1], ctx.s[2], ctx.s[3] = S0, S1, S2, S3
ctx.s[4], ctx.s[5], ctx.s[6], ctx.s[7] = S5, S6, S7, S8
} else when T == Ripemd320_Context {
ctx.s[0], ctx.s[1], ctx.s[2], ctx.s[3], ctx.s[4] = S0, S1, S2, S3, S4
ctx.s[5], ctx.s[6], ctx.s[7], ctx.s[8], ctx.s[9] = S5, S6, S7, S8, S9
}
}
update :: proc(ctx: ^$T, data: []byte) {
ctx.tc += u64(len(data))
data := data
if ctx.nx > 0 {
n := len(data)
when T == Ripemd128_Context {
if n > RIPEMD_128_BLOCK_SIZE - ctx.nx {
n = RIPEMD_128_BLOCK_SIZE - ctx.nx
}
} else when T == Ripemd160_Context {
if n > RIPEMD_160_BLOCK_SIZE - ctx.nx {
n = RIPEMD_160_BLOCK_SIZE - ctx.nx
}
} else when T == Ripemd256_Context {
if n > RIPEMD_256_BLOCK_SIZE - ctx.nx {
n = RIPEMD_256_BLOCK_SIZE - ctx.nx
}
} else when T == Ripemd320_Context {
if n > RIPEMD_320_BLOCK_SIZE - ctx.nx {
n = RIPEMD_320_BLOCK_SIZE - ctx.nx
}
}
for i := 0; i < n; i += 1 {
ctx.x[ctx.nx + i] = data[i]
}
ctx.nx += n
when T == Ripemd128_Context {
if ctx.nx == RIPEMD_128_BLOCK_SIZE {
block(ctx, ctx.x[0:])
ctx.nx = 0
}
} else when T == Ripemd160_Context {
if ctx.nx == RIPEMD_160_BLOCK_SIZE {
block(ctx, ctx.x[0:])
ctx.nx = 0
}
} else when T == Ripemd256_Context {
if ctx.nx == RIPEMD_256_BLOCK_SIZE {
block(ctx, ctx.x[0:])
ctx.nx = 0
}
} else when T == Ripemd320_Context {
if ctx.nx == RIPEMD_320_BLOCK_SIZE {
block(ctx, ctx.x[0:])
ctx.nx = 0
}
}
data = data[n:]
}
n := block(ctx, data)
data = data[n:]
if len(data) > 0 {
ctx.nx = copy(ctx.x[:], data)
}
}
final :: proc(ctx: ^$T, hash: []byte) {
d := ctx
tc := d.tc
tmp: [64]byte
tmp[0] = 0x80
if tc % 64 < 56 {
update(d, tmp[0:56 - tc % 64])
} else {
update(d, tmp[0:64 + 56 - tc % 64])
}
tc <<= 3
for i : u32 = 0; i < 8; i += 1 {
tmp[i] = byte(tc >> (8 * i))
}
update(d, tmp[0:8])
when T == Ripemd128_Context {
size :: RIPEMD_128_SIZE
} else when T == Ripemd160_Context {
size :: RIPEMD_160_SIZE
} else when T == Ripemd256_Context {
size :: RIPEMD_256_SIZE
} else when T == Ripemd320_Context {
size :: RIPEMD_320_SIZE
}
digest: [size]byte
for s, i in d.s {
digest[i * 4] = byte(s)
digest[i * 4 + 1] = byte(s >> 8)
digest[i * 4 + 2] = byte(s >> 16)
digest[i * 4 + 3] = byte(s >> 24)
}
copy(hash[:], digest[:])
}
/*
RIPEMD implementation
*/
Ripemd128_Context :: struct {
s: [4]u32,
x: [RIPEMD_128_BLOCK_SIZE]byte,
nx: int,
tc: u64,
}
Ripemd160_Context :: struct {
s: [5]u32,
x: [RIPEMD_160_BLOCK_SIZE]byte,
nx: int,
tc: u64,
}
Ripemd256_Context :: struct {
s: [8]u32,
x: [RIPEMD_256_BLOCK_SIZE]byte,
nx: int,
tc: u64,
}
Ripemd320_Context :: struct {
s: [10]u32,
x: [RIPEMD_320_BLOCK_SIZE]byte,
nx: int,
tc: u64,
}
RIPEMD_128_SIZE :: 16
RIPEMD_128_BLOCK_SIZE :: 64
RIPEMD_160_SIZE :: 20
RIPEMD_160_BLOCK_SIZE :: 64
RIPEMD_256_SIZE :: 32
RIPEMD_256_BLOCK_SIZE :: 64
RIPEMD_320_SIZE :: 40
RIPEMD_320_BLOCK_SIZE :: 64
S0 :: 0x67452301
S1 :: 0xefcdab89
S2 :: 0x98badcfe
S3 :: 0x10325476
S4 :: 0xc3d2e1f0
S5 :: 0x76543210
S6 :: 0xfedcba98
S7 :: 0x89abcdef
S8 :: 0x01234567
S9 :: 0x3c2d1e0f
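// S0..S4 are the common MD4-family initial chaining values; S5..S9 seed the
// second, parallel half of the doubled-state RIPEMD-256/320 variants.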
RIPEMD_128_N0 := [64]uint {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
7, 4, 13, 1, 10, 6, 15, 3, 12, 0, 9, 5, 2, 14, 11, 8,
3, 10, 14, 4, 9, 15, 8, 1, 2, 7, 0, 6, 13, 11, 5, 12,
1, 9, 11, 10, 0, 8, 12, 4, 13, 3, 7, 15, 14, 5, 6, 2,
}
RIPEMD_128_R0 := [64]uint {
11, 14, 15, 12, 5, 8, 7, 9, 11, 13, 14, 15, 6, 7, 9, 8,
7, 6, 8, 13, 11, 9, 7, 15, 7, 12, 15, 9, 11, 7, 13, 12,
11, 13, 6, 7, 14, 9, 13, 15, 14, 8, 13, 6, 5, 12, 7, 5,
11, 12, 14, 15, 14, 15, 9, 8, 9, 14, 5, 6, 8, 6, 5, 12,
}
RIPEMD_128_N1 := [64]uint {
5, 14, 7, 0, 9, 2, 11, 4, 13, 6, 15, 8, 1, 10, 3, 12,
6, 11, 3, 7, 0, 13, 5, 10, 14, 15, 8, 12, 4, 9, 1, 2,
15, 5, 1, 3, 7, 14, 6, 9, 11, 8, 12, 2, 10, 0, 4, 13,
8, 6, 4, 1, 3, 11, 15, 0, 5, 12, 2, 13, 9, 7, 10, 14,
}
RIPEMD_128_R1 := [64]uint {
8, 9, 9, 11, 13, 15, 15, 5, 7, 7, 8, 11, 14, 14, 12, 6,
9, 13, 15, 7, 12, 8, 9, 11, 7, 7, 12, 7, 6, 15, 13, 11,
9, 7, 15, 11, 8, 6, 6, 14, 12, 13, 5, 14, 13, 13, 7, 5,
15, 5, 8, 11, 14, 14, 6, 14, 6, 9, 12, 9, 12, 5, 15, 8,
}
RIPEMD_160_N0 := [80]uint {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
7, 4, 13, 1, 10, 6, 15, 3, 12, 0, 9, 5, 2, 14, 11, 8,
3, 10, 14, 4, 9, 15, 8, 1, 2, 7, 0, 6, 13, 11, 5, 12,
1, 9, 11, 10, 0, 8, 12, 4, 13, 3, 7, 15, 14, 5, 6, 2,
4, 0, 5, 9, 7, 12, 2, 10, 14, 1, 3, 8, 11, 6, 15, 13,
}
RIPEMD_160_R0 := [80]uint {
11, 14, 15, 12, 5, 8, 7, 9, 11, 13, 14, 15, 6, 7, 9, 8,
7, 6, 8, 13, 11, 9, 7, 15, 7, 12, 15, 9, 11, 7, 13, 12,
11, 13, 6, 7, 14, 9, 13, 15, 14, 8, 13, 6, 5, 12, 7, 5,
11, 12, 14, 15, 14, 15, 9, 8, 9, 14, 5, 6, 8, 6, 5, 12,
9, 15, 5, 11, 6, 8, 13, 12, 5, 12, 13, 14, 11, 8, 5, 6,
}
RIPEMD_160_N1 := [80]uint {
5, 14, 7, 0, 9, 2, 11, 4, 13, 6, 15, 8, 1, 10, 3, 12,
6, 11, 3, 7, 0, 13, 5, 10, 14, 15, 8, 12, 4, 9, 1, 2,
15, 5, 1, 3, 7, 14, 6, 9, 11, 8, 12, 2, 10, 0, 4, 13,
8, 6, 4, 1, 3, 11, 15, 0, 5, 12, 2, 13, 9, 7, 10, 14,
12, 15, 10, 4, 1, 5, 8, 7, 6, 2, 13, 14, 0, 3, 9, 11,
}
RIPEMD_160_R1 := [80]uint {
8, 9, 9, 11, 13, 15, 15, 5, 7, 7, 8, 11, 14, 14, 12, 6,
9, 13, 15, 7, 12, 8, 9, 11, 7, 7, 12, 7, 6, 15, 13, 11,
9, 7, 15, 11, 8, 6, 6, 14, 12, 13, 5, 14, 13, 13, 7, 5,
15, 5, 8, 11, 14, 14, 6, 14, 6, 9, 12, 9, 12, 5, 15, 8,
8, 5, 12, 9, 12, 5, 14, 6, 8, 13, 6, 5, 15, 13, 11, 11,
}
block :: #force_inline proc (ctx: ^$T, p: []byte) -> int {
when T == Ripemd128_Context {
return ripemd_128_block(ctx, p)
}
else when T == Ripemd160_Context {
return ripemd_160_block(ctx, p)
}
else when T == Ripemd256_Context {
return ripemd_256_block(ctx, p)
}
else when T == Ripemd320_Context {
return ripemd_320_block(ctx, p)
}
}
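// 'block' dispatches on the context type at compile time via 'when', so each
// instantiation calls its matching compression function with no runtime cost.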
ripemd_128_block :: proc(ctx: ^$T, p: []byte) -> int {
n := 0
x: [16]u32 = ---
alpha: u32 = ---
p := p
for len(p) >= RIPEMD_128_BLOCK_SIZE {
a, b, c, d := ctx.s[0], ctx.s[1], ctx.s[2], ctx.s[3]
aa, bb, cc, dd := a, b, c, d
for i, j := 0, 0; i < 16; i, j = i+1, j+4 {
x[i] = u32(p[j]) | u32(p[j+1])<<8 | u32(p[j+2])<<16 | u32(p[j+3])<<24
}
i := 0
for i < 16 {
alpha = a + (b ~ c ~ d) + x[RIPEMD_128_N0[i]]
s := int(RIPEMD_128_R0[i])
alpha = util.ROTL32(alpha, s)
a, b, c, d = d, alpha, b, c
alpha = aa + (bb & dd | cc &~ dd) + x[RIPEMD_128_N1[i]] + 0x50a28be6
s = int(RIPEMD_128_R1[i])
alpha = util.ROTL32(alpha, s)
aa, bb, cc, dd = dd, alpha, bb, cc
i += 1
}
for i < 32 {
alpha = a + (d ~ (b & (c~d))) + x[RIPEMD_128_N0[i]] + 0x5a827999
s := int(RIPEMD_128_R0[i])
alpha = util.ROTL32(alpha, s)
a, b, c, d = d, alpha, b, c
alpha = aa + (dd ~ (bb | ~cc)) + x[RIPEMD_128_N1[i]] + 0x5c4dd124
s = int(RIPEMD_128_R1[i])
alpha = util.ROTL32(alpha, s)
aa, bb, cc, dd = dd, alpha, bb, cc
i += 1
}
for i < 48 {
alpha = a + (d ~ (b | ~c)) + x[RIPEMD_128_N0[i]] + 0x6ed9eba1
s := int(RIPEMD_128_R0[i])
alpha = util.ROTL32(alpha, s)
a, b, c, d = d, alpha, b, c
alpha = aa + (dd ~ (bb & (cc~dd))) + x[RIPEMD_128_N1[i]] + 0x6d703ef3
s = int(RIPEMD_128_R1[i])
alpha = util.ROTL32(alpha, s)
aa, bb, cc, dd = dd, alpha, bb, cc
i += 1
}
for i < 64 {
alpha = a + (c ~ (d & (b~c))) + x[RIPEMD_128_N0[i]] + 0x8f1bbcdc
s := int(RIPEMD_128_R0[i])
alpha = util.ROTL32(alpha, s)
a, b, c, d = d, alpha, b, c
alpha = aa + (bb ~ cc ~ dd) + x[RIPEMD_128_N1[i]]
s = int(RIPEMD_128_R1[i])
alpha = util.ROTL32(alpha, s)
aa, bb, cc, dd = dd, alpha, bb, cc
i += 1
}
c = ctx.s[1] + c + dd
ctx.s[1] = ctx.s[2] + d + aa
ctx.s[2] = ctx.s[3] + a + bb
ctx.s[3] = ctx.s[0] + b + cc
ctx.s[0] = c
p = p[RIPEMD_128_BLOCK_SIZE:]
n += RIPEMD_128_BLOCK_SIZE
}
return n
}
ripemd_160_block :: proc(ctx: ^$T, p: []byte) -> int {
n := 0
x: [16]u32 = ---
alpha, beta: u32 = ---, ---
p := p
for len(p) >= RIPEMD_160_BLOCK_SIZE {
a, b, c, d, e := ctx.s[0], ctx.s[1], ctx.s[2], ctx.s[3], ctx.s[4]
aa, bb, cc, dd, ee := a, b, c, d, e
for i, j := 0, 0; i < 16; i, j = i+1, j+4 {
x[i] = u32(p[j]) | u32(p[j+1])<<8 | u32(p[j+2])<<16 | u32(p[j+3])<<24
}
i := 0
for i < 16 {
alpha = a + (b ~ c ~ d) + x[RIPEMD_160_N0[i]]
s := int(RIPEMD_160_R0[i])
alpha = util.ROTL32(alpha, s) + e
beta = util.ROTL32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
alpha = aa + (bb ~ (cc | ~dd)) + x[RIPEMD_160_N1[i]] + 0x50a28be6
s = int(RIPEMD_160_R1[i])
alpha = util.ROTL32(alpha, s) + ee
beta = util.ROTL32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i += 1
}
for i < 32 {
alpha = a + (b&c | ~b&d) + x[RIPEMD_160_N0[i]] + 0x5a827999
s := int(RIPEMD_160_R0[i])
alpha = util.ROTL32(alpha, s) + e
beta = util.ROTL32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
alpha = aa + (bb&dd | cc&~dd) + x[RIPEMD_160_N1[i]] + 0x5c4dd124
s = int(RIPEMD_160_R1[i])
alpha = util.ROTL32(alpha, s) + ee
beta = util.ROTL32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i += 1
}
for i < 48 {
alpha = a + (b | ~c ~ d) + x[RIPEMD_160_N0[i]] + 0x6ed9eba1
s := int(RIPEMD_160_R0[i])
alpha = util.ROTL32(alpha, s) + e
beta = util.ROTL32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
alpha = aa + (bb | ~cc ~ dd) + x[RIPEMD_160_N1[i]] + 0x6d703ef3
s = int(RIPEMD_160_R1[i])
alpha = util.ROTL32(alpha, s) + ee
beta = util.ROTL32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i += 1
}
for i < 64 {
alpha = a + (b&d | c&~d) + x[RIPEMD_160_N0[i]] + 0x8f1bbcdc
s := int(RIPEMD_160_R0[i])
alpha = util.ROTL32(alpha, s) + e
beta = util.ROTL32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
alpha = aa + (bb&cc | ~bb&dd) + x[RIPEMD_160_N1[i]] + 0x7a6d76e9
s = int(RIPEMD_160_R1[i])
alpha = util.ROTL32(alpha, s) + ee
beta = util.ROTL32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i += 1
}
for i < 80 {
alpha = a + (b ~ (c | ~d)) + x[RIPEMD_160_N0[i]] + 0xa953fd4e
s := int(RIPEMD_160_R0[i])
alpha = util.ROTL32(alpha, s) + e
beta = util.ROTL32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
alpha = aa + (bb ~ cc ~ dd) + x[RIPEMD_160_N1[i]]
s = int(RIPEMD_160_R1[i])
alpha = util.ROTL32(alpha, s) + ee
beta = util.ROTL32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i += 1
}
dd += c + ctx.s[1]
ctx.s[1] = ctx.s[2] + d + ee
ctx.s[2] = ctx.s[3] + e + aa
ctx.s[3] = ctx.s[4] + a + bb
ctx.s[4] = ctx.s[0] + b + cc
ctx.s[0] = dd
p = p[RIPEMD_160_BLOCK_SIZE:]
n += RIPEMD_160_BLOCK_SIZE
}
return n
}
ripemd_256_block :: proc(ctx: ^$T, p: []byte) -> int {
n := 0
x: [16]u32 = ---
alpha: u32 = ---
p := p
for len(p) >= RIPEMD_256_BLOCK_SIZE {
a, b, c, d := ctx.s[0], ctx.s[1], ctx.s[2], ctx.s[3]
aa, bb, cc, dd := ctx.s[4], ctx.s[5], ctx.s[6], ctx.s[7]
for i, j := 0, 0; i < 16; i, j = i+1, j+4 {
x[i] = u32(p[j]) | u32(p[j+1])<<8 | u32(p[j+2])<<16 | u32(p[j+3])<<24
}
i := 0
for i < 16 {
alpha = a + (b ~ c ~ d) + x[RIPEMD_128_N0[i]]
s := int(RIPEMD_128_R0[i])
alpha = util.ROTL32(alpha, s)
a, b, c, d = d, alpha, b, c
alpha = aa + (bb & dd | cc &~ dd) + x[RIPEMD_128_N1[i]] + 0x50a28be6
s = int(RIPEMD_128_R1[i])
alpha = util.ROTL32(alpha, s)
aa, bb, cc, dd = dd, alpha, bb, cc
i += 1
}
t := a
a = aa
aa = t
for i < 32 {
alpha = a + (d ~ (b & (c~d))) + x[RIPEMD_128_N0[i]] + 0x5a827999
s := int(RIPEMD_128_R0[i])
alpha = util.ROTL32(alpha, s)
a, b, c, d = d, alpha, b, c
alpha = aa + (dd ~ (bb | ~cc)) + x[RIPEMD_128_N1[i]] + 0x5c4dd124
s = int(RIPEMD_128_R1[i])
alpha = util.ROTL32(alpha, s)
aa, bb, cc, dd = dd, alpha, bb, cc
i += 1
}
t = b
b = bb
bb = t
for i < 48 {
alpha = a + (d ~ (b | ~c)) + x[RIPEMD_128_N0[i]] + 0x6ed9eba1
s := int(RIPEMD_128_R0[i])
alpha = util.ROTL32(alpha, s)
a, b, c, d = d, alpha, b, c
alpha = aa + (dd ~ (bb & (cc~dd))) + x[RIPEMD_128_N1[i]] + 0x6d703ef3
s = int(RIPEMD_128_R1[i])
alpha = util.ROTL32(alpha, s)
aa, bb, cc, dd = dd, alpha, bb, cc
i += 1
}
t = c
c = cc
cc = t
for i < 64 {
alpha = a + (c ~ (d & (b~c))) + x[RIPEMD_128_N0[i]] + 0x8f1bbcdc
s := int(RIPEMD_128_R0[i])
alpha = util.ROTL32(alpha, s)
a, b, c, d = d, alpha, b, c
alpha = aa + (bb ~ cc ~ dd) + x[RIPEMD_128_N1[i]]
s = int(RIPEMD_128_R1[i])
alpha = util.ROTL32(alpha, s)
aa, bb, cc, dd = dd, alpha, bb, cc
i += 1
}
t = d
d = dd
dd = t
ctx.s[0] += a
ctx.s[1] += b
ctx.s[2] += c
ctx.s[3] += d
ctx.s[4] += aa
ctx.s[5] += bb
ctx.s[6] += cc
ctx.s[7] += dd
p = p[RIPEMD_256_BLOCK_SIZE:]
n += RIPEMD_256_BLOCK_SIZE
}
return n
}
ripemd_320_block :: proc(ctx: ^$T, p: []byte) -> int {
n := 0
x: [16]u32 = ---
alpha, beta: u32 = ---, ---
p := p
for len(p) >= RIPEMD_320_BLOCK_SIZE {
a, b, c, d, e := ctx.s[0], ctx.s[1], ctx.s[2], ctx.s[3], ctx.s[4]
aa, bb, cc, dd, ee := ctx.s[5], ctx.s[6], ctx.s[7], ctx.s[8], ctx.s[9]
for i, j := 0, 0; i < 16; i, j = i+1, j+4 {
x[i] = u32(p[j]) | u32(p[j+1])<<8 | u32(p[j+2])<<16 | u32(p[j+3])<<24
}
i := 0
for i < 16 {
alpha = a + (b ~ c ~ d) + x[RIPEMD_160_N0[i]]
s := int(RIPEMD_160_R0[i])
alpha = util.ROTL32(alpha, s) + e
beta = util.ROTL32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
alpha = aa + (bb ~ (cc | ~dd)) + x[RIPEMD_160_N1[i]] + 0x50a28be6
s = int(RIPEMD_160_R1[i])
alpha = util.ROTL32(alpha, s) + ee
beta = util.ROTL32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i += 1
}
t := b
b = bb
bb = t
for i < 32 {
alpha = a + (b&c | ~b&d) + x[RIPEMD_160_N0[i]] + 0x5a827999
s := int(RIPEMD_160_R0[i])
alpha = util.ROTL32(alpha, s) + e
beta = util.ROTL32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
alpha = aa + (bb&dd | cc&~dd) + x[RIPEMD_160_N1[i]] + 0x5c4dd124
s = int(RIPEMD_160_R1[i])
alpha = util.ROTL32(alpha, s) + ee
beta = util.ROTL32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i += 1
}
t = d
d = dd
dd = t
for i < 48 {
alpha = a + (b | ~c ~ d) + x[RIPEMD_160_N0[i]] + 0x6ed9eba1
s := int(RIPEMD_160_R0[i])
alpha = util.ROTL32(alpha, s) + e
beta = util.ROTL32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
alpha = aa + (bb | ~cc ~ dd) + x[RIPEMD_160_N1[i]] + 0x6d703ef3
s = int(RIPEMD_160_R1[i])
alpha = util.ROTL32(alpha, s) + ee
beta = util.ROTL32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i += 1
}
t = a
a = aa
aa = t
for i < 64 {
alpha = a + (b&d | c&~d) + x[RIPEMD_160_N0[i]] + 0x8f1bbcdc
s := int(RIPEMD_160_R0[i])
alpha = util.ROTL32(alpha, s) + e
beta = util.ROTL32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
alpha = aa + (bb&cc | ~bb&dd) + x[RIPEMD_160_N1[i]] + 0x7a6d76e9
s = int(RIPEMD_160_R1[i])
alpha = util.ROTL32(alpha, s) + ee
beta = util.ROTL32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i += 1
}
t = c
c = cc
cc = t
for i < 80 {
alpha = a + (b ~ (c | ~d)) + x[RIPEMD_160_N0[i]] + 0xa953fd4e
s := int(RIPEMD_160_R0[i])
alpha = util.ROTL32(alpha, s) + e
beta = util.ROTL32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
alpha = aa + (bb ~ cc ~ dd) + x[RIPEMD_160_N1[i]]
s = int(RIPEMD_160_R1[i])
alpha = util.ROTL32(alpha, s) + ee
beta = util.ROTL32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i += 1
}
t = e
e = ee
ee = t
ctx.s[0] += a
ctx.s[1] += b
ctx.s[2] += c
ctx.s[3] += d
ctx.s[4] += e
ctx.s[5] += aa
ctx.s[6] += bb
ctx.s[7] += cc
ctx.s[8] += dd
ctx.s[9] += ee
p = p[RIPEMD_320_BLOCK_SIZE:]
n += RIPEMD_320_BLOCK_SIZE
}
return n
}

View File

@@ -1,246 +0,0 @@
package sha1
/*
Copyright 2021 zhibog
Made available under the BSD-3 license.
List of contributors:
zhibog, dotbmp: Initial implementation.
Implementation of the SHA1 hashing algorithm, as defined in RFC 3174 <https://datatracker.ietf.org/doc/html/rfc3174>
*/
import "core:mem"
import "core:os"
import "core:io"
import "../util"
/*
High level API
*/
DIGEST_SIZE :: 20
// hash_string will hash the given input and return the
// computed hash
hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
return hash_bytes(transmute([]byte)(data))
}
// hash_bytes will hash the given input and return the
// computed hash
hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
hash: [DIGEST_SIZE]byte
ctx: Sha1_Context
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
ctx: Sha1_Context
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream will read the stream in chunks and compute a
// hash from its contents
hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
hash: [DIGEST_SIZE]byte
ctx: Sha1_Context
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file will read the file provided by the given handle
// and compute a hash
hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes(buf[:]), ok
}
}
return [DIGEST_SIZE]byte{}, false
}
hash :: proc {
hash_stream,
hash_file,
hash_bytes,
hash_string,
hash_bytes_to_buffer,
hash_string_to_buffer,
}
/*
Low level API
*/
init :: proc(ctx: ^Sha1_Context) {
ctx.state[0] = 0x67452301
ctx.state[1] = 0xefcdab89
ctx.state[2] = 0x98badcfe
ctx.state[3] = 0x10325476
ctx.state[4] = 0xc3d2e1f0
ctx.k[0] = 0x5a827999
ctx.k[1] = 0x6ed9eba1
ctx.k[2] = 0x8f1bbcdc
ctx.k[3] = 0xca62c1d6
}
update :: proc(ctx: ^Sha1_Context, data: []byte) {
for i := 0; i < len(data); i += 1 {
ctx.data[ctx.datalen] = data[i]
ctx.datalen += 1
if (ctx.datalen == BLOCK_SIZE) {
transform(ctx, ctx.data[:])
ctx.bitlen += 512
ctx.datalen = 0
}
}
}
final :: proc(ctx: ^Sha1_Context, hash: []byte) {
i := ctx.datalen
if ctx.datalen < 56 {
ctx.data[i] = 0x80
i += 1
for i < 56 {
ctx.data[i] = 0x00
i += 1
}
}
else {
ctx.data[i] = 0x80
i += 1
for i < BLOCK_SIZE {
ctx.data[i] = 0x00
i += 1
}
transform(ctx, ctx.data[:])
mem.set(&ctx.data, 0, 56)
}
ctx.bitlen += u64(ctx.datalen * 8)
ctx.data[63] = u8(ctx.bitlen)
ctx.data[62] = u8(ctx.bitlen >> 8)
ctx.data[61] = u8(ctx.bitlen >> 16)
ctx.data[60] = u8(ctx.bitlen >> 24)
ctx.data[59] = u8(ctx.bitlen >> 32)
ctx.data[58] = u8(ctx.bitlen >> 40)
ctx.data[57] = u8(ctx.bitlen >> 48)
ctx.data[56] = u8(ctx.bitlen >> 56)
transform(ctx, ctx.data[:])
for j: u32 = 0; j < 4; j += 1 {
hash[j] = u8(ctx.state[0] >> (24 - j * 8)) & 0x000000ff
hash[j + 4] = u8(ctx.state[1] >> (24 - j * 8)) & 0x000000ff
hash[j + 8] = u8(ctx.state[2] >> (24 - j * 8)) & 0x000000ff
hash[j + 12] = u8(ctx.state[3] >> (24 - j * 8)) & 0x000000ff
hash[j + 16] = u8(ctx.state[4] >> (24 - j * 8)) & 0x000000ff
}
}
/*
SHA1 implementation
*/
BLOCK_SIZE :: 64
Sha1_Context :: struct {
data: [BLOCK_SIZE]byte,
datalen: u32,
bitlen: u64,
state: [5]u32,
k: [4]u32,
}
transform :: proc(ctx: ^Sha1_Context, data: []byte) {
a, b, c, d, e, i, j, t: u32
m: [80]u32
for i, j = 0, 0; i < 16; i += 1 {
m[i] = u32(data[j]) << 24 + u32(data[j + 1]) << 16 + u32(data[j + 2]) << 8 + u32(data[j + 3])
j += 4
}
for i < 80 {
m[i] = (m[i - 3] ~ m[i - 8] ~ m[i - 14] ~ m[i - 16])
m[i] = (m[i] << 1) | (m[i] >> 31)
i += 1
}
a = ctx.state[0]
b = ctx.state[1]
c = ctx.state[2]
d = ctx.state[3]
e = ctx.state[4]
for i = 0; i < 20; i += 1 {
t = util.ROTL32(a, 5) + ((b & c) ~ (~b & d)) + e + ctx.k[0] + m[i]
e = d
d = c
c = util.ROTL32(b, 30)
b = a
a = t
}
for i < 40 {
t = util.ROTL32(a, 5) + (b ~ c ~ d) + e + ctx.k[1] + m[i]
e = d
d = c
c = util.ROTL32(b, 30)
b = a
a = t
i += 1
}
for i < 60 {
t = util.ROTL32(a, 5) + ((b & c) ~ (b & d) ~ (c & d)) + e + ctx.k[2] + m[i]
e = d
d = c
c = util.ROTL32(b, 30)
b = a
a = t
i += 1
}
for i < 80 {
t = util.ROTL32(a, 5) + (b ~ c ~ d) + e + ctx.k[3] + m[i]
e = d
d = c
c = util.ROTL32(b, 30)
b = a
a = t
i += 1
}
ctx.state[0] += a
ctx.state[1] += b
ctx.state[2] += c
ctx.state[3] += d
ctx.state[4] += e
}

File diff suppressed because it is too large

View File

@@ -11,8 +11,8 @@ package sha3
If you wish to compute a Keccak hash, you can use the keccak package, it will use the original padding.
*/
import "core:os"
import "core:io"
import "core:os"
import "../_sha3"
@@ -28,333 +28,337 @@ DIGEST_SIZE_512 :: 64
// hash_string_224 will hash the given input and return the
// computed hash
hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
return hash_bytes_224(transmute([]byte)(data))
return hash_bytes_224(transmute([]byte)(data))
}
// hash_bytes_224 will hash the given input and return the
// computed hash
hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
hash: [DIGEST_SIZE_224]byte
ctx: _sha3.Sha3_Context
ctx.mdlen = DIGEST_SIZE_224
_sha3.init(&ctx)
_sha3.update(&ctx, data)
_sha3.final(&ctx, hash[:])
return hash
hash: [DIGEST_SIZE_224]byte
ctx: Context
ctx.mdlen = DIGEST_SIZE_224
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_224 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_224 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
ctx: _sha3.Sha3_Context
ctx.mdlen = DIGEST_SIZE_224
_sha3.init(&ctx)
_sha3.update(&ctx, data)
_sha3.final(&ctx, hash)
ctx: Context
ctx.mdlen = DIGEST_SIZE_224
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream_224 will read the stream in chunks and compute a
// hash from its contents
hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
hash: [DIGEST_SIZE_224]byte
ctx: _sha3.Sha3_Context
ctx.mdlen = DIGEST_SIZE_224
_sha3.init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
_sha3.update(&ctx, buf[:read])
}
}
_sha3.final(&ctx, hash[:])
return hash, true
hash: [DIGEST_SIZE_224]byte
ctx: Context
ctx.mdlen = DIGEST_SIZE_224
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_224 will read the file provided by the given handle
// and compute a hash
hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
if !load_at_once {
return hash_stream_224(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_224(buf[:]), ok
}
}
return [DIGEST_SIZE_224]byte{}, false
if !load_at_once {
return hash_stream_224(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_224(buf[:]), ok
}
}
return [DIGEST_SIZE_224]byte{}, false
}
hash_224 :: proc {
hash_stream_224,
hash_file_224,
hash_bytes_224,
hash_string_224,
hash_bytes_to_buffer_224,
hash_string_to_buffer_224,
hash_stream_224,
hash_file_224,
hash_bytes_224,
hash_string_224,
hash_bytes_to_buffer_224,
hash_string_to_buffer_224,
}
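// Usage sketch (assuming this package is imported as "sha3"):
//
// digest := sha3.hash_string_224("Hellope") // [DIGEST_SIZE_224]byte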
// hash_string_256 will hash the given input and return the
// computed hash
hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
return hash_bytes_256(transmute([]byte)(data))
return hash_bytes_256(transmute([]byte)(data))
}
// hash_bytes_256 will hash the given input and return the
// computed hash
hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
hash: [DIGEST_SIZE_256]byte
ctx: _sha3.Sha3_Context
ctx.mdlen = DIGEST_SIZE_256
_sha3.init(&ctx)
_sha3.update(&ctx, data)
_sha3.final(&ctx, hash[:])
return hash
hash: [DIGEST_SIZE_256]byte
ctx: Context
ctx.mdlen = DIGEST_SIZE_256
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_256 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_256 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
ctx: _sha3.Sha3_Context
ctx.mdlen = DIGEST_SIZE_256
_sha3.init(&ctx)
_sha3.update(&ctx, data)
_sha3.final(&ctx, hash)
ctx: Context
ctx.mdlen = DIGEST_SIZE_256
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream_256 will read the stream in chunks and compute a
// hash from its contents
hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
hash: [DIGEST_SIZE_256]byte
ctx: _sha3.Sha3_Context
ctx.mdlen = DIGEST_SIZE_256
_sha3.init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
_sha3.update(&ctx, buf[:read])
}
}
_sha3.final(&ctx, hash[:])
return hash, true
hash: [DIGEST_SIZE_256]byte
ctx: Context
ctx.mdlen = DIGEST_SIZE_256
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_256 will read the file provided by the given handle
// and compute a hash
hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
if !load_at_once {
return hash_stream_256(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_256(buf[:]), ok
}
}
return [DIGEST_SIZE_256]byte{}, false
if !load_at_once {
return hash_stream_256(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_256(buf[:]), ok
}
}
return [DIGEST_SIZE_256]byte{}, false
}
hash_256 :: proc {
hash_stream_256,
hash_file_256,
hash_bytes_256,
hash_string_256,
hash_bytes_to_buffer_256,
hash_string_to_buffer_256,
hash_stream_256,
hash_file_256,
hash_bytes_256,
hash_string_256,
hash_bytes_to_buffer_256,
hash_string_to_buffer_256,
}
// hash_string_384 will hash the given input and return the
// computed hash
hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte {
return hash_bytes_384(transmute([]byte)(data))
return hash_bytes_384(transmute([]byte)(data))
}
// hash_bytes_384 will hash the given input and return the
// computed hash
hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
hash: [DIGEST_SIZE_384]byte
ctx: _sha3.Sha3_Context
ctx.mdlen = DIGEST_SIZE_384
_sha3.init(&ctx)
_sha3.update(&ctx, data)
_sha3.final(&ctx, hash[:])
return hash
hash: [DIGEST_SIZE_384]byte
ctx: Context
ctx.mdlen = DIGEST_SIZE_384
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_384 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_384 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_384, "Size of destination buffer is smaller than the digest size")
ctx: _sha3.Sha3_Context
ctx.mdlen = DIGEST_SIZE_384
_sha3.init(&ctx)
_sha3.update(&ctx, data)
_sha3.final(&ctx, hash)
ctx: Context
ctx.mdlen = DIGEST_SIZE_384
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream_384 will read the stream in chunks and compute a
// hash from its contents
hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
hash: [DIGEST_SIZE_384]byte
ctx: _sha3.Sha3_Context
ctx.mdlen = DIGEST_SIZE_384
_sha3.init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
_sha3.update(&ctx, buf[:read])
}
}
_sha3.final(&ctx, hash[:])
return hash, true
hash: [DIGEST_SIZE_384]byte
ctx: Context
ctx.mdlen = DIGEST_SIZE_384
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_384 will read the file provided by the given handle
// and compute a hash
hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
if !load_at_once {
return hash_stream_384(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_384(buf[:]), ok
}
}
return [DIGEST_SIZE_384]byte{}, false
if !load_at_once {
return hash_stream_384(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_384(buf[:]), ok
}
}
return [DIGEST_SIZE_384]byte{}, false
}
hash_384 :: proc {
hash_stream_384,
hash_file_384,
hash_bytes_384,
hash_string_384,
hash_bytes_to_buffer_384,
hash_string_to_buffer_384,
hash_stream_384,
hash_file_384,
hash_bytes_384,
hash_string_384,
hash_bytes_to_buffer_384,
hash_string_to_buffer_384,
}
// hash_string_512 will hash the given input and return the
// computed hash
hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
return hash_bytes_512(transmute([]byte)(data))
return hash_bytes_512(transmute([]byte)(data))
}
// hash_bytes_512 will hash the given input and return the
// computed hash
hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
hash: [DIGEST_SIZE_512]byte
ctx: _sha3.Sha3_Context
ctx.mdlen = DIGEST_SIZE_512
_sha3.init(&ctx)
_sha3.update(&ctx, data)
_sha3.final(&ctx, hash[:])
return hash
hash: [DIGEST_SIZE_512]byte
ctx: Context
ctx.mdlen = DIGEST_SIZE_512
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_512 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_512 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
ctx: _sha3.Sha3_Context
ctx.mdlen = DIGEST_SIZE_512
_sha3.init(&ctx)
_sha3.update(&ctx, data)
_sha3.final(&ctx, hash)
ctx: Context
ctx.mdlen = DIGEST_SIZE_512
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream_512 will read the stream in chunks and compute a
// hash from its contents
hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
hash: [DIGEST_SIZE_512]byte
ctx: _sha3.Sha3_Context
ctx.mdlen = DIGEST_SIZE_512
_sha3.init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
_sha3.update(&ctx, buf[:read])
}
}
_sha3.final(&ctx, hash[:])
return hash, true
hash: [DIGEST_SIZE_512]byte
ctx: Context
ctx.mdlen = DIGEST_SIZE_512
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_512 will read the file provided by the given handle
// and compute a hash
hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
if !load_at_once {
return hash_stream_512(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_512(buf[:]), ok
}
}
return [DIGEST_SIZE_512]byte{}, false
if !load_at_once {
return hash_stream_512(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_512(buf[:]), ok
}
}
return [DIGEST_SIZE_512]byte{}, false
}
hash_512 :: proc {
hash_stream_512,
hash_file_512,
hash_bytes_512,
hash_string_512,
hash_bytes_to_buffer_512,
hash_string_to_buffer_512,
hash_stream_512,
hash_file_512,
hash_bytes_512,
hash_string_512,
hash_bytes_to_buffer_512,
hash_string_to_buffer_512,
}
/*
Low level API
*/
Sha3_Context :: _sha3.Sha3_Context
Context :: _sha3.Sha3_Context
init :: proc(ctx: ^_sha3.Sha3_Context) {
_sha3.init(ctx)
init :: proc(ctx: ^Context) {
_sha3.init(ctx)
}
update :: proc "contextless" (ctx: ^_sha3.Sha3_Context, data: []byte) {
_sha3.update(ctx, data)
update :: proc(ctx: ^Context, data: []byte) {
_sha3.update(ctx, data)
}
final :: proc "contextless" (ctx: ^_sha3.Sha3_Context, hash: []byte) {
_sha3.final(ctx, hash)
final :: proc(ctx: ^Context, hash: []byte) {
_sha3.final(ctx, hash)
}
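
The high level helpers above are thin wrappers around this low level API. As an illustrative sketch (not part of the change; it assumes the code lives inside, or imports, this sha3 package, and message is a hypothetical input), the one-shot and incremental paths agree:

// Sketch only: one-shot vs. incremental SHA3-256.
example_sha3_256 :: proc() {
    message := "Hellope"
    digest := hash_string_256(message) // one-shot, via the hash_256 group
    ctx: Context
    ctx.mdlen = DIGEST_SIZE_256 // mdlen selects the digest size
    init(&ctx)
    update(&ctx, transmute([]byte)(message))
    out: [DIGEST_SIZE_256]byte
    final(&ctx, out[:])
    assert(out == digest) // both paths produce the same digest
}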

View File

@@ -9,10 +9,13 @@ package shake
Interface for the SHAKE hashing algorithm.
The SHA3 functionality can be found in package sha3.
TODO: This should provide an incremental squeeze interface, in addition
to the one-shot final call.
*/
import "core:os"
import "core:io"
import "core:os"
import "../_sha3"
@@ -26,182 +29,178 @@ DIGEST_SIZE_256 :: 32
// hash_string_128 will hash the given input and return the
// computed hash
hash_string_128 :: proc(data: string) -> [DIGEST_SIZE_128]byte {
return hash_bytes_128(transmute([]byte)(data))
}
// hash_bytes_128 will hash the given input and return the
// computed hash
hash_bytes_128 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
hash: [DIGEST_SIZE_128]byte
ctx: Context
ctx.mdlen = DIGEST_SIZE_128
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_128 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_128 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_128(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_128 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_128 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_128, "Size of destination buffer is smaller than the digest size")
ctx: Context
ctx.mdlen = DIGEST_SIZE_128
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream_128 will read the stream in chunks and compute a
// hash from its contents
hash_stream_128 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
hash: [DIGEST_SIZE_128]byte
ctx: Context
ctx.mdlen = DIGEST_SIZE_128
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_128 will read the file provided by the given handle
// and compute a hash
hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_128]byte, bool) {
if !load_at_once {
return hash_stream_128(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_128(buf[:]), ok
}
}
return [DIGEST_SIZE_128]byte{}, false
}
hash_128 :: proc {
hash_stream_128,
hash_file_128,
hash_bytes_128,
hash_string_128,
hash_bytes_to_buffer_128,
hash_string_to_buffer_128,
}
// hash_string_256 will hash the given input and return the
// computed hash
hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
return hash_bytes_256(transmute([]byte)(data))
}
// hash_bytes_256 will hash the given input and return the
// computed hash
hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
hash: [DIGEST_SIZE_256]byte
ctx: Context
ctx.mdlen = DIGEST_SIZE_256
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_256 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_256 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
ctx: Context
ctx.mdlen = DIGEST_SIZE_256
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
}
// hash_stream_256 will read the stream in chunks and compute a
// hash from its contents
hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
hash: [DIGEST_SIZE_256]byte
ctx: Context
ctx.mdlen = DIGEST_SIZE_256
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_256 will read the file provided by the given handle
// and compute a hash
hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
if !load_at_once {
return hash_stream_256(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_256(buf[:]), ok
}
}
return [DIGEST_SIZE_256]byte{}, false
}
hash_256 :: proc {
hash_stream_256,
hash_file_256,
hash_bytes_256,
hash_string_256,
hash_bytes_to_buffer_256,
hash_string_to_buffer_256,
}
/*
Low level API
*/
Context :: _sha3.Sha3_Context
init :: proc(ctx: ^Context) {
_sha3.init(ctx)
}
update :: proc(ctx: ^Context, data: []byte) {
_sha3.update(ctx, data)
}
final :: proc(ctx: ^Context, hash: []byte) {
_sha3.shake_xof(ctx)
_sha3.shake_out(ctx, hash[:])
}
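
Unlike package sha3, final here runs the XOF padding (_sha3.shake_xof) and then squeezes all mdlen bytes at once (_sha3.shake_out); that single-shot squeeze is exactly what the TODO above proposes to generalize. A minimal usage sketch (illustrative only, not part of the change):

example_shake_128 :: proc() {
    msg := "Hellope"
    ctx: Context
    ctx.mdlen = DIGEST_SIZE_128 // desired output length in bytes
    init(&ctx)
    update(&ctx, transmute([]byte)(msg))
    out: [DIGEST_SIZE_128]byte
    final(&ctx, out[:]) // pads, then squeezes the whole output in one call
}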

View File

@@ -13,202 +13,200 @@ package siphash
*/
import "core:crypto"
import "core:crypto/util"
import "core:encoding/endian"
import "core:math/bits"
/*
High level API
*/
KEY_SIZE :: 16
KEY_SIZE :: 16
// sum_string_1_3 will hash the given message with the key and return
// the computed hash as a u64
sum_string_1_3 :: proc(msg, key: string) -> u64 {
return sum_bytes_1_3(transmute([]byte)(msg), transmute([]byte)(key))
}
// sum_bytes_1_3 will hash the given message with the key and return
// the computed hash as a u64
sum_bytes_1_3 :: proc(msg, key: []byte) -> u64 {
ctx: Context
hash: u64
init(&ctx, key, 1, 3)
update(&ctx, msg)
final(&ctx, &hash)
return hash
}
// sum_string_to_buffer_1_3 will hash the given message with the key and write
// the computed hash into the provided destination buffer
sum_string_to_buffer_1_3 :: proc(msg, key: string, dst: []byte) {
sum_bytes_to_buffer_1_3(transmute([]byte)(msg), transmute([]byte)(key), dst)
}
// sum_bytes_to_buffer_1_3 will hash the given message with the key and write
// the computed hash into the provided destination buffer
sum_bytes_to_buffer_1_3 :: proc(msg, key, dst: []byte) {
assert(len(dst) >= DIGEST_SIZE, "crypto/siphash: Destination buffer needs to be at least of size 8")
hash := sum_bytes_1_3(msg, key)
_collect_output(dst[:], hash)
}
sum_1_3 :: proc {
sum_string_1_3,
sum_bytes_1_3,
sum_string_to_buffer_1_3,
sum_bytes_to_buffer_1_3,
}
// verify_u64_1_3 will check if the supplied tag matches the output you
// will get from the provided message and key
verify_u64_1_3 :: proc(tag: u64, msg, key: []byte) -> bool {
return sum_bytes_1_3(msg, key) == tag
}
// verify_bytes_1_3 will check if the supplied tag matches the output you
// will get from the provided message and key
verify_bytes_1_3 :: proc(tag, msg, key: []byte) -> bool {
derived_tag: [8]byte
sum_bytes_to_buffer_1_3(msg, key, derived_tag[:])
return crypto.compare_constant_time(derived_tag[:], tag) == 1
}
verify_1_3 :: proc {
verify_bytes_1_3,
verify_u64_1_3,
}
// sum_string_2_4 will hash the given message with the key and return
// the computed hash as a u64
sum_string_2_4 :: proc(msg, key: string) -> u64 {
return sum_bytes_2_4(transmute([]byte)(msg), transmute([]byte)(key))
}
// sum_bytes_2_4 will hash the given message with the key and return
// the computed hash as a u64
sum_bytes_2_4 :: proc(msg, key: []byte) -> u64 {
ctx: Context
hash: u64
init(&ctx, key, 2, 4)
update(&ctx, msg)
final(&ctx, &hash)
return hash
}
// sum_string_to_buffer_2_4 will hash the given message with the key and write
// the computed hash into the provided destination buffer
sum_string_to_buffer_2_4 :: proc(msg, key: string, dst: []byte) {
sum_bytes_to_buffer_2_4(transmute([]byte)(msg), transmute([]byte)(key), dst)
}
// sum_bytes_to_buffer_2_4 will hash the given message with the key and write
// the computed hash into the provided destination buffer
sum_bytes_to_buffer_2_4 :: proc(msg, key, dst: []byte) {
assert(len(dst) >= DIGEST_SIZE, "crypto/siphash: Destination buffer needs to be at least of size 8")
hash := sum_bytes_2_4(msg, key)
_collect_output(dst[:], hash)
}
sum_2_4 :: proc {
sum_string_2_4,
sum_bytes_2_4,
sum_string_to_buffer_2_4,
sum_bytes_to_buffer_2_4,
}
sum_string :: sum_string_2_4
sum_bytes :: sum_bytes_2_4
sum_string_to_buffer :: sum_string_to_buffer_2_4
sum_bytes_to_buffer :: sum_bytes_to_buffer_2_4
sum :: proc {
sum_string,
sum_bytes,
sum_string_to_buffer,
sum_bytes_to_buffer,
}
// verify_u64_2_4 will check if the supplied tag matches the output you
// will get from the provided message and key
verify_u64_2_4 :: proc(tag: u64, msg, key: []byte) -> bool {
return sum_bytes_2_4(msg, key) == tag
}
// verify_bytes_2_4 will check if the supplied tag matches the output you
// will get from the provided message and key
verify_bytes_2_4 :: proc(tag, msg, key: []byte) -> bool {
derived_tag: [8]byte
sum_bytes_to_buffer_2_4(msg, key, derived_tag[:])
return crypto.compare_constant_time(derived_tag[:], tag) == 1
}
verify_2_4 :: proc {
verify_bytes_2_4,
verify_u64_2_4,
}
verify_bytes :: verify_bytes_2_4
verify_u64 :: verify_u64_2_4
verify :: proc {
verify_bytes,
verify_u64,
}
// sum_string_4_8 will hash the given message with the key and return
// the computed hash as a u64
sum_string_4_8 :: proc(msg, key: string) -> u64 {
return sum_bytes_4_8(transmute([]byte)(msg), transmute([]byte)(key))
}
// sum_bytes_4_8 will hash the given message with the key and return
// the computed hash as a u64
sum_bytes_4_8 :: proc(msg, key: []byte) -> u64 {
ctx: Context
hash: u64
init(&ctx, key, 4, 8)
update(&ctx, msg)
final(&ctx, &hash)
return hash
}
// sum_string_to_buffer_4_8 will hash the given message with the key and write
// the computed hash into the provided destination buffer
sum_string_to_buffer_4_8 :: proc(msg, key: string, dst: []byte) {
sum_bytes_to_buffer_4_8(transmute([]byte)(msg), transmute([]byte)(key), dst)
}
// sum_bytes_to_buffer_4_8 will hash the given message with the key and write
// the computed hash into the provided destination buffer
sum_bytes_to_buffer_4_8 :: proc(msg, key, dst: []byte) {
assert(len(dst) >= DIGEST_SIZE, "crypto/siphash: Destination buffer needs to be at least of size 8")
hash := sum_bytes_4_8(msg, key)
_collect_output(dst[:], hash)
}
sum_4_8 :: proc {
sum_string_4_8,
sum_bytes_4_8,
sum_string_to_buffer_4_8,
sum_bytes_to_buffer_4_8,
}
// verify_u64_4_8 will check if the supplied tag matches the output you
// will get from the provided message and key
verify_u64_4_8 :: proc(tag: u64, msg, key: []byte) -> bool {
return sum_bytes_4_8(msg, key) == tag
}
// verify_bytes_4_8 will check if the supplied tag matches the output you
// will get from the provided message and key
verify_bytes_4_8 :: proc(tag, msg, key: []byte) -> bool {
derived_tag: [8]byte
sum_bytes_to_buffer_4_8(msg, key, derived_tag[:])
return crypto.compare_constant_time(derived_tag[:], tag) == 1
}
verify_4_8 :: proc {
verify_bytes_4_8,
verify_u64_4_8,
}
@@ -216,120 +214,150 @@ verify_4_8 :: proc {
/*
Low level API
*/
init :: proc(ctx: ^Context, key: []byte, c_rounds, d_rounds: int) {
if len(key) != KEY_SIZE {
panic("crypto/siphash: invalid key size")
}
ctx.c_rounds = c_rounds
ctx.d_rounds = d_rounds
is_valid_setting :=
(ctx.c_rounds == 1 && ctx.d_rounds == 3) ||
(ctx.c_rounds == 2 && ctx.d_rounds == 4) ||
(ctx.c_rounds == 4 && ctx.d_rounds == 8)
if !is_valid_setting {
panic("crypto/siphash: incorrect rounds set up, valid pairs are (1,3), (2,4) and (4,8)")
}
ctx.k0 = endian.unchecked_get_u64le(key[:8])
ctx.k1 = endian.unchecked_get_u64le(key[8:])
ctx.v0 = 0x736f6d6570736575 ~ ctx.k0
ctx.v1 = 0x646f72616e646f6d ~ ctx.k1
ctx.v2 = 0x6c7967656e657261 ~ ctx.k0
ctx.v3 = 0x7465646279746573 ~ ctx.k1
ctx.last_block = 0
ctx.total_length = 0
ctx.is_initialized = true
}
update :: proc(ctx: ^Context, data: []byte) {
assert(ctx.is_initialized, "crypto/siphash: context is not initialized")
data := data
ctx.total_length += len(data)
if ctx.last_block > 0 {
// Top up the buffered partial block before processing full blocks.
n := copy(ctx.buf[ctx.last_block:], data)
ctx.last_block += n
if ctx.last_block == BLOCK_SIZE {
block(ctx, ctx.buf[:])
ctx.last_block = 0
}
data = data[n:]
}
if len(data) >= BLOCK_SIZE {
n := len(data) &~ (BLOCK_SIZE - 1)
block(ctx, data[:n])
data = data[n:]
}
if len(data) > 0 {
ctx.last_block = copy(ctx.buf[:], data)
}
}
final :: proc(ctx: ^Context, dst: ^u64) {
assert(ctx.is_initialized, "crypto/siphash: context is not initialized")
tmp: [BLOCK_SIZE]byte
copy(tmp[:], ctx.buf[:ctx.last_block])
tmp[7] = byte(ctx.total_length & 0xff) // message length mod 256 in the top byte
block(ctx, tmp[:])
ctx.v2 ~= 0xff
for _ in 0..<ctx.d_rounds {
_compress(ctx)
}
dst^ = ctx.v0 ~ ctx.v1 ~ ctx.v2 ~ ctx.v3
reset(ctx)
}
reset :: proc(ctx: ^Context) {
ctx.k0, ctx.k1 = 0, 0
ctx.v0, ctx.v1 = 0, 0
ctx.v2, ctx.v3 = 0, 0
ctx.last_block = 0
ctx.total_length = 0
ctx.c_rounds = 0
ctx.d_rounds = 0
ctx.is_initialized = false
}
BLOCK_SIZE :: 8
Context :: struct {
v0, v1, v2, v3: u64, // State values
k0, k1: u64, // Split key
c_rounds: int, // Number of message rounds
d_rounds: int, // Number of finalization rounds
buf: [BLOCK_SIZE]byte, // Buffered partial block
last_block: int, // Number of buffered bytes in buf
total_length: int, // Total message length in bytes
is_initialized: bool,
}
@(private)
block :: proc "contextless" (ctx: ^Context, buf: []byte) {
buf := buf
for len(buf) >= BLOCK_SIZE {
m := endian.unchecked_get_u64le(buf)
ctx.v3 ~= m
for _ in 0 ..< ctx.c_rounds {
_compress(ctx)
}
ctx.v0 ~= m
buf = buf[BLOCK_SIZE:]
}
}
@(private)
_get_byte :: #force_inline proc "contextless" (byte_num: byte, into: u64) -> byte {
return byte(into >> (((~byte_num) & (size_of(u64) - 1)) << 3))
}
@(private)
_collect_output :: #force_inline proc(dst: []byte, hash: u64) {
if len(dst) < DIGEST_SIZE {
panic("crypto/siphash: invalid tag size")
}
dst[0] = _get_byte(7, hash)
dst[1] = _get_byte(6, hash)
dst[2] = _get_byte(5, hash)
dst[3] = _get_byte(4, hash)
dst[4] = _get_byte(3, hash)
dst[5] = _get_byte(2, hash)
dst[6] = _get_byte(1, hash)
dst[7] = _get_byte(0, hash)
}
@(private)
_compress :: #force_inline proc "contextless" (ctx: ^Context) {
ctx.v0 += ctx.v1
ctx.v1 = bits.rotate_left64(ctx.v1, 13)
ctx.v1 ~= ctx.v0
ctx.v0 = bits.rotate_left64(ctx.v0, 32)
ctx.v2 += ctx.v3
ctx.v3 = bits.rotate_left64(ctx.v3, 16)
ctx.v3 ~= ctx.v2
ctx.v0 += ctx.v3
ctx.v3 = bits.rotate_left64(ctx.v3, 21)
ctx.v3 ~= ctx.v0
ctx.v2 += ctx.v1
ctx.v1 = bits.rotate_left64(ctx.v1, 17)
ctx.v1 ~= ctx.v2
ctx.v2 = bits.rotate_left64(ctx.v2, 32)
}
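
A short sketch showing how the high level API above fits together (illustrative only; the zero key stands in for a real key, which must be 16 uniformly random bytes in practice):

example_siphash_2_4 :: proc() {
    key: [KEY_SIZE]byte // all-zero key, for illustration only
    msg_str := "a short message"
    msg := transmute([]byte)(msg_str)
    tag := sum_bytes(msg, key[:]) // sum_bytes defaults to SipHash-2-4
    assert(verify_u64(tag, msg, key[:])) // recomputes the tag and compares
}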

View File

@@ -10,10 +10,10 @@ package sm3
Implementation of the SM3 hashing algorithm, as defined in <https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02>
*/
import "core:os"
import "core:encoding/endian"
import "core:io"
import "../util"
import "core:math/bits"
import "core:os"
/*
High level API
*/
@@ -24,227 +24,256 @@ DIGEST_SIZE :: 32
// hash_string will hash the given input and return the
// computed hash
hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
return hash_bytes(transmute([]byte)(data))
}
// hash_bytes will hash the given input and return the
// computed hash
hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
hash: [DIGEST_SIZE]byte
ctx: Context
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
ctx: Context
init(&ctx)
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream will read the stream in chunks and compute a
// hash from its contents
hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
hash: [DIGEST_SIZE]byte
ctx: Context
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file will read the file provided by the given handle
// and compute a hash
hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes(buf[:]), ok
}
}
return [DIGEST_SIZE]byte{}, false
}
hash :: proc {
hash_stream,
hash_file,
hash_bytes,
hash_string,
hash_bytes_to_buffer,
hash_string_to_buffer,
}
/*
Low level API
*/
init :: proc(ctx: ^Context) {
ctx.state[0] = IV[0]
ctx.state[1] = IV[1]
ctx.state[2] = IV[2]
ctx.state[3] = IV[3]
ctx.state[4] = IV[4]
ctx.state[5] = IV[5]
ctx.state[6] = IV[6]
ctx.state[7] = IV[7]
ctx.length = 0
ctx.bitlength = 0
ctx.is_initialized = true
}
update :: proc(ctx: ^Context, data: []byte) {
assert(ctx.is_initialized)
data := data
ctx.length += u64(len(data))
if ctx.bitlength > 0 {
n := copy(ctx.x[ctx.bitlength:], data[:])
ctx.bitlength += u64(n)
if ctx.bitlength == BLOCK_SIZE {
block(ctx, ctx.x[:])
ctx.bitlength = 0
}
data = data[n:]
}
if len(data) >= BLOCK_SIZE {
n := len(data) &~ (BLOCK_SIZE - 1)
block(ctx, data[:n])
data = data[n:]
}
if len(data) > 0 {
ctx.bitlength = u64(copy(ctx.x[:], data[:]))
}
}
final :: proc(ctx: ^Context, hash: []byte) {
assert(ctx.is_initialized)
if len(hash) < DIGEST_SIZE {
panic("crypto/sm3: invalid destination digest size")
}
length := ctx.length
pad: [BLOCK_SIZE]byte
pad[0] = 0x80
if length % BLOCK_SIZE < 56 {
update(ctx, pad[0:56 - length % BLOCK_SIZE])
} else {
update(ctx, pad[0:BLOCK_SIZE + 56 - length % BLOCK_SIZE])
}
length <<= 3
endian.unchecked_put_u64be(pad[:], length)
update(ctx, pad[0:8])
assert(ctx.bitlength == 0)
for i := 0; i < DIGEST_SIZE / 4; i += 1 {
endian.unchecked_put_u32be(hash[i * 4:], ctx.state[i])
}
ctx.is_initialized = false
}
/*
SM3 implementation
*/
BLOCK_SIZE :: 64
Context :: struct {
state: [8]u32,
x: [BLOCK_SIZE]byte,
bitlength: u64,
length: u64,
is_initialized: bool,
}
@(private)
IV := [8]u32 {
0x7380166f, 0x4914b2b9, 0x172442d7, 0xda8a0600,
0xa96f30bc, 0x163138aa, 0xe38dee4d, 0xb0fb0e4e,
}
@(private)
block :: proc "contextless" (ctx: ^Context, buf: []byte) {
buf := buf
w: [68]u32
wp: [64]u32
state0, state1, state2, state3 := ctx.state[0], ctx.state[1], ctx.state[2], ctx.state[3]
state4, state5, state6, state7 := ctx.state[4], ctx.state[5], ctx.state[6], ctx.state[7]
for len(buf) >= BLOCK_SIZE {
for i := 0; i < 16; i += 1 {
w[i] = endian.unchecked_get_u32be(buf[i * 4:])
}
for i := 16; i < 68; i += 1 {
p1v := w[i - 16] ~ w[i - 9] ~ bits.rotate_left32(w[i - 3], 15)
// @note(zh): inlined P1
w[i] =
p1v ~
bits.rotate_left32(p1v, 15) ~
bits.rotate_left32(p1v, 23) ~
bits.rotate_left32(w[i - 13], 7) ~
w[i - 6]
}
for i := 0; i < 64; i += 1 {
wp[i] = w[i] ~ w[i + 4]
}
a, b, c, d := state0, state1, state2, state3
e, f, g, h := state4, state5, state6, state7
for i := 0; i < 16; i += 1 {
v1 := bits.rotate_left32(u32(a), 12)
ss1 := bits.rotate_left32(v1 + u32(e) + bits.rotate_left32(0x79cc4519, i), 7)
ss2 := ss1 ~ v1
// @note(zh): inlined FF1
tt1 := u32(a ~ b ~ c) + u32(d) + ss2 + wp[i]
// @note(zh): inlined GG1
tt2 := u32(e ~ f ~ g) + u32(h) + ss1 + w[i]
a, b, c, d = tt1, a, bits.rotate_left32(u32(b), 9), c
// @note(zh): inlined P0
e, f, g, h =
(tt2 ~ bits.rotate_left32(tt2, 9) ~ bits.rotate_left32(tt2, 17)),
e,
bits.rotate_left32(u32(f), 19),
g
}
for i := 16; i < 64; i += 1 {
v := bits.rotate_left32(u32(a), 12)
ss1 := bits.rotate_left32(v + u32(e) + bits.rotate_left32(0x7a879d8a, i % 32), 7)
ss2 := ss1 ~ v
// @note(zh): inlined FF2
tt1 := u32(((a & b) | (a & c) | (b & c)) + d) + ss2 + wp[i]
// @note(zh): inlined GG2
tt2 := u32(((e & f) | ((~e) & g)) + h) + ss1 + w[i]
a, b, c, d = tt1, a, bits.rotate_left32(u32(b), 9), c
// @note(zh): inlined P0
e, f, g, h =
(tt2 ~ bits.rotate_left32(tt2, 9) ~ bits.rotate_left32(tt2, 17)),
e,
bits.rotate_left32(u32(f), 19),
g
}
state0 ~= a
state1 ~= b
state2 ~= c
state3 ~= d
state4 ~= e
state5 ~= f
state6 ~= g
state7 ~= h
buf = buf[BLOCK_SIZE:]
}
ctx.state[0], ctx.state[1], ctx.state[2], ctx.state[3] = state0, state1, state2, state3
ctx.state[4], ctx.state[5], ctx.state[6], ctx.state[7] = state4, state5, state6, state7
}
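
The update/final pair above is standard Merkle-Damgård bookkeeping: update buffers partial input in ctx.x, and final pads with 0x80 and zeros up to 56 bytes mod 64 before appending the big-endian bit length. A sanity-check sketch against the well-known "abc" test vector (illustrative only; the expected value is quoted from the specification, not from this change):

example_sm3 :: proc() {
    digest := hash_string("abc")
    // Expected digest:
    // 66c7f0f462eeedd9d1f2d46bdc10e4e24167c4875cf2f7a2297da02b8f4ba8e0
    _ = digest
}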

View File

@@ -1,517 +0,0 @@
package streebog
/*
Copyright 2021 zhibog
Made available under the BSD-3 license.
List of contributors:
zhibog, dotbmp: Initial implementation.
Implementation of the Streebog hashing algorithm, standardized as GOST R 34.11-2012 in RFC 6986 <https://datatracker.ietf.org/doc/html/rfc6986>
*/
import "core:os"
import "core:io"
import "../util"
/*
High level API
*/
DIGEST_SIZE_256 :: 32
DIGEST_SIZE_512 :: 64
// hash_string_256 will hash the given input and return the
// computed hash
hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
return hash_bytes_256(transmute([]byte)(data))
}
// hash_bytes_256 will hash the given input and return the
// computed hash
hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
hash: [DIGEST_SIZE_256]byte
ctx: Streebog_Context
ctx.is256 = true
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_256 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_256 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
ctx: Streebog_Context
ctx.is256 = true
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
}
// hash_stream_256 will read the stream in chunks and compute a
// hash from its contents
hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
hash: [DIGEST_SIZE_256]byte
ctx: Streebog_Context
ctx.is256 = true
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_256 will read the file provided by the given handle
// and compute a hash
hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
if !load_at_once {
return hash_stream_256(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_256(buf[:]), ok
}
}
return [DIGEST_SIZE_256]byte{}, false
}
hash_256 :: proc {
hash_stream_256,
hash_file_256,
hash_bytes_256,
hash_string_256,
hash_bytes_to_buffer_256,
hash_string_to_buffer_256,
}
// hash_string_512 will hash the given input and return the
// computed hash
hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
return hash_bytes_512(transmute([]byte)(data))
}
// hash_bytes_512 will hash the given input and return the
// computed hash
hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
hash: [DIGEST_SIZE_512]byte
ctx: Streebog_Context
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_512 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_512 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
ctx: Streebog_Context
init(&ctx)
update(&ctx, data)
final(&ctx, hash[:])
}
// hash_stream_512 will read the stream in chunks and compute a
// hash from its contents
hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
hash: [DIGEST_SIZE_512]byte
ctx: Streebog_Context
init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file_512 will read the file provided by the given handle
// and compute a hash
hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
if !load_at_once {
return hash_stream_512(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_512(buf[:]), ok
}
}
return [DIGEST_SIZE_512]byte{}, false
}
hash_512 :: proc {
hash_stream_512,
hash_file_512,
hash_bytes_512,
hash_string_512,
hash_bytes_to_buffer_512,
hash_string_to_buffer_512,
}
/*
Low level API
*/
init :: proc(ctx: ^Streebog_Context) {
if ctx.is256 {
ctx.hash_size = 256
for _, i in ctx.h {
ctx.h[i] = 0x01
}
} else {
ctx.hash_size = 512
}
ctx.v_512[1] = 0x02
}
update :: proc(ctx: ^Streebog_Context, data: []byte) {
length := u64(len(data))
chk_size: u64
data := data
for (length > 63) && (ctx.buf_size == 0) {
stage2(ctx, data)
data = data[64:]
length -= 64
}
for length != 0 {
chk_size = 64 - ctx.buf_size
if chk_size > length {
chk_size = length
}
copy(ctx.buffer[ctx.buf_size:], data[:chk_size])
ctx.buf_size += chk_size
length -= chk_size
data = data[chk_size:]
if ctx.buf_size == 64 {
stage2(ctx, ctx.buffer[:])
ctx.buf_size = 0
}
}
}
final :: proc(ctx: ^Streebog_Context, hash: []byte) {
t: [64]byte
t[1] = byte((ctx.buf_size * 8) >> 8) & 0xff
t[0] = byte((ctx.buf_size) * 8) & 0xff
padding(ctx)
G(ctx.h[:], ctx.n[:], ctx.buffer[:])
add_mod_512(ctx.n[:], t[:], ctx.n[:])
add_mod_512(ctx.sigma[:], ctx.buffer[:], ctx.sigma[:])
G(ctx.h[:], ctx.v_0[:], ctx.n[:])
G(ctx.h[:], ctx.v_0[:], ctx.sigma[:])
if ctx.is256 {
copy(hash[:], ctx.h[32:])
} else {
copy(hash[:], ctx.h[:])
}
}
/*
Streebog implementation
*/
PI := [256]byte {
252, 238, 221, 17, 207, 110, 49, 22, 251, 196, 250, 218, 35, 197, 4, 77,
233, 119, 240, 219, 147, 46, 153, 186, 23, 54, 241, 187, 20, 205, 95, 193,
249, 24, 101, 90, 226, 92, 239, 33, 129, 28, 60, 66, 139, 1, 142, 79,
5, 132, 2, 174, 227, 106, 143, 160, 6, 11, 237, 152, 127, 212, 211, 31,
235, 52, 44, 81, 234, 200, 72, 171, 242, 42, 104, 162, 253, 58, 206, 204,
181, 112, 14, 86, 8, 12, 118, 18, 191, 114, 19, 71, 156, 183, 93, 135,
21, 161, 150, 41, 16, 123, 154, 199, 243, 145, 120, 111, 157, 158, 178, 177,
50, 117, 25, 61, 255, 53, 138, 126, 109, 84, 198, 128, 195, 189, 13, 87,
223, 245, 36, 169, 62, 168, 67, 201, 215, 121, 214, 246, 124, 34, 185, 3,
224, 15, 236, 222, 122, 148, 176, 188, 220, 232, 40, 80, 78, 51, 10, 74,
167, 151, 96, 115, 30, 0, 98, 68, 26, 184, 56, 130, 100, 159, 38, 65,
173, 69, 70, 146, 39, 94, 85, 47, 140, 163, 165, 125, 105, 213, 149, 59,
7, 88, 179, 64, 134, 172, 29, 247, 48, 55, 107, 228, 136, 217, 231, 137,
225, 27, 131, 73, 76, 63, 248, 254, 141, 83, 170, 144, 202, 216, 133, 97,
32, 113, 103, 164, 45, 43, 9, 91, 203, 155, 37, 208, 190, 229, 108, 82,
89, 166, 116, 210, 230, 244, 180, 192, 209, 102, 175, 194, 57, 75, 99, 182,
}
TAU := [64]byte {
0, 8, 16, 24, 32, 40, 48, 56,
1, 9, 17, 25, 33, 41, 49, 57,
2, 10, 18, 26, 34, 42, 50, 58,
3, 11, 19, 27, 35, 43, 51, 59,
4, 12, 20, 28, 36, 44, 52, 60,
5, 13, 21, 29, 37, 45, 53, 61,
6, 14, 22, 30, 38, 46, 54, 62,
7, 15, 23, 31, 39, 47, 55, 63,
}
STREEBOG_A := [64]u64 {
0x8e20faa72ba0b470, 0x47107ddd9b505a38, 0xad08b0e0c3282d1c, 0xd8045870ef14980e,
0x6c022c38f90a4c07, 0x3601161cf205268d, 0x1b8e0b0e798c13c8, 0x83478b07b2468764,
0xa011d380818e8f40, 0x5086e740ce47c920, 0x2843fd2067adea10, 0x14aff010bdd87508,
0x0ad97808d06cb404, 0x05e23c0468365a02, 0x8c711e02341b2d01, 0x46b60f011a83988e,
0x90dab52a387ae76f, 0x486dd4151c3dfdb9, 0x24b86a840e90f0d2, 0x125c354207487869,
0x092e94218d243cba, 0x8a174a9ec8121e5d, 0x4585254f64090fa0, 0xaccc9ca9328a8950,
0x9d4df05d5f661451, 0xc0a878a0a1330aa6, 0x60543c50de970553, 0x302a1e286fc58ca7,
0x18150f14b9ec46dd, 0x0c84890ad27623e0, 0x0642ca05693b9f70, 0x0321658cba93c138,
0x86275df09ce8aaa8, 0x439da0784e745554, 0xafc0503c273aa42a, 0xd960281e9d1d5215,
0xe230140fc0802984, 0x71180a8960409a42, 0xb60c05ca30204d21, 0x5b068c651810a89e,
0x456c34887a3805b9, 0xac361a443d1c8cd2, 0x561b0d22900e4669, 0x2b838811480723ba,
0x9bcf4486248d9f5d, 0xc3e9224312c8c1a0, 0xeffa11af0964ee50, 0xf97d86d98a327728,
0xe4fa2054a80b329c, 0x727d102a548b194e, 0x39b008152acb8227, 0x9258048415eb419d,
0x492c024284fbaec0, 0xaa16012142f35760, 0x550b8e9e21f7a530, 0xa48b474f9ef5dc18,
0x70a6a56e2440598e, 0x3853dc371220a247, 0x1ca76e95091051ad, 0x0edd37c48a08a6d8,
0x07e095624504536c, 0x8d70c431ac02a736, 0xc83862965601dd1b, 0x641c314b2b8ee083,
}
STREEBOG_C := [12][64]byte {
{
0x07, 0x45, 0xa6, 0xf2, 0x59, 0x65, 0x80, 0xdd,
0x23, 0x4d, 0x74, 0xcc, 0x36, 0x74, 0x76, 0x05,
0x15, 0xd3, 0x60, 0xa4, 0x08, 0x2a, 0x42, 0xa2,
0x01, 0x69, 0x67, 0x92, 0x91, 0xe0, 0x7c, 0x4b,
0xfc, 0xc4, 0x85, 0x75, 0x8d, 0xb8, 0x4e, 0x71,
0x16, 0xd0, 0x45, 0x2e, 0x43, 0x76, 0x6a, 0x2f,
0x1f, 0x7c, 0x65, 0xc0, 0x81, 0x2f, 0xcb, 0xeb,
0xe9, 0xda, 0xca, 0x1e, 0xda, 0x5b, 0x08, 0xb1,
},
{
0xb7, 0x9b, 0xb1, 0x21, 0x70, 0x04, 0x79, 0xe6,
0x56, 0xcd, 0xcb, 0xd7, 0x1b, 0xa2, 0xdd, 0x55,
0xca, 0xa7, 0x0a, 0xdb, 0xc2, 0x61, 0xb5, 0x5c,
0x58, 0x99, 0xd6, 0x12, 0x6b, 0x17, 0xb5, 0x9a,
0x31, 0x01, 0xb5, 0x16, 0x0f, 0x5e, 0xd5, 0x61,
0x98, 0x2b, 0x23, 0x0a, 0x72, 0xea, 0xfe, 0xf3,
0xd7, 0xb5, 0x70, 0x0f, 0x46, 0x9d, 0xe3, 0x4f,
0x1a, 0x2f, 0x9d, 0xa9, 0x8a, 0xb5, 0xa3, 0x6f,
},
{
0xb2, 0x0a, 0xba, 0x0a, 0xf5, 0x96, 0x1e, 0x99,
0x31, 0xdb, 0x7a, 0x86, 0x43, 0xf4, 0xb6, 0xc2,
0x09, 0xdb, 0x62, 0x60, 0x37, 0x3a, 0xc9, 0xc1,
0xb1, 0x9e, 0x35, 0x90, 0xe4, 0x0f, 0xe2, 0xd3,
0x7b, 0x7b, 0x29, 0xb1, 0x14, 0x75, 0xea, 0xf2,
0x8b, 0x1f, 0x9c, 0x52, 0x5f, 0x5e, 0xf1, 0x06,
0x35, 0x84, 0x3d, 0x6a, 0x28, 0xfc, 0x39, 0x0a,
0xc7, 0x2f, 0xce, 0x2b, 0xac, 0xdc, 0x74, 0xf5,
},
{
0x2e, 0xd1, 0xe3, 0x84, 0xbc, 0xbe, 0x0c, 0x22,
0xf1, 0x37, 0xe8, 0x93, 0xa1, 0xea, 0x53, 0x34,
0xbe, 0x03, 0x52, 0x93, 0x33, 0x13, 0xb7, 0xd8,
0x75, 0xd6, 0x03, 0xed, 0x82, 0x2c, 0xd7, 0xa9,
0x3f, 0x35, 0x5e, 0x68, 0xad, 0x1c, 0x72, 0x9d,
0x7d, 0x3c, 0x5c, 0x33, 0x7e, 0x85, 0x8e, 0x48,
0xdd, 0xe4, 0x71, 0x5d, 0xa0, 0xe1, 0x48, 0xf9,
0xd2, 0x66, 0x15, 0xe8, 0xb3, 0xdf, 0x1f, 0xef,
},
{
0x57, 0xfe, 0x6c, 0x7c, 0xfd, 0x58, 0x17, 0x60,
0xf5, 0x63, 0xea, 0xa9, 0x7e, 0xa2, 0x56, 0x7a,
0x16, 0x1a, 0x27, 0x23, 0xb7, 0x00, 0xff, 0xdf,
0xa3, 0xf5, 0x3a, 0x25, 0x47, 0x17, 0xcd, 0xbf,
0xbd, 0xff, 0x0f, 0x80, 0xd7, 0x35, 0x9e, 0x35,
0x4a, 0x10, 0x86, 0x16, 0x1f, 0x1c, 0x15, 0x7f,
0x63, 0x23, 0xa9, 0x6c, 0x0c, 0x41, 0x3f, 0x9a,
0x99, 0x47, 0x47, 0xad, 0xac, 0x6b, 0xea, 0x4b,
},
{
0x6e, 0x7d, 0x64, 0x46, 0x7a, 0x40, 0x68, 0xfa,
0x35, 0x4f, 0x90, 0x36, 0x72, 0xc5, 0x71, 0xbf,
0xb6, 0xc6, 0xbe, 0xc2, 0x66, 0x1f, 0xf2, 0x0a,
0xb4, 0xb7, 0x9a, 0x1c, 0xb7, 0xa6, 0xfa, 0xcf,
0xc6, 0x8e, 0xf0, 0x9a, 0xb4, 0x9a, 0x7f, 0x18,
0x6c, 0xa4, 0x42, 0x51, 0xf9, 0xc4, 0x66, 0x2d,
0xc0, 0x39, 0x30, 0x7a, 0x3b, 0xc3, 0xa4, 0x6f,
0xd9, 0xd3, 0x3a, 0x1d, 0xae, 0xae, 0x4f, 0xae,
},
{
0x93, 0xd4, 0x14, 0x3a, 0x4d, 0x56, 0x86, 0x88,
0xf3, 0x4a, 0x3c, 0xa2, 0x4c, 0x45, 0x17, 0x35,
0x04, 0x05, 0x4a, 0x28, 0x83, 0x69, 0x47, 0x06,
0x37, 0x2c, 0x82, 0x2d, 0xc5, 0xab, 0x92, 0x09,
0xc9, 0x93, 0x7a, 0x19, 0x33, 0x3e, 0x47, 0xd3,
0xc9, 0x87, 0xbf, 0xe6, 0xc7, 0xc6, 0x9e, 0x39,
0x54, 0x09, 0x24, 0xbf, 0xfe, 0x86, 0xac, 0x51,
0xec, 0xc5, 0xaa, 0xee, 0x16, 0x0e, 0xc7, 0xf4,
},
{
0x1e, 0xe7, 0x02, 0xbf, 0xd4, 0x0d, 0x7f, 0xa4,
0xd9, 0xa8, 0x51, 0x59, 0x35, 0xc2, 0xac, 0x36,
0x2f, 0xc4, 0xa5, 0xd1, 0x2b, 0x8d, 0xd1, 0x69,
0x90, 0x06, 0x9b, 0x92, 0xcb, 0x2b, 0x89, 0xf4,
0x9a, 0xc4, 0xdb, 0x4d, 0x3b, 0x44, 0xb4, 0x89,
0x1e, 0xde, 0x36, 0x9c, 0x71, 0xf8, 0xb7, 0x4e,
0x41, 0x41, 0x6e, 0x0c, 0x02, 0xaa, 0xe7, 0x03,
0xa7, 0xc9, 0x93, 0x4d, 0x42, 0x5b, 0x1f, 0x9b,
},
{
0xdb, 0x5a, 0x23, 0x83, 0x51, 0x44, 0x61, 0x72,
0x60, 0x2a, 0x1f, 0xcb, 0x92, 0xdc, 0x38, 0x0e,
0x54, 0x9c, 0x07, 0xa6, 0x9a, 0x8a, 0x2b, 0x7b,
0xb1, 0xce, 0xb2, 0xdb, 0x0b, 0x44, 0x0a, 0x80,
0x84, 0x09, 0x0d, 0xe0, 0xb7, 0x55, 0xd9, 0x3c,
0x24, 0x42, 0x89, 0x25, 0x1b, 0x3a, 0x7d, 0x3a,
0xde, 0x5f, 0x16, 0xec, 0xd8, 0x9a, 0x4c, 0x94,
0x9b, 0x22, 0x31, 0x16, 0x54, 0x5a, 0x8f, 0x37,
},
{
0xed, 0x9c, 0x45, 0x98, 0xfb, 0xc7, 0xb4, 0x74,
0xc3, 0xb6, 0x3b, 0x15, 0xd1, 0xfa, 0x98, 0x36,
0xf4, 0x52, 0x76, 0x3b, 0x30, 0x6c, 0x1e, 0x7a,
0x4b, 0x33, 0x69, 0xaf, 0x02, 0x67, 0xe7, 0x9f,
0x03, 0x61, 0x33, 0x1b, 0x8a, 0xe1, 0xff, 0x1f,
0xdb, 0x78, 0x8a, 0xff, 0x1c, 0xe7, 0x41, 0x89,
0xf3, 0xf3, 0xe4, 0xb2, 0x48, 0xe5, 0x2a, 0x38,
0x52, 0x6f, 0x05, 0x80, 0xa6, 0xde, 0xbe, 0xab,
},
{
0x1b, 0x2d, 0xf3, 0x81, 0xcd, 0xa4, 0xca, 0x6b,
0x5d, 0xd8, 0x6f, 0xc0, 0x4a, 0x59, 0xa2, 0xde,
0x98, 0x6e, 0x47, 0x7d, 0x1d, 0xcd, 0xba, 0xef,
0xca, 0xb9, 0x48, 0xea, 0xef, 0x71, 0x1d, 0x8a,
0x79, 0x66, 0x84, 0x14, 0x21, 0x80, 0x01, 0x20,
0x61, 0x07, 0xab, 0xeb, 0xbb, 0x6b, 0xfa, 0xd8,
0x94, 0xfe, 0x5a, 0x63, 0xcd, 0xc6, 0x02, 0x30,
0xfb, 0x89, 0xc8, 0xef, 0xd0, 0x9e, 0xcd, 0x7b,
},
{
0x20, 0xd7, 0x1b, 0xf1, 0x4a, 0x92, 0xbc, 0x48,
0x99, 0x1b, 0xb2, 0xd9, 0xd5, 0x17, 0xf4, 0xfa,
0x52, 0x28, 0xe1, 0x88, 0xaa, 0xa4, 0x1d, 0xe7,
0x86, 0xcc, 0x91, 0x18, 0x9d, 0xef, 0x80, 0x5d,
0x9b, 0x9f, 0x21, 0x30, 0xd4, 0x12, 0x20, 0xf8,
0x77, 0x1d, 0xdf, 0xbc, 0x32, 0x3c, 0xa4, 0xcd,
0x7a, 0xb1, 0x49, 0x04, 0xb0, 0x80, 0x13, 0xd2,
0xba, 0x31, 0x16, 0xf1, 0x67, 0xe7, 0x8e, 0x37,
},
}
Streebog_Context :: struct {
buffer: [64]byte,
h: [64]byte,
n: [64]byte,
sigma: [64]byte,
v_0: [64]byte,
v_512: [64]byte,
buf_size: u64,
hash_size: int,
is256: bool,
}
add_mod_512 :: proc(first_vector, second_vector, result_vector: []byte) {
t: i32 = 0
for i: i32 = 0; i < 64; i += 1 {
t = i32(first_vector[i]) + i32(second_vector[i]) + (t >> 8)
result_vector[i] = byte(t & 0xff)
}
}
X :: #force_inline proc(a, k, out: []byte) {
for i := 0; i < 64; i += 1 {
out[i] = a[i] ~ k[i]
}
}
S :: #force_inline proc(state: []byte) {
t: [64]byte
for i: i32 = 63; i >= 0; i -= 1 {
t[i] = PI[state[i]]
}
copy(state, t[:])
}
P :: #force_inline proc(state: []byte) {
t: [64]byte
for i: i32 = 63; i >= 0; i -= 1 {
t[i] = state[TAU[i]]
}
copy(state, t[:])
}
L :: #force_inline proc(state: []byte) {
ins := util.cast_slice([]u64, state)
out: [8]u64
for i: i32 = 7; i >= 0; i -= 1 {
for j: i32 = 63; j >= 0; j -= 1 {
if (ins[i] >> u32(j)) & 1 != 0 {
out[i] ~= STREEBOG_A[63 - j]
}
}
}
copy(state, util.cast_slice([]byte, out[:]))
}
E :: #force_inline proc(K, m, state: []byte) {
X(m, K, state)
for i: i32 = 0; i < 12; i += 1 {
S(state)
P(state)
L(state)
get_key(K, i)
X(state, K, state)
}
}
get_key :: #force_inline proc(K: []byte, i: i32) {
X(K, STREEBOG_C[i][:], K)
S(K)
P(K)
L(K)
}
G :: #force_inline proc(h, N, m: []byte) {
t, K: [64]byte
X(N, h, K[:])
S(K[:])
P(K[:])
L(K[:])
E(K[:], m, t[:])
X(t[:], h, t[:])
X(t[:], m, h)
}
stage2 :: proc(ctx: ^Streebog_Context, m: []byte) {
G(ctx.h[:], ctx.n[:], m)
add_mod_512(ctx.n[:], ctx.v_512[:], ctx.n[:])
add_mod_512(ctx.sigma[:], m, ctx.sigma[:])
}
padding :: proc(ctx: ^Streebog_Context) {
if ctx.buf_size < 64 {
t: [64]byte
copy(t[:], ctx.buffer[:int(ctx.buf_size)])
t[ctx.buf_size] = 0x01
copy(ctx.buffer[:], t[:])
}
}

View File

@@ -1,280 +0,0 @@
package tiger
/*
Copyright 2021 zhibog
Made available under the BSD-3 license.
List of contributors:
zhibog, dotbmp: Initial implementation.
Interface for the Tiger1 variant of the Tiger hashing algorithm as defined in <https://www.cs.technion.ac.il/~biham/Reports/Tiger/>
*/
import "core:os"
import "core:io"
import "../_tiger"
/*
High level API
*/
DIGEST_SIZE_128 :: 16
DIGEST_SIZE_160 :: 20
DIGEST_SIZE_192 :: 24
// hash_string_128 will hash the given input and return the
// computed hash
hash_string_128 :: proc(data: string) -> [DIGEST_SIZE_128]byte {
return hash_bytes_128(transmute([]byte)(data))
}
// hash_bytes_128 will hash the given input and return the
// computed hash
hash_bytes_128 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
hash: [DIGEST_SIZE_128]byte
ctx: _tiger.Tiger_Context
ctx.ver = 1
_tiger.init(&ctx)
_tiger.update(&ctx, data)
_tiger.final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_128 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_128 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_128(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_128 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_128 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_128, "Size of destination buffer is smaller than the digest size")
ctx: _tiger.Tiger_Context
ctx.ver = 1
_tiger.init(&ctx)
_tiger.update(&ctx, data)
_tiger.final(&ctx, hash)
}
// hash_stream_128 will read the stream in chunks and compute a
// hash from its contents
hash_stream_128 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
hash: [DIGEST_SIZE_128]byte
ctx: _tiger.Tiger_Context
ctx.ver = 1
_tiger.init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
_tiger.update(&ctx, buf[:read])
}
}
_tiger.final(&ctx, hash[:])
return hash, true
}
// hash_file_128 will read the file provided by the given handle
// and compute a hash
hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_128]byte, bool) {
if !load_at_once {
return hash_stream_128(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_128(buf[:]), ok
}
}
return [DIGEST_SIZE_128]byte{}, false
}
hash_128 :: proc {
hash_stream_128,
hash_file_128,
hash_bytes_128,
hash_string_128,
hash_bytes_to_buffer_128,
hash_string_to_buffer_128,
}
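// Usage (a minimal sketch; the import path and printing are illustrative,
// assuming the package is reachable as "core:crypto/tiger"):
//
//	import "core:crypto/tiger"
//	import "core:fmt"
//
//	digest := tiger.hash_string_128("Hellope")
//	fmt.printf("%x\n", digest)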
// hash_string_160 will hash the given input and return the
// computed hash
hash_string_160 :: proc(data: string) -> [DIGEST_SIZE_160]byte {
return hash_bytes_160(transmute([]byte)(data))
}
// hash_bytes_160 will hash the given input and return the
// computed hash
hash_bytes_160 :: proc(data: []byte) -> [DIGEST_SIZE_160]byte {
hash: [DIGEST_SIZE_160]byte
ctx: _tiger.Tiger_Context
ctx.ver = 1
_tiger.init(&ctx)
_tiger.update(&ctx, data)
_tiger.final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_160 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_160 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_160(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_160 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_160 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_160, "Size of destination buffer is smaller than the digest size")
ctx: _tiger.Tiger_Context
ctx.ver = 1
_tiger.init(&ctx)
_tiger.update(&ctx, data)
_tiger.final(&ctx, hash)
}
// hash_stream_160 will read the stream in chunks and compute a
// hash from its contents
hash_stream_160 :: proc(s: io.Stream) -> ([DIGEST_SIZE_160]byte, bool) {
hash: [DIGEST_SIZE_160]byte
ctx: _tiger.Tiger_Context
ctx.ver = 1
_tiger.init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
_tiger.update(&ctx, buf[:read])
}
}
_tiger.final(&ctx, hash[:])
return hash, true
}
// hash_file_160 will read the file provided by the given handle
// and compute a hash
hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_160]byte, bool) {
if !load_at_once {
return hash_stream_160(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_160(buf[:]), ok
}
}
return [DIGEST_SIZE_160]byte{}, false
}
hash_160 :: proc {
hash_stream_160,
hash_file_160,
hash_bytes_160,
hash_string_160,
hash_bytes_to_buffer_160,
hash_string_to_buffer_160,
}
// hash_string_192 will hash the given input and return the
// computed hash
hash_string_192 :: proc(data: string) -> [DIGEST_SIZE_192]byte {
return hash_bytes_192(transmute([]byte)(data))
}
// hash_bytes_192 will hash the given input and return the
// computed hash
hash_bytes_192 :: proc(data: []byte) -> [DIGEST_SIZE_192]byte {
hash: [DIGEST_SIZE_192]byte
ctx: _tiger.Tiger_Context
ctx.ver = 1
_tiger.init(&ctx)
_tiger.update(&ctx, data)
_tiger.final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_192 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_192 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_192(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_192 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_192 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_192, "Size of destination buffer is smaller than the digest size")
ctx: _tiger.Tiger_Context
ctx.ver = 1
_tiger.init(&ctx)
_tiger.update(&ctx, data)
_tiger.final(&ctx, hash)
}
// hash_stream_192 will read the stream in chunks and compute a
// hash from its contents
hash_stream_192 :: proc(s: io.Stream) -> ([DIGEST_SIZE_192]byte, bool) {
hash: [DIGEST_SIZE_192]byte
ctx: _tiger.Tiger_Context
ctx.ver = 1
_tiger.init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
_tiger.update(&ctx, buf[:read])
}
}
_tiger.final(&ctx, hash[:])
return hash, true
}
// hash_file_192 will read the file provided by the given handle
// and compute a hash
hash_file_192 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_192]byte, bool) {
if !load_at_once {
return hash_stream_192(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_192(buf[:]), ok
}
}
return [DIGEST_SIZE_192]byte{}, false
}
hash_192 :: proc {
hash_stream_192,
hash_file_192,
hash_bytes_192,
hash_string_192,
hash_bytes_to_buffer_192,
hash_string_to_buffer_192,
}
/*
Low level API
*/
Tiger_Context :: _tiger.Tiger_Context
init :: proc(ctx: ^_tiger.Tiger_Context) {
ctx.ver = 1
_tiger.init(ctx)
}
update :: proc(ctx: ^_tiger.Tiger_Context, data: []byte) {
_tiger.update(ctx, data)
}
final :: proc(ctx: ^_tiger.Tiger_Context, hash: []byte) {
_tiger.final(ctx, hash)
}
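// Streaming usage of the low level API (a minimal sketch; the message and
// choice of digest size are illustrative):
//
//	ctx: Tiger_Context
//	init(&ctx)
//	msg := "Hellope World"
//	update(&ctx, transmute([]byte)msg)
//	digest: [DIGEST_SIZE_192]byte
//	final(&ctx, digest[:])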

View File

@@ -1,280 +0,0 @@
package tiger2
/*
Copyright 2021 zhibog
Made available under the BSD-3 license.
List of contributors:
zhibog, dotbmp: Initial implementation.
Interface for the Tiger2 variant of the Tiger hashing algorithm as defined in <https://www.cs.technion.ac.il/~biham/Reports/Tiger/>
*/
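// Tiger2 differs from Tiger1 only in its padding byte (0x80 instead of 0x01);
// the variant is selected below by setting ctx.ver = 2.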
import "core:os"
import "core:io"
import "../_tiger"
/*
High level API
*/
DIGEST_SIZE_128 :: 16
DIGEST_SIZE_160 :: 20
DIGEST_SIZE_192 :: 24
// hash_string_128 will hash the given input and return the
// computed hash
hash_string_128 :: proc(data: string) -> [DIGEST_SIZE_128]byte {
return hash_bytes_128(transmute([]byte)(data))
}
// hash_bytes_128 will hash the given input and return the
// computed hash
hash_bytes_128 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
hash: [DIGEST_SIZE_128]byte
ctx: _tiger.Tiger_Context
ctx.ver = 2
_tiger.init(&ctx)
_tiger.update(&ctx, data)
_tiger.final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_128 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_128 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_128(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_128 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_128 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_128, "Size of destination buffer is smaller than the digest size")
ctx: _tiger.Tiger_Context
ctx.ver = 2
_tiger.init(&ctx)
_tiger.update(&ctx, data)
_tiger.final(&ctx, hash)
}
// hash_stream_128 will read the stream in chunks and compute a
// hash from its contents
hash_stream_128 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
hash: [DIGEST_SIZE_128]byte
ctx: _tiger.Tiger_Context
ctx.ver = 2
_tiger.init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
_tiger.update(&ctx, buf[:read])
}
}
_tiger.final(&ctx, hash[:])
return hash, true
}
// hash_file_128 will read the file provided by the given handle
// and compute a hash
hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_128]byte, bool) {
if !load_at_once {
return hash_stream_128(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_128(buf[:]), ok
}
}
return [DIGEST_SIZE_128]byte{}, false
}
hash_128 :: proc {
hash_stream_128,
hash_file_128,
hash_bytes_128,
hash_string_128,
hash_bytes_to_buffer_128,
hash_string_to_buffer_128,
}
// hash_string_160 will hash the given input and return the
// computed hash
hash_string_160 :: proc(data: string) -> [DIGEST_SIZE_160]byte {
return hash_bytes_160(transmute([]byte)(data))
}
// hash_bytes_160 will hash the given input and return the
// computed hash
hash_bytes_160 :: proc(data: []byte) -> [DIGEST_SIZE_160]byte {
hash: [DIGEST_SIZE_160]byte
ctx: _tiger.Tiger_Context
ctx.ver = 2
_tiger.init(&ctx)
_tiger.update(&ctx, data)
_tiger.final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_160 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_160 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_160(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_160 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_160 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_160, "Size of destination buffer is smaller than the digest size")
ctx: _tiger.Tiger_Context
ctx.ver = 2
_tiger.init(&ctx)
_tiger.update(&ctx, data)
_tiger.final(&ctx, hash)
}
// hash_stream_160 will read the stream in chunks and compute a
// hash from its contents
hash_stream_160 :: proc(s: io.Stream) -> ([DIGEST_SIZE_160]byte, bool) {
hash: [DIGEST_SIZE_160]byte
ctx: _tiger.Tiger_Context
ctx.ver = 2
_tiger.init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
_tiger.update(&ctx, buf[:read])
}
}
_tiger.final(&ctx, hash[:])
return hash, true
}
// hash_file_160 will read the file provided by the given handle
// and compute a hash
hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_160]byte, bool) {
if !load_at_once {
return hash_stream_160(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_160(buf[:]), ok
}
}
return [DIGEST_SIZE_160]byte{}, false
}
hash_160 :: proc {
hash_stream_160,
hash_file_160,
hash_bytes_160,
hash_string_160,
hash_bytes_to_buffer_160,
hash_string_to_buffer_160,
}
// hash_string_192 will hash the given input and return the
// computed hash
hash_string_192 :: proc(data: string) -> [DIGEST_SIZE_192]byte {
return hash_bytes_192(transmute([]byte)(data))
}
// hash_bytes_192 will hash the given input and return the
// computed hash
hash_bytes_192 :: proc(data: []byte) -> [DIGEST_SIZE_192]byte {
hash: [DIGEST_SIZE_192]byte
ctx: _tiger.Tiger_Context
ctx.ver = 2
_tiger.init(&ctx)
_tiger.update(&ctx, data)
_tiger.final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer_192 will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_192 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_192(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_192 will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer_192 :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE_192, "Size of destination buffer is smaller than the digest size")
ctx: _tiger.Tiger_Context
ctx.ver = 2
_tiger.init(&ctx)
_tiger.update(&ctx, data)
_tiger.final(&ctx, hash)
}
// hash_stream_192 will read the stream in chunks and compute a
// hash from its contents
hash_stream_192 :: proc(s: io.Stream) -> ([DIGEST_SIZE_192]byte, bool) {
hash: [DIGEST_SIZE_192]byte
ctx: _tiger.Tiger_Context
ctx.ver = 2
_tiger.init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
_tiger.update(&ctx, buf[:read])
}
}
_tiger.final(&ctx, hash[:])
return hash, true
}
// hash_file_192 will read the file provided by the given handle
// and compute a hash
hash_file_192 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_192]byte, bool) {
if !load_at_once {
return hash_stream_192(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes_192(buf[:]), ok
}
}
return [DIGEST_SIZE_192]byte{}, false
}
hash_192 :: proc {
hash_stream_192,
hash_file_192,
hash_bytes_192,
hash_string_192,
hash_bytes_to_buffer_192,
hash_string_to_buffer_192,
}
/*
Low level API
*/
Tiger_Context :: _tiger.Tiger_Context
init :: proc(ctx: ^_tiger.Tiger_Context) {
ctx.ver = 2
_tiger.init(ctx)
}
update :: proc(ctx: ^_tiger.Tiger_Context, data: []byte) {
_tiger.update(ctx, data)
}
final :: proc(ctx: ^_tiger.Tiger_Context, hash: []byte) {
_tiger.final(ctx, hash)
}

View File

@@ -1,146 +0,0 @@
package util
/*
Copyright 2021 zhibog
Made available under the BSD-3 license.
List of contributors:
zhibog, dotbmp: Initial implementation.
Various utility procedures
*/
import "core:mem"
// Keep vet happy
_ :: mem
// @note(bp): this can replace the other two
cast_slice :: #force_inline proc "contextless" ($D: typeid/[]$DE, src: $S/[]$SE) -> D {
src := src
dst := (^mem.Raw_Slice)(&src)
	when size_of(DE) < size_of(SE) {
		when size_of(SE) % size_of(DE) == 0 {
			dst.len *= size_of(SE) / size_of(DE)
		} else {
			dst.len *= size_of(SE)
			dst.len /= size_of(DE)
		}
	} else when size_of(DE) > size_of(SE) {
		when size_of(DE) % size_of(SE) == 0 {
			dst.len /= size_of(DE) / size_of(SE)
		} else {
			dst.len *= size_of(SE)
			dst.len /= size_of(DE)
		}
	}
	// equal element sizes need no length adjustment
return (^D)(dst)^
}
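// Example (illustrative): reinterpreting a 64-byte state as 8 u64 words.
//
//	state: [64]byte
//	words := cast_slice([]u64, state[:]) // len(words) == 8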
bytes_to_slice :: #force_inline proc "contextless" ($T: typeid/[]$E, bytes: []byte) -> T {
s := transmute(mem.Raw_Slice)bytes
s.len /= size_of(E)
return transmute(T)s
}
slice_to_bytes :: #force_inline proc "contextless" (slice: $E/[]$T) -> []byte {
s := transmute(mem.Raw_Slice)slice
s.len *= size_of(T)
return transmute([]byte)s
}
ROTL16 :: #force_inline proc "contextless" (a, b: u16) -> u16 {
return ((a << b) | (a >> (16 - b)))
}
ROTR16 :: #force_inline proc "contextless" (a, b: u16) -> u16 {
return ((a >> b) | (a << (16 - b)))
}
ROTL32 :: #force_inline proc "contextless"(a: u32, b: int) -> u32 {
s := uint(b) & 31
return (a << s) | (a >> (32 - s))
}
ROTR32 :: #force_inline proc "contextless" (a: u32, b: int) -> u32 {
s := uint(b) & 31
return (a >> s) | (a << (32 - s))
}
ROTL64 :: #force_inline proc "contextless" (a, b: u64) -> u64 {
return ((a << b) | (a >> (64 - b)))
}
ROTR64 :: #force_inline proc "contextless" (a, b: u64) -> u64 {
return ((a >> b) | (a << (64 - b)))
}
ROTL128 :: #force_inline proc "contextless" (a, b, c, d: ^u32, n: uint) {
a, b, c, d := a, b, c, d
t := a^ >> (32 - n)
a^ = ((a^ << n) | (b^ >> (32 - n)))
b^ = ((b^ << n) | (c^ >> (32 - n)))
c^ = ((c^ << n) | (d^ >> (32 - n)))
d^ = ((d^ << n) | t)
}
U32_LE :: #force_inline proc "contextless" (b: []byte) -> u32 {
return u32(b[0]) | u32(b[1]) << 8 | u32(b[2]) << 16 | u32(b[3]) << 24
}
U64_LE :: #force_inline proc "contextless" (b: []byte) -> u64 {
return u64(b[0]) | u64(b[1]) << 8 | u64(b[2]) << 16 | u64(b[3]) << 24 |
u64(b[4]) << 32 | u64(b[5]) << 40 | u64(b[6]) << 48 | u64(b[7]) << 56
}
U64_BE :: #force_inline proc "contextless" (b: []byte) -> u64 {
return u64(b[7]) | u64(b[6]) << 8 | u64(b[5]) << 16 | u64(b[4]) << 24 |
u64(b[3]) << 32 | u64(b[2]) << 40 | u64(b[1]) << 48 | u64(b[0]) << 56
}
PUT_U64_LE :: #force_inline proc "contextless" (b: []byte, v: u64) {
b[0] = byte(v)
b[1] = byte(v >> 8)
b[2] = byte(v >> 16)
b[3] = byte(v >> 24)
b[4] = byte(v >> 32)
b[5] = byte(v >> 40)
b[6] = byte(v >> 48)
b[7] = byte(v >> 56)
}
PUT_U32_LE :: #force_inline proc "contextless" (b: []byte, v: u32) {
b[0] = byte(v)
b[1] = byte(v >> 8)
b[2] = byte(v >> 16)
b[3] = byte(v >> 24)
}
PUT_U32_BE :: #force_inline proc "contextless" (b: []byte, v: u32) {
b[0] = byte(v >> 24)
b[1] = byte(v >> 16)
b[2] = byte(v >> 8)
b[3] = byte(v)
}
PUT_U64_BE :: #force_inline proc "contextless" (b: []byte, v: u64) {
b[0] = byte(v >> 56)
b[1] = byte(v >> 48)
b[2] = byte(v >> 40)
b[3] = byte(v >> 32)
b[4] = byte(v >> 24)
b[5] = byte(v >> 16)
b[6] = byte(v >> 8)
b[7] = byte(v)
}
XOR_BUF :: #force_inline proc "contextless" (input, output: []byte) {
for i := 0; i < len(input); i += 1 {
output[i] ~= input[i]
}
}

View File

@@ -1,806 +0,0 @@
package whirlpool
/*
Copyright 2021 zhibog
Made available under the BSD-3 license.
List of contributors:
zhibog, dotbmp: Initial implementation.
Implementation of the Whirlpool hashing algorithm, as defined in <https://web.archive.org/web/20171129084214/http://www.larc.usp.br/~pbarreto/WhirlpoolPage.html>
*/
import "core:os"
import "core:io"
import "../util"
/*
High level API
*/
DIGEST_SIZE :: 64
// hash_string will hash the given input and return the
// computed hash
hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
return hash_bytes(transmute([]byte)(data))
}
// hash_bytes will hash the given input and return the
// computed hash
hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
hash: [DIGEST_SIZE]byte
ctx: Whirlpool_Context
// init(&ctx) No-op
update(&ctx, data)
final(&ctx, hash[:])
return hash
}
// hash_string_to_buffer will hash the given input and assign the
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer will hash the given input and write the
// computed hash into the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_bytes_to_buffer :: proc(data, hash: []byte) {
assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
ctx: Whirlpool_Context
// init(&ctx) No-op
update(&ctx, data)
final(&ctx, hash)
}
// hash_stream will read the stream in chunks and compute a
// hash from its contents
hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
hash: [DIGEST_SIZE]byte
ctx: Whirlpool_Context
// init(&ctx) No-op
buf := make([]byte, 512)
defer delete(buf)
read := 1
for read > 0 {
read, _ = io.read(s, buf)
if read > 0 {
update(&ctx, buf[:read])
}
}
final(&ctx, hash[:])
return hash, true
}
// hash_file will read the file provided by the given handle
// and compute a hash
hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
if buf, ok := os.read_entire_file(hd); ok {
return hash_bytes(buf[:]), ok
}
}
return [DIGEST_SIZE]byte{}, false
}
hash :: proc {
hash_stream,
hash_file,
hash_bytes,
hash_string,
hash_bytes_to_buffer,
hash_string_to_buffer,
}
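// Usage (a minimal sketch; the import path is an assumption):
//
//	import "core:crypto/whirlpool"
//
//	digest := whirlpool.hash_string("Hellope") // 64-byte digest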
/*
Low level API
*/
@(warning="Init is a no-op for Whirlpool")
init :: proc(ctx: ^Whirlpool_Context) {
// No action needed here
}
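// update absorbs `source` into the context. Following the NESSIE reference
// implementation, the input is treated as a bit stream: partial trailing
// bytes are supported, the 256-bit message length in `bitlength` is updated
// byte-wise with carry, and the state is transformed every 512 buffered bits.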
update :: proc(ctx: ^Whirlpool_Context, source: []byte) {
source_pos: int
nn := len(source)
source_bits := u64(nn * 8)
source_gap := u32((8 - (int(source_bits & 7))) & 7)
buffer_rem := uint(ctx.buffer_bits & 7)
b: u32
	// note: keep `value` 64-bit so lengths of 2^32 bits and over are counted correctly
	for i, carry, value := 31, u32(0), source_bits; i >= 0 && (carry != 0 || value != 0); i -= 1 {
carry += u32(ctx.bitlength[i]) + (u32(value & 0xff))
ctx.bitlength[i] = byte(carry)
carry >>= 8
value >>= 8
}
for source_bits > 8 {
b = u32(u32((source[source_pos] << source_gap) & 0xff) | u32((source[source_pos+1] & 0xff) >> (8 - source_gap)))
ctx.buffer[ctx.buffer_pos] |= u8(b >> buffer_rem)
ctx.buffer_pos += 1
ctx.buffer_bits += int(8 - buffer_rem)
if ctx.buffer_bits == 512 {
transform(ctx)
ctx.buffer_bits = 0
ctx.buffer_pos = 0
}
ctx.buffer[ctx.buffer_pos] = byte(b << (8 - buffer_rem))
ctx.buffer_bits += int(buffer_rem)
source_bits -= 8
source_pos += 1
}
if source_bits > 0 {
b = u32((source[source_pos] << source_gap) & 0xff)
ctx.buffer[ctx.buffer_pos] |= byte(b) >> buffer_rem
	} else {
		b = 0
	}
if u64(buffer_rem) + source_bits < 8 {
ctx.buffer_bits += int(source_bits)
} else {
ctx.buffer_pos += 1
ctx.buffer_bits += 8 - int(buffer_rem)
source_bits -= u64(8 - buffer_rem)
if ctx.buffer_bits == 512 {
transform(ctx)
ctx.buffer_bits = 0
ctx.buffer_pos = 0
}
ctx.buffer[ctx.buffer_pos] = byte(b << (8 - buffer_rem))
ctx.buffer_bits += int(source_bits)
}
}
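// final pads the message as specified for Whirlpool: a single 1 bit, zeros
// up to 256 bits short of a block boundary, then the 256-bit message length;
// the final state is serialized big-endian into `hash`.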
final :: proc(ctx: ^Whirlpool_Context, hash: []byte) {
n := ctx
n.buffer[n.buffer_pos] |= 0x80 >> (uint(n.buffer_bits) & 7)
n.buffer_pos += 1
if n.buffer_pos > 64 - 32 {
if n.buffer_pos < 64 {
for i := 0; i < 64 - n.buffer_pos; i += 1 {
n.buffer[n.buffer_pos + i] = 0
}
}
transform(ctx)
n.buffer_pos = 0
}
if n.buffer_pos < 64 - 32 {
for i := 0; i < (64 - 32) - n.buffer_pos; i += 1 {
n.buffer[n.buffer_pos + i] = 0
}
}
n.buffer_pos = 64 - 32
for i := 0; i < 32; i += 1 {
n.buffer[n.buffer_pos + i] = n.bitlength[i]
}
transform(ctx)
for i := 0; i < 8; i += 1 {
hash[i * 8] = byte(n.hash[i] >> 56)
hash[i * 8 + 1] = byte(n.hash[i] >> 48)
hash[i * 8 + 2] = byte(n.hash[i] >> 40)
hash[i * 8 + 3] = byte(n.hash[i] >> 32)
hash[i * 8 + 4] = byte(n.hash[i] >> 24)
hash[i * 8 + 5] = byte(n.hash[i] >> 16)
hash[i * 8 + 6] = byte(n.hash[i] >> 8)
hash[i * 8 + 7] = byte(n.hash[i])
}
}
/*
Whirlpool implementation
*/
ROUNDS :: 10
Whirlpool_Context :: struct {
bitlength: [32]byte,
buffer: [64]byte,
buffer_bits: int,
buffer_pos: int,
hash: [8]u64,
}
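// C0..C7 are the precomputed round-function lookup tables, one per byte
// position: each entry combines the S-box with the circulant MDS matrix,
// so Ct[x] is C0[x] rotated right by t bytes.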
C0 := [256]u64 {
0x18186018c07830d8, 0x23238c2305af4626, 0xc6c63fc67ef991b8, 0xe8e887e8136fcdfb,
0x878726874ca113cb, 0xb8b8dab8a9626d11, 0x0101040108050209, 0x4f4f214f426e9e0d,
0x3636d836adee6c9b, 0xa6a6a2a6590451ff, 0xd2d26fd2debdb90c, 0xf5f5f3f5fb06f70e,
0x7979f979ef80f296, 0x6f6fa16f5fcede30, 0x91917e91fcef3f6d, 0x52525552aa07a4f8,
0x60609d6027fdc047, 0xbcbccabc89766535, 0x9b9b569baccd2b37, 0x8e8e028e048c018a,
0xa3a3b6a371155bd2, 0x0c0c300c603c186c, 0x7b7bf17bff8af684, 0x3535d435b5e16a80,
0x1d1d741de8693af5, 0xe0e0a7e05347ddb3, 0xd7d77bd7f6acb321, 0xc2c22fc25eed999c,
0x2e2eb82e6d965c43, 0x4b4b314b627a9629, 0xfefedffea321e15d, 0x575741578216aed5,
0x15155415a8412abd, 0x7777c1779fb6eee8, 0x3737dc37a5eb6e92, 0xe5e5b3e57b56d79e,
0x9f9f469f8cd92313, 0xf0f0e7f0d317fd23, 0x4a4a354a6a7f9420, 0xdada4fda9e95a944,
0x58587d58fa25b0a2, 0xc9c903c906ca8fcf, 0x2929a429558d527c, 0x0a0a280a5022145a,
0xb1b1feb1e14f7f50, 0xa0a0baa0691a5dc9, 0x6b6bb16b7fdad614, 0x85852e855cab17d9,
0xbdbdcebd8173673c, 0x5d5d695dd234ba8f, 0x1010401080502090, 0xf4f4f7f4f303f507,
0xcbcb0bcb16c08bdd, 0x3e3ef83eedc67cd3, 0x0505140528110a2d, 0x676781671fe6ce78,
0xe4e4b7e47353d597, 0x27279c2725bb4e02, 0x4141194132588273, 0x8b8b168b2c9d0ba7,
0xa7a7a6a7510153f6, 0x7d7de97dcf94fab2, 0x95956e95dcfb3749, 0xd8d847d88e9fad56,
0xfbfbcbfb8b30eb70, 0xeeee9fee2371c1cd, 0x7c7ced7cc791f8bb, 0x6666856617e3cc71,
0xdddd53dda68ea77b, 0x17175c17b84b2eaf, 0x4747014702468e45, 0x9e9e429e84dc211a,
0xcaca0fca1ec589d4, 0x2d2db42d75995a58, 0xbfbfc6bf9179632e, 0x07071c07381b0e3f,
0xadad8ead012347ac, 0x5a5a755aea2fb4b0, 0x838336836cb51bef, 0x3333cc3385ff66b6,
0x636391633ff2c65c, 0x02020802100a0412, 0xaaaa92aa39384993, 0x7171d971afa8e2de,
0xc8c807c80ecf8dc6, 0x19196419c87d32d1, 0x494939497270923b, 0xd9d943d9869aaf5f,
0xf2f2eff2c31df931, 0xe3e3abe34b48dba8, 0x5b5b715be22ab6b9, 0x88881a8834920dbc,
0x9a9a529aa4c8293e, 0x262698262dbe4c0b, 0x3232c8328dfa64bf, 0xb0b0fab0e94a7d59,
0xe9e983e91b6acff2, 0x0f0f3c0f78331e77, 0xd5d573d5e6a6b733, 0x80803a8074ba1df4,
0xbebec2be997c6127, 0xcdcd13cd26de87eb, 0x3434d034bde46889, 0x48483d487a759032,
0xffffdbffab24e354, 0x7a7af57af78ff48d, 0x90907a90f4ea3d64, 0x5f5f615fc23ebe9d,
0x202080201da0403d, 0x6868bd6867d5d00f, 0x1a1a681ad07234ca, 0xaeae82ae192c41b7,
0xb4b4eab4c95e757d, 0x54544d549a19a8ce, 0x93937693ece53b7f, 0x222288220daa442f,
0x64648d6407e9c863, 0xf1f1e3f1db12ff2a, 0x7373d173bfa2e6cc, 0x12124812905a2482,
0x40401d403a5d807a, 0x0808200840281048, 0xc3c32bc356e89b95, 0xecec97ec337bc5df,
0xdbdb4bdb9690ab4d, 0xa1a1bea1611f5fc0, 0x8d8d0e8d1c830791, 0x3d3df43df5c97ac8,
0x97976697ccf1335b, 0x0000000000000000, 0xcfcf1bcf36d483f9, 0x2b2bac2b4587566e,
0x7676c57697b3ece1, 0x8282328264b019e6, 0xd6d67fd6fea9b128, 0x1b1b6c1bd87736c3,
0xb5b5eeb5c15b7774, 0xafaf86af112943be, 0x6a6ab56a77dfd41d, 0x50505d50ba0da0ea,
0x45450945124c8a57, 0xf3f3ebf3cb18fb38, 0x3030c0309df060ad, 0xefef9bef2b74c3c4,
0x3f3ffc3fe5c37eda, 0x55554955921caac7, 0xa2a2b2a2791059db, 0xeaea8fea0365c9e9,
0x656589650fecca6a, 0xbabad2bab9686903, 0x2f2fbc2f65935e4a, 0xc0c027c04ee79d8e,
0xdede5fdebe81a160, 0x1c1c701ce06c38fc, 0xfdfdd3fdbb2ee746, 0x4d4d294d52649a1f,
0x92927292e4e03976, 0x7575c9758fbceafa, 0x06061806301e0c36, 0x8a8a128a249809ae,
0xb2b2f2b2f940794b, 0xe6e6bfe66359d185, 0x0e0e380e70361c7e, 0x1f1f7c1ff8633ee7,
0x6262956237f7c455, 0xd4d477d4eea3b53a, 0xa8a89aa829324d81, 0x96966296c4f43152,
0xf9f9c3f99b3aef62, 0xc5c533c566f697a3, 0x2525942535b14a10, 0x59597959f220b2ab,
0x84842a8454ae15d0, 0x7272d572b7a7e4c5, 0x3939e439d5dd72ec, 0x4c4c2d4c5a619816,
0x5e5e655eca3bbc94, 0x7878fd78e785f09f, 0x3838e038ddd870e5, 0x8c8c0a8c14860598,
0xd1d163d1c6b2bf17, 0xa5a5aea5410b57e4, 0xe2e2afe2434dd9a1, 0x616199612ff8c24e,
0xb3b3f6b3f1457b42, 0x2121842115a54234, 0x9c9c4a9c94d62508, 0x1e1e781ef0663cee,
0x4343114322528661, 0xc7c73bc776fc93b1, 0xfcfcd7fcb32be54f, 0x0404100420140824,
0x51515951b208a2e3, 0x99995e99bcc72f25, 0x6d6da96d4fc4da22, 0x0d0d340d68391a65,
0xfafacffa8335e979, 0xdfdf5bdfb684a369, 0x7e7ee57ed79bfca9, 0x242490243db44819,
0x3b3bec3bc5d776fe, 0xabab96ab313d4b9a, 0xcece1fce3ed181f0, 0x1111441188552299,
0x8f8f068f0c890383, 0x4e4e254e4a6b9c04, 0xb7b7e6b7d1517366, 0xebeb8beb0b60cbe0,
0x3c3cf03cfdcc78c1, 0x81813e817cbf1ffd, 0x94946a94d4fe3540, 0xf7f7fbf7eb0cf31c,
0xb9b9deb9a1676f18, 0x13134c13985f268b, 0x2c2cb02c7d9c5851, 0xd3d36bd3d6b8bb05,
0xe7e7bbe76b5cd38c, 0x6e6ea56e57cbdc39, 0xc4c437c46ef395aa, 0x03030c03180f061b,
0x565645568a13acdc, 0x44440d441a49885e, 0x7f7fe17fdf9efea0, 0xa9a99ea921374f88,
0x2a2aa82a4d825467, 0xbbbbd6bbb16d6b0a, 0xc1c123c146e29f87, 0x53535153a202a6f1,
0xdcdc57dcae8ba572, 0x0b0b2c0b58271653, 0x9d9d4e9d9cd32701, 0x6c6cad6c47c1d82b,
0x3131c43195f562a4, 0x7474cd7487b9e8f3, 0xf6f6fff6e309f115, 0x464605460a438c4c,
0xacac8aac092645a5, 0x89891e893c970fb5, 0x14145014a04428b4, 0xe1e1a3e15b42dfba,
0x16165816b04e2ca6, 0x3a3ae83acdd274f7, 0x6969b9696fd0d206, 0x09092409482d1241,
0x7070dd70a7ade0d7, 0xb6b6e2b6d954716f, 0xd0d067d0ceb7bd1e, 0xeded93ed3b7ec7d6,
0xcccc17cc2edb85e2, 0x424215422a578468, 0x98985a98b4c22d2c, 0xa4a4aaa4490e55ed,
0x2828a0285d885075, 0x5c5c6d5cda31b886, 0xf8f8c7f8933fed6b, 0x8686228644a411c2,
}
C1 := [256]u64 {
0xd818186018c07830, 0x2623238c2305af46, 0xb8c6c63fc67ef991, 0xfbe8e887e8136fcd,
0xcb878726874ca113, 0x11b8b8dab8a9626d, 0x0901010401080502, 0x0d4f4f214f426e9e,
0x9b3636d836adee6c, 0xffa6a6a2a6590451, 0x0cd2d26fd2debdb9, 0x0ef5f5f3f5fb06f7,
0x967979f979ef80f2, 0x306f6fa16f5fcede, 0x6d91917e91fcef3f, 0xf852525552aa07a4,
0x4760609d6027fdc0, 0x35bcbccabc897665, 0x379b9b569baccd2b, 0x8a8e8e028e048c01,
0xd2a3a3b6a371155b, 0x6c0c0c300c603c18, 0x847b7bf17bff8af6, 0x803535d435b5e16a,
0xf51d1d741de8693a, 0xb3e0e0a7e05347dd, 0x21d7d77bd7f6acb3, 0x9cc2c22fc25eed99,
0x432e2eb82e6d965c, 0x294b4b314b627a96, 0x5dfefedffea321e1, 0xd5575741578216ae,
0xbd15155415a8412a, 0xe87777c1779fb6ee, 0x923737dc37a5eb6e, 0x9ee5e5b3e57b56d7,
0x139f9f469f8cd923, 0x23f0f0e7f0d317fd, 0x204a4a354a6a7f94, 0x44dada4fda9e95a9,
0xa258587d58fa25b0, 0xcfc9c903c906ca8f, 0x7c2929a429558d52, 0x5a0a0a280a502214,
0x50b1b1feb1e14f7f, 0xc9a0a0baa0691a5d, 0x146b6bb16b7fdad6, 0xd985852e855cab17,
0x3cbdbdcebd817367, 0x8f5d5d695dd234ba, 0x9010104010805020, 0x07f4f4f7f4f303f5,
0xddcbcb0bcb16c08b, 0xd33e3ef83eedc67c, 0x2d0505140528110a, 0x78676781671fe6ce,
0x97e4e4b7e47353d5, 0x0227279c2725bb4e, 0x7341411941325882, 0xa78b8b168b2c9d0b,
0xf6a7a7a6a7510153, 0xb27d7de97dcf94fa, 0x4995956e95dcfb37, 0x56d8d847d88e9fad,
0x70fbfbcbfb8b30eb, 0xcdeeee9fee2371c1, 0xbb7c7ced7cc791f8, 0x716666856617e3cc,
0x7bdddd53dda68ea7, 0xaf17175c17b84b2e, 0x454747014702468e, 0x1a9e9e429e84dc21,
0xd4caca0fca1ec589, 0x582d2db42d75995a, 0x2ebfbfc6bf917963, 0x3f07071c07381b0e,
0xacadad8ead012347, 0xb05a5a755aea2fb4, 0xef838336836cb51b, 0xb63333cc3385ff66,
0x5c636391633ff2c6, 0x1202020802100a04, 0x93aaaa92aa393849, 0xde7171d971afa8e2,
0xc6c8c807c80ecf8d, 0xd119196419c87d32, 0x3b49493949727092, 0x5fd9d943d9869aaf,
0x31f2f2eff2c31df9, 0xa8e3e3abe34b48db, 0xb95b5b715be22ab6, 0xbc88881a8834920d,
0x3e9a9a529aa4c829, 0x0b262698262dbe4c, 0xbf3232c8328dfa64, 0x59b0b0fab0e94a7d,
0xf2e9e983e91b6acf, 0x770f0f3c0f78331e, 0x33d5d573d5e6a6b7, 0xf480803a8074ba1d,
0x27bebec2be997c61, 0xebcdcd13cd26de87, 0x893434d034bde468, 0x3248483d487a7590,
0x54ffffdbffab24e3, 0x8d7a7af57af78ff4, 0x6490907a90f4ea3d, 0x9d5f5f615fc23ebe,
0x3d202080201da040, 0x0f6868bd6867d5d0, 0xca1a1a681ad07234, 0xb7aeae82ae192c41,
0x7db4b4eab4c95e75, 0xce54544d549a19a8, 0x7f93937693ece53b, 0x2f222288220daa44,
0x6364648d6407e9c8, 0x2af1f1e3f1db12ff, 0xcc7373d173bfa2e6, 0x8212124812905a24,
0x7a40401d403a5d80, 0x4808082008402810, 0x95c3c32bc356e89b, 0xdfecec97ec337bc5,
0x4ddbdb4bdb9690ab, 0xc0a1a1bea1611f5f, 0x918d8d0e8d1c8307, 0xc83d3df43df5c97a,
0x5b97976697ccf133, 0x0000000000000000, 0xf9cfcf1bcf36d483, 0x6e2b2bac2b458756,
0xe17676c57697b3ec, 0xe68282328264b019, 0x28d6d67fd6fea9b1, 0xc31b1b6c1bd87736,
0x74b5b5eeb5c15b77, 0xbeafaf86af112943, 0x1d6a6ab56a77dfd4, 0xea50505d50ba0da0,
0x5745450945124c8a, 0x38f3f3ebf3cb18fb, 0xad3030c0309df060, 0xc4efef9bef2b74c3,
0xda3f3ffc3fe5c37e, 0xc755554955921caa, 0xdba2a2b2a2791059, 0xe9eaea8fea0365c9,
0x6a656589650fecca, 0x03babad2bab96869, 0x4a2f2fbc2f65935e, 0x8ec0c027c04ee79d,
0x60dede5fdebe81a1, 0xfc1c1c701ce06c38, 0x46fdfdd3fdbb2ee7, 0x1f4d4d294d52649a,
0x7692927292e4e039, 0xfa7575c9758fbcea, 0x3606061806301e0c, 0xae8a8a128a249809,
0x4bb2b2f2b2f94079, 0x85e6e6bfe66359d1, 0x7e0e0e380e70361c, 0xe71f1f7c1ff8633e,
0x556262956237f7c4, 0x3ad4d477d4eea3b5, 0x81a8a89aa829324d, 0x5296966296c4f431,
0x62f9f9c3f99b3aef, 0xa3c5c533c566f697, 0x102525942535b14a, 0xab59597959f220b2,
0xd084842a8454ae15, 0xc57272d572b7a7e4, 0xec3939e439d5dd72, 0x164c4c2d4c5a6198,
0x945e5e655eca3bbc, 0x9f7878fd78e785f0, 0xe53838e038ddd870, 0x988c8c0a8c148605,
0x17d1d163d1c6b2bf, 0xe4a5a5aea5410b57, 0xa1e2e2afe2434dd9, 0x4e616199612ff8c2,
0x42b3b3f6b3f1457b, 0x342121842115a542, 0x089c9c4a9c94d625, 0xee1e1e781ef0663c,
0x6143431143225286, 0xb1c7c73bc776fc93, 0x4ffcfcd7fcb32be5, 0x2404041004201408,
0xe351515951b208a2, 0x2599995e99bcc72f, 0x226d6da96d4fc4da, 0x650d0d340d68391a,
0x79fafacffa8335e9, 0x69dfdf5bdfb684a3, 0xa97e7ee57ed79bfc, 0x19242490243db448,
0xfe3b3bec3bc5d776, 0x9aabab96ab313d4b, 0xf0cece1fce3ed181, 0x9911114411885522,
0x838f8f068f0c8903, 0x044e4e254e4a6b9c, 0x66b7b7e6b7d15173, 0xe0ebeb8beb0b60cb,
0xc13c3cf03cfdcc78, 0xfd81813e817cbf1f, 0x4094946a94d4fe35, 0x1cf7f7fbf7eb0cf3,
0x18b9b9deb9a1676f, 0x8b13134c13985f26, 0x512c2cb02c7d9c58, 0x05d3d36bd3d6b8bb,
0x8ce7e7bbe76b5cd3, 0x396e6ea56e57cbdc, 0xaac4c437c46ef395, 0x1b03030c03180f06,
0xdc565645568a13ac, 0x5e44440d441a4988, 0xa07f7fe17fdf9efe, 0x88a9a99ea921374f,
0x672a2aa82a4d8254, 0x0abbbbd6bbb16d6b, 0x87c1c123c146e29f, 0xf153535153a202a6,
0x72dcdc57dcae8ba5, 0x530b0b2c0b582716, 0x019d9d4e9d9cd327, 0x2b6c6cad6c47c1d8,
0xa43131c43195f562, 0xf37474cd7487b9e8, 0x15f6f6fff6e309f1, 0x4c464605460a438c,
0xa5acac8aac092645, 0xb589891e893c970f, 0xb414145014a04428, 0xbae1e1a3e15b42df,
0xa616165816b04e2c, 0xf73a3ae83acdd274, 0x066969b9696fd0d2, 0x4109092409482d12,
0xd77070dd70a7ade0, 0x6fb6b6e2b6d95471, 0x1ed0d067d0ceb7bd, 0xd6eded93ed3b7ec7,
0xe2cccc17cc2edb85, 0x68424215422a5784, 0x2c98985a98b4c22d, 0xeda4a4aaa4490e55,
0x752828a0285d8850, 0x865c5c6d5cda31b8, 0x6bf8f8c7f8933fed, 0xc28686228644a411,
}
C2 := [256]u64 {
0x30d818186018c078, 0x462623238c2305af, 0x91b8c6c63fc67ef9, 0xcdfbe8e887e8136f,
0x13cb878726874ca1, 0x6d11b8b8dab8a962, 0x0209010104010805, 0x9e0d4f4f214f426e,
0x6c9b3636d836adee, 0x51ffa6a6a2a65904, 0xb90cd2d26fd2debd, 0xf70ef5f5f3f5fb06,
0xf2967979f979ef80, 0xde306f6fa16f5fce, 0x3f6d91917e91fcef, 0xa4f852525552aa07,
0xc04760609d6027fd, 0x6535bcbccabc8976, 0x2b379b9b569baccd, 0x018a8e8e028e048c,
0x5bd2a3a3b6a37115, 0x186c0c0c300c603c, 0xf6847b7bf17bff8a, 0x6a803535d435b5e1,
0x3af51d1d741de869, 0xddb3e0e0a7e05347, 0xb321d7d77bd7f6ac, 0x999cc2c22fc25eed,
0x5c432e2eb82e6d96, 0x96294b4b314b627a, 0xe15dfefedffea321, 0xaed5575741578216,
0x2abd15155415a841, 0xeee87777c1779fb6, 0x6e923737dc37a5eb, 0xd79ee5e5b3e57b56,
0x23139f9f469f8cd9, 0xfd23f0f0e7f0d317, 0x94204a4a354a6a7f, 0xa944dada4fda9e95,
0xb0a258587d58fa25, 0x8fcfc9c903c906ca, 0x527c2929a429558d, 0x145a0a0a280a5022,
0x7f50b1b1feb1e14f, 0x5dc9a0a0baa0691a, 0xd6146b6bb16b7fda, 0x17d985852e855cab,
0x673cbdbdcebd8173, 0xba8f5d5d695dd234, 0x2090101040108050, 0xf507f4f4f7f4f303,
0x8bddcbcb0bcb16c0, 0x7cd33e3ef83eedc6, 0x0a2d050514052811, 0xce78676781671fe6,
0xd597e4e4b7e47353, 0x4e0227279c2725bb, 0x8273414119413258, 0x0ba78b8b168b2c9d,
0x53f6a7a7a6a75101, 0xfab27d7de97dcf94, 0x374995956e95dcfb, 0xad56d8d847d88e9f,
0xeb70fbfbcbfb8b30, 0xc1cdeeee9fee2371, 0xf8bb7c7ced7cc791, 0xcc716666856617e3,
0xa77bdddd53dda68e, 0x2eaf17175c17b84b, 0x8e45474701470246, 0x211a9e9e429e84dc,
0x89d4caca0fca1ec5, 0x5a582d2db42d7599, 0x632ebfbfc6bf9179, 0x0e3f07071c07381b,
0x47acadad8ead0123, 0xb4b05a5a755aea2f, 0x1bef838336836cb5, 0x66b63333cc3385ff,
0xc65c636391633ff2, 0x041202020802100a, 0x4993aaaa92aa3938, 0xe2de7171d971afa8,
0x8dc6c8c807c80ecf, 0x32d119196419c87d, 0x923b494939497270, 0xaf5fd9d943d9869a,
0xf931f2f2eff2c31d, 0xdba8e3e3abe34b48, 0xb6b95b5b715be22a, 0x0dbc88881a883492,
0x293e9a9a529aa4c8, 0x4c0b262698262dbe, 0x64bf3232c8328dfa, 0x7d59b0b0fab0e94a,
0xcff2e9e983e91b6a, 0x1e770f0f3c0f7833, 0xb733d5d573d5e6a6, 0x1df480803a8074ba,
0x6127bebec2be997c, 0x87ebcdcd13cd26de, 0x68893434d034bde4, 0x903248483d487a75,
0xe354ffffdbffab24, 0xf48d7a7af57af78f, 0x3d6490907a90f4ea, 0xbe9d5f5f615fc23e,
0x403d202080201da0, 0xd00f6868bd6867d5, 0x34ca1a1a681ad072, 0x41b7aeae82ae192c,
0x757db4b4eab4c95e, 0xa8ce54544d549a19, 0x3b7f93937693ece5, 0x442f222288220daa,
0xc86364648d6407e9, 0xff2af1f1e3f1db12, 0xe6cc7373d173bfa2, 0x248212124812905a,
0x807a40401d403a5d, 0x1048080820084028, 0x9b95c3c32bc356e8, 0xc5dfecec97ec337b,
0xab4ddbdb4bdb9690, 0x5fc0a1a1bea1611f, 0x07918d8d0e8d1c83, 0x7ac83d3df43df5c9,
0x335b97976697ccf1, 0x0000000000000000, 0x83f9cfcf1bcf36d4, 0x566e2b2bac2b4587,
0xece17676c57697b3, 0x19e68282328264b0, 0xb128d6d67fd6fea9, 0x36c31b1b6c1bd877,
0x7774b5b5eeb5c15b, 0x43beafaf86af1129, 0xd41d6a6ab56a77df, 0xa0ea50505d50ba0d,
0x8a5745450945124c, 0xfb38f3f3ebf3cb18, 0x60ad3030c0309df0, 0xc3c4efef9bef2b74,
0x7eda3f3ffc3fe5c3, 0xaac755554955921c, 0x59dba2a2b2a27910, 0xc9e9eaea8fea0365,
0xca6a656589650fec, 0x6903babad2bab968, 0x5e4a2f2fbc2f6593, 0x9d8ec0c027c04ee7,
0xa160dede5fdebe81, 0x38fc1c1c701ce06c, 0xe746fdfdd3fdbb2e, 0x9a1f4d4d294d5264,
0x397692927292e4e0, 0xeafa7575c9758fbc, 0x0c3606061806301e, 0x09ae8a8a128a2498,
0x794bb2b2f2b2f940, 0xd185e6e6bfe66359, 0x1c7e0e0e380e7036, 0x3ee71f1f7c1ff863,
0xc4556262956237f7, 0xb53ad4d477d4eea3, 0x4d81a8a89aa82932, 0x315296966296c4f4,
0xef62f9f9c3f99b3a, 0x97a3c5c533c566f6, 0x4a102525942535b1, 0xb2ab59597959f220,
0x15d084842a8454ae, 0xe4c57272d572b7a7, 0x72ec3939e439d5dd, 0x98164c4c2d4c5a61,
0xbc945e5e655eca3b, 0xf09f7878fd78e785, 0x70e53838e038ddd8, 0x05988c8c0a8c1486,
0xbf17d1d163d1c6b2, 0x57e4a5a5aea5410b, 0xd9a1e2e2afe2434d, 0xc24e616199612ff8,
0x7b42b3b3f6b3f145, 0x42342121842115a5, 0x25089c9c4a9c94d6, 0x3cee1e1e781ef066,
0x8661434311432252, 0x93b1c7c73bc776fc, 0xe54ffcfcd7fcb32b, 0x0824040410042014,
0xa2e351515951b208, 0x2f2599995e99bcc7, 0xda226d6da96d4fc4, 0x1a650d0d340d6839,
0xe979fafacffa8335, 0xa369dfdf5bdfb684, 0xfca97e7ee57ed79b, 0x4819242490243db4,
0x76fe3b3bec3bc5d7, 0x4b9aabab96ab313d, 0x81f0cece1fce3ed1, 0x2299111144118855,
0x03838f8f068f0c89, 0x9c044e4e254e4a6b, 0x7366b7b7e6b7d151, 0xcbe0ebeb8beb0b60,
0x78c13c3cf03cfdcc, 0x1ffd81813e817cbf, 0x354094946a94d4fe, 0xf31cf7f7fbf7eb0c,
0x6f18b9b9deb9a167, 0x268b13134c13985f, 0x58512c2cb02c7d9c, 0xbb05d3d36bd3d6b8,
0xd38ce7e7bbe76b5c, 0xdc396e6ea56e57cb, 0x95aac4c437c46ef3, 0x061b03030c03180f,
0xacdc565645568a13, 0x885e44440d441a49, 0xfea07f7fe17fdf9e, 0x4f88a9a99ea92137,
0x54672a2aa82a4d82, 0x6b0abbbbd6bbb16d, 0x9f87c1c123c146e2, 0xa6f153535153a202,
0xa572dcdc57dcae8b, 0x16530b0b2c0b5827, 0x27019d9d4e9d9cd3, 0xd82b6c6cad6c47c1,
0x62a43131c43195f5, 0xe8f37474cd7487b9, 0xf115f6f6fff6e309, 0x8c4c464605460a43,
0x45a5acac8aac0926, 0x0fb589891e893c97, 0x28b414145014a044, 0xdfbae1e1a3e15b42,
0x2ca616165816b04e, 0x74f73a3ae83acdd2, 0xd2066969b9696fd0, 0x124109092409482d,
0xe0d77070dd70a7ad, 0x716fb6b6e2b6d954, 0xbd1ed0d067d0ceb7, 0xc7d6eded93ed3b7e,
0x85e2cccc17cc2edb, 0x8468424215422a57, 0x2d2c98985a98b4c2, 0x55eda4a4aaa4490e,
0x50752828a0285d88, 0xb8865c5c6d5cda31, 0xed6bf8f8c7f8933f, 0x11c28686228644a4,
}
C3 := [256]u64 {
0x7830d818186018c0, 0xaf462623238c2305, 0xf991b8c6c63fc67e, 0x6fcdfbe8e887e813,
0xa113cb878726874c, 0x626d11b8b8dab8a9, 0x0502090101040108, 0x6e9e0d4f4f214f42,
0xee6c9b3636d836ad, 0x0451ffa6a6a2a659, 0xbdb90cd2d26fd2de, 0x06f70ef5f5f3f5fb,
0x80f2967979f979ef, 0xcede306f6fa16f5f, 0xef3f6d91917e91fc, 0x07a4f852525552aa,
0xfdc04760609d6027, 0x766535bcbccabc89, 0xcd2b379b9b569bac, 0x8c018a8e8e028e04,
0x155bd2a3a3b6a371, 0x3c186c0c0c300c60, 0x8af6847b7bf17bff, 0xe16a803535d435b5,
0x693af51d1d741de8, 0x47ddb3e0e0a7e053, 0xacb321d7d77bd7f6, 0xed999cc2c22fc25e,
0x965c432e2eb82e6d, 0x7a96294b4b314b62, 0x21e15dfefedffea3, 0x16aed55757415782,
0x412abd15155415a8, 0xb6eee87777c1779f, 0xeb6e923737dc37a5, 0x56d79ee5e5b3e57b,
0xd923139f9f469f8c, 0x17fd23f0f0e7f0d3, 0x7f94204a4a354a6a, 0x95a944dada4fda9e,
0x25b0a258587d58fa, 0xca8fcfc9c903c906, 0x8d527c2929a42955, 0x22145a0a0a280a50,
0x4f7f50b1b1feb1e1, 0x1a5dc9a0a0baa069, 0xdad6146b6bb16b7f, 0xab17d985852e855c,
0x73673cbdbdcebd81, 0x34ba8f5d5d695dd2, 0x5020901010401080, 0x03f507f4f4f7f4f3,
0xc08bddcbcb0bcb16, 0xc67cd33e3ef83eed, 0x110a2d0505140528, 0xe6ce78676781671f,
0x53d597e4e4b7e473, 0xbb4e0227279c2725, 0x5882734141194132, 0x9d0ba78b8b168b2c,
0x0153f6a7a7a6a751, 0x94fab27d7de97dcf, 0xfb374995956e95dc, 0x9fad56d8d847d88e,
0x30eb70fbfbcbfb8b, 0x71c1cdeeee9fee23, 0x91f8bb7c7ced7cc7, 0xe3cc716666856617,
0x8ea77bdddd53dda6, 0x4b2eaf17175c17b8, 0x468e454747014702, 0xdc211a9e9e429e84,
0xc589d4caca0fca1e, 0x995a582d2db42d75, 0x79632ebfbfc6bf91, 0x1b0e3f07071c0738,
0x2347acadad8ead01, 0x2fb4b05a5a755aea, 0xb51bef838336836c, 0xff66b63333cc3385,
0xf2c65c636391633f, 0x0a04120202080210, 0x384993aaaa92aa39, 0xa8e2de7171d971af,
0xcf8dc6c8c807c80e, 0x7d32d119196419c8, 0x70923b4949394972, 0x9aaf5fd9d943d986,
0x1df931f2f2eff2c3, 0x48dba8e3e3abe34b, 0x2ab6b95b5b715be2, 0x920dbc88881a8834,
0xc8293e9a9a529aa4, 0xbe4c0b262698262d, 0xfa64bf3232c8328d, 0x4a7d59b0b0fab0e9,
0x6acff2e9e983e91b, 0x331e770f0f3c0f78, 0xa6b733d5d573d5e6, 0xba1df480803a8074,
0x7c6127bebec2be99, 0xde87ebcdcd13cd26, 0xe468893434d034bd, 0x75903248483d487a,
0x24e354ffffdbffab, 0x8ff48d7a7af57af7, 0xea3d6490907a90f4, 0x3ebe9d5f5f615fc2,
0xa0403d202080201d, 0xd5d00f6868bd6867, 0x7234ca1a1a681ad0, 0x2c41b7aeae82ae19,
0x5e757db4b4eab4c9, 0x19a8ce54544d549a, 0xe53b7f93937693ec, 0xaa442f222288220d,
0xe9c86364648d6407, 0x12ff2af1f1e3f1db, 0xa2e6cc7373d173bf, 0x5a24821212481290,
0x5d807a40401d403a, 0x2810480808200840, 0xe89b95c3c32bc356, 0x7bc5dfecec97ec33,
0x90ab4ddbdb4bdb96, 0x1f5fc0a1a1bea161, 0x8307918d8d0e8d1c, 0xc97ac83d3df43df5,
0xf1335b97976697cc, 0x0000000000000000, 0xd483f9cfcf1bcf36, 0x87566e2b2bac2b45,
0xb3ece17676c57697, 0xb019e68282328264, 0xa9b128d6d67fd6fe, 0x7736c31b1b6c1bd8,
0x5b7774b5b5eeb5c1, 0x2943beafaf86af11, 0xdfd41d6a6ab56a77, 0x0da0ea50505d50ba,
0x4c8a574545094512, 0x18fb38f3f3ebf3cb, 0xf060ad3030c0309d, 0x74c3c4efef9bef2b,
0xc37eda3f3ffc3fe5, 0x1caac75555495592, 0x1059dba2a2b2a279, 0x65c9e9eaea8fea03,
0xecca6a656589650f, 0x686903babad2bab9, 0x935e4a2f2fbc2f65, 0xe79d8ec0c027c04e,
0x81a160dede5fdebe, 0x6c38fc1c1c701ce0, 0x2ee746fdfdd3fdbb, 0x649a1f4d4d294d52,
0xe0397692927292e4, 0xbceafa7575c9758f, 0x1e0c360606180630, 0x9809ae8a8a128a24,
0x40794bb2b2f2b2f9, 0x59d185e6e6bfe663, 0x361c7e0e0e380e70, 0x633ee71f1f7c1ff8,
0xf7c4556262956237, 0xa3b53ad4d477d4ee, 0x324d81a8a89aa829, 0xf4315296966296c4,
0x3aef62f9f9c3f99b, 0xf697a3c5c533c566, 0xb14a102525942535, 0x20b2ab59597959f2,
0xae15d084842a8454, 0xa7e4c57272d572b7, 0xdd72ec3939e439d5, 0x6198164c4c2d4c5a,
0x3bbc945e5e655eca, 0x85f09f7878fd78e7, 0xd870e53838e038dd, 0x8605988c8c0a8c14,
0xb2bf17d1d163d1c6, 0x0b57e4a5a5aea541, 0x4dd9a1e2e2afe243, 0xf8c24e616199612f,
0x457b42b3b3f6b3f1, 0xa542342121842115, 0xd625089c9c4a9c94, 0x663cee1e1e781ef0,
0x5286614343114322, 0xfc93b1c7c73bc776, 0x2be54ffcfcd7fcb3, 0x1408240404100420,
0x08a2e351515951b2, 0xc72f2599995e99bc, 0xc4da226d6da96d4f, 0x391a650d0d340d68,
0x35e979fafacffa83, 0x84a369dfdf5bdfb6, 0x9bfca97e7ee57ed7, 0xb44819242490243d,
0xd776fe3b3bec3bc5, 0x3d4b9aabab96ab31, 0xd181f0cece1fce3e, 0x5522991111441188,
0x8903838f8f068f0c, 0x6b9c044e4e254e4a, 0x517366b7b7e6b7d1, 0x60cbe0ebeb8beb0b,
0xcc78c13c3cf03cfd, 0xbf1ffd81813e817c, 0xfe354094946a94d4, 0x0cf31cf7f7fbf7eb,
0x676f18b9b9deb9a1, 0x5f268b13134c1398, 0x9c58512c2cb02c7d, 0xb8bb05d3d36bd3d6,
0x5cd38ce7e7bbe76b, 0xcbdc396e6ea56e57, 0xf395aac4c437c46e, 0x0f061b03030c0318,
0x13acdc565645568a, 0x49885e44440d441a, 0x9efea07f7fe17fdf, 0x374f88a9a99ea921,
0x8254672a2aa82a4d, 0x6d6b0abbbbd6bbb1, 0xe29f87c1c123c146, 0x02a6f153535153a2,
0x8ba572dcdc57dcae, 0x2716530b0b2c0b58, 0xd327019d9d4e9d9c, 0xc1d82b6c6cad6c47,
0xf562a43131c43195, 0xb9e8f37474cd7487, 0x09f115f6f6fff6e3, 0x438c4c464605460a,
0x2645a5acac8aac09, 0x970fb589891e893c, 0x4428b414145014a0, 0x42dfbae1e1a3e15b,
0x4e2ca616165816b0, 0xd274f73a3ae83acd, 0xd0d2066969b9696f, 0x2d12410909240948,
0xade0d77070dd70a7, 0x54716fb6b6e2b6d9, 0xb7bd1ed0d067d0ce, 0x7ec7d6eded93ed3b,
0xdb85e2cccc17cc2e, 0x578468424215422a, 0xc22d2c98985a98b4, 0x0e55eda4a4aaa449,
0x8850752828a0285d, 0x31b8865c5c6d5cda, 0x3fed6bf8f8c7f893, 0xa411c28686228644,
}
C4 := [256]u64 {
0xc07830d818186018, 0x05af462623238c23, 0x7ef991b8c6c63fc6, 0x136fcdfbe8e887e8,
0x4ca113cb87872687, 0xa9626d11b8b8dab8, 0x0805020901010401, 0x426e9e0d4f4f214f,
0xadee6c9b3636d836, 0x590451ffa6a6a2a6, 0xdebdb90cd2d26fd2, 0xfb06f70ef5f5f3f5,
0xef80f2967979f979, 0x5fcede306f6fa16f, 0xfcef3f6d91917e91, 0xaa07a4f852525552,
0x27fdc04760609d60, 0x89766535bcbccabc, 0xaccd2b379b9b569b, 0x048c018a8e8e028e,
0x71155bd2a3a3b6a3, 0x603c186c0c0c300c, 0xff8af6847b7bf17b, 0xb5e16a803535d435,
0xe8693af51d1d741d, 0x5347ddb3e0e0a7e0, 0xf6acb321d7d77bd7, 0x5eed999cc2c22fc2,
0x6d965c432e2eb82e, 0x627a96294b4b314b, 0xa321e15dfefedffe, 0x8216aed557574157,
0xa8412abd15155415, 0x9fb6eee87777c177, 0xa5eb6e923737dc37, 0x7b56d79ee5e5b3e5,
0x8cd923139f9f469f, 0xd317fd23f0f0e7f0, 0x6a7f94204a4a354a, 0x9e95a944dada4fda,
0xfa25b0a258587d58, 0x06ca8fcfc9c903c9, 0x558d527c2929a429, 0x5022145a0a0a280a,
0xe14f7f50b1b1feb1, 0x691a5dc9a0a0baa0, 0x7fdad6146b6bb16b, 0x5cab17d985852e85,
0x8173673cbdbdcebd, 0xd234ba8f5d5d695d, 0x8050209010104010, 0xf303f507f4f4f7f4,
0x16c08bddcbcb0bcb, 0xedc67cd33e3ef83e, 0x28110a2d05051405, 0x1fe6ce7867678167,
0x7353d597e4e4b7e4, 0x25bb4e0227279c27, 0x3258827341411941, 0x2c9d0ba78b8b168b,
0x510153f6a7a7a6a7, 0xcf94fab27d7de97d, 0xdcfb374995956e95, 0x8e9fad56d8d847d8,
0x8b30eb70fbfbcbfb, 0x2371c1cdeeee9fee, 0xc791f8bb7c7ced7c, 0x17e3cc7166668566,
0xa68ea77bdddd53dd, 0xb84b2eaf17175c17, 0x02468e4547470147, 0x84dc211a9e9e429e,
0x1ec589d4caca0fca, 0x75995a582d2db42d, 0x9179632ebfbfc6bf, 0x381b0e3f07071c07,
0x012347acadad8ead, 0xea2fb4b05a5a755a, 0x6cb51bef83833683, 0x85ff66b63333cc33,
0x3ff2c65c63639163, 0x100a041202020802, 0x39384993aaaa92aa, 0xafa8e2de7171d971,
0x0ecf8dc6c8c807c8, 0xc87d32d119196419, 0x7270923b49493949, 0x869aaf5fd9d943d9,
0xc31df931f2f2eff2, 0x4b48dba8e3e3abe3, 0xe22ab6b95b5b715b, 0x34920dbc88881a88,
0xa4c8293e9a9a529a, 0x2dbe4c0b26269826, 0x8dfa64bf3232c832, 0xe94a7d59b0b0fab0,
0x1b6acff2e9e983e9, 0x78331e770f0f3c0f, 0xe6a6b733d5d573d5, 0x74ba1df480803a80,
0x997c6127bebec2be, 0x26de87ebcdcd13cd, 0xbde468893434d034, 0x7a75903248483d48,
0xab24e354ffffdbff, 0xf78ff48d7a7af57a, 0xf4ea3d6490907a90, 0xc23ebe9d5f5f615f,
0x1da0403d20208020, 0x67d5d00f6868bd68, 0xd07234ca1a1a681a, 0x192c41b7aeae82ae,
0xc95e757db4b4eab4, 0x9a19a8ce54544d54, 0xece53b7f93937693, 0x0daa442f22228822,
0x07e9c86364648d64, 0xdb12ff2af1f1e3f1, 0xbfa2e6cc7373d173, 0x905a248212124812,
0x3a5d807a40401d40, 0x4028104808082008, 0x56e89b95c3c32bc3, 0x337bc5dfecec97ec,
0x9690ab4ddbdb4bdb, 0x611f5fc0a1a1bea1, 0x1c8307918d8d0e8d, 0xf5c97ac83d3df43d,
0xccf1335b97976697, 0x0000000000000000, 0x36d483f9cfcf1bcf, 0x4587566e2b2bac2b,
0x97b3ece17676c576, 0x64b019e682823282, 0xfea9b128d6d67fd6, 0xd87736c31b1b6c1b,
0xc15b7774b5b5eeb5, 0x112943beafaf86af, 0x77dfd41d6a6ab56a, 0xba0da0ea50505d50,
0x124c8a5745450945, 0xcb18fb38f3f3ebf3, 0x9df060ad3030c030, 0x2b74c3c4efef9bef,
0xe5c37eda3f3ffc3f, 0x921caac755554955, 0x791059dba2a2b2a2, 0x0365c9e9eaea8fea,
0x0fecca6a65658965, 0xb9686903babad2ba, 0x65935e4a2f2fbc2f, 0x4ee79d8ec0c027c0,
0xbe81a160dede5fde, 0xe06c38fc1c1c701c, 0xbb2ee746fdfdd3fd, 0x52649a1f4d4d294d,
0xe4e0397692927292, 0x8fbceafa7575c975, 0x301e0c3606061806, 0x249809ae8a8a128a,
0xf940794bb2b2f2b2, 0x6359d185e6e6bfe6, 0x70361c7e0e0e380e, 0xf8633ee71f1f7c1f,
0x37f7c45562629562, 0xeea3b53ad4d477d4, 0x29324d81a8a89aa8, 0xc4f4315296966296,
0x9b3aef62f9f9c3f9, 0x66f697a3c5c533c5, 0x35b14a1025259425, 0xf220b2ab59597959,
0x54ae15d084842a84, 0xb7a7e4c57272d572, 0xd5dd72ec3939e439, 0x5a6198164c4c2d4c,
0xca3bbc945e5e655e, 0xe785f09f7878fd78, 0xddd870e53838e038, 0x148605988c8c0a8c,
0xc6b2bf17d1d163d1, 0x410b57e4a5a5aea5, 0x434dd9a1e2e2afe2, 0x2ff8c24e61619961,
0xf1457b42b3b3f6b3, 0x15a5423421218421, 0x94d625089c9c4a9c, 0xf0663cee1e1e781e,
0x2252866143431143, 0x76fc93b1c7c73bc7, 0xb32be54ffcfcd7fc, 0x2014082404041004,
0xb208a2e351515951, 0xbcc72f2599995e99, 0x4fc4da226d6da96d, 0x68391a650d0d340d,
0x8335e979fafacffa, 0xb684a369dfdf5bdf, 0xd79bfca97e7ee57e, 0x3db4481924249024,
0xc5d776fe3b3bec3b, 0x313d4b9aabab96ab, 0x3ed181f0cece1fce, 0x8855229911114411,
0x0c8903838f8f068f, 0x4a6b9c044e4e254e, 0xd1517366b7b7e6b7, 0x0b60cbe0ebeb8beb,
0xfdcc78c13c3cf03c, 0x7cbf1ffd81813e81, 0xd4fe354094946a94, 0xeb0cf31cf7f7fbf7,
0xa1676f18b9b9deb9, 0x985f268b13134c13, 0x7d9c58512c2cb02c, 0xd6b8bb05d3d36bd3,
0x6b5cd38ce7e7bbe7, 0x57cbdc396e6ea56e, 0x6ef395aac4c437c4, 0x180f061b03030c03,
0x8a13acdc56564556, 0x1a49885e44440d44, 0xdf9efea07f7fe17f, 0x21374f88a9a99ea9,
0x4d8254672a2aa82a, 0xb16d6b0abbbbd6bb, 0x46e29f87c1c123c1, 0xa202a6f153535153,
0xae8ba572dcdc57dc, 0x582716530b0b2c0b, 0x9cd327019d9d4e9d, 0x47c1d82b6c6cad6c,
0x95f562a43131c431, 0x87b9e8f37474cd74, 0xe309f115f6f6fff6, 0x0a438c4c46460546,
0x092645a5acac8aac, 0x3c970fb589891e89, 0xa04428b414145014, 0x5b42dfbae1e1a3e1,
0xb04e2ca616165816, 0xcdd274f73a3ae83a, 0x6fd0d2066969b969, 0x482d124109092409,
0xa7ade0d77070dd70, 0xd954716fb6b6e2b6, 0xceb7bd1ed0d067d0, 0x3b7ec7d6eded93ed,
0x2edb85e2cccc17cc, 0x2a57846842421542, 0xb4c22d2c98985a98, 0x490e55eda4a4aaa4,
0x5d8850752828a028, 0xda31b8865c5c6d5c, 0x933fed6bf8f8c7f8, 0x44a411c286862286,
}
C5 := [256]u64 {
0x18c07830d8181860, 0x2305af462623238c, 0xc67ef991b8c6c63f, 0xe8136fcdfbe8e887,
0x874ca113cb878726, 0xb8a9626d11b8b8da, 0x0108050209010104, 0x4f426e9e0d4f4f21,
0x36adee6c9b3636d8, 0xa6590451ffa6a6a2, 0xd2debdb90cd2d26f, 0xf5fb06f70ef5f5f3,
0x79ef80f2967979f9, 0x6f5fcede306f6fa1, 0x91fcef3f6d91917e, 0x52aa07a4f8525255,
0x6027fdc04760609d, 0xbc89766535bcbcca, 0x9baccd2b379b9b56, 0x8e048c018a8e8e02,
0xa371155bd2a3a3b6, 0x0c603c186c0c0c30, 0x7bff8af6847b7bf1, 0x35b5e16a803535d4,
0x1de8693af51d1d74, 0xe05347ddb3e0e0a7, 0xd7f6acb321d7d77b, 0xc25eed999cc2c22f,
0x2e6d965c432e2eb8, 0x4b627a96294b4b31, 0xfea321e15dfefedf, 0x578216aed5575741,
0x15a8412abd151554, 0x779fb6eee87777c1, 0x37a5eb6e923737dc, 0xe57b56d79ee5e5b3,
0x9f8cd923139f9f46, 0xf0d317fd23f0f0e7, 0x4a6a7f94204a4a35, 0xda9e95a944dada4f,
0x58fa25b0a258587d, 0xc906ca8fcfc9c903, 0x29558d527c2929a4, 0x0a5022145a0a0a28,
0xb1e14f7f50b1b1fe, 0xa0691a5dc9a0a0ba, 0x6b7fdad6146b6bb1, 0x855cab17d985852e,
0xbd8173673cbdbdce, 0x5dd234ba8f5d5d69, 0x1080502090101040, 0xf4f303f507f4f4f7,
0xcb16c08bddcbcb0b, 0x3eedc67cd33e3ef8, 0x0528110a2d050514, 0x671fe6ce78676781,
0xe47353d597e4e4b7, 0x2725bb4e0227279c, 0x4132588273414119, 0x8b2c9d0ba78b8b16,
0xa7510153f6a7a7a6, 0x7dcf94fab27d7de9, 0x95dcfb374995956e, 0xd88e9fad56d8d847,
0xfb8b30eb70fbfbcb, 0xee2371c1cdeeee9f, 0x7cc791f8bb7c7ced, 0x6617e3cc71666685,
0xdda68ea77bdddd53, 0x17b84b2eaf17175c, 0x4702468e45474701, 0x9e84dc211a9e9e42,
0xca1ec589d4caca0f, 0x2d75995a582d2db4, 0xbf9179632ebfbfc6, 0x07381b0e3f07071c,
0xad012347acadad8e, 0x5aea2fb4b05a5a75, 0x836cb51bef838336, 0x3385ff66b63333cc,
0x633ff2c65c636391, 0x02100a0412020208, 0xaa39384993aaaa92, 0x71afa8e2de7171d9,
0xc80ecf8dc6c8c807, 0x19c87d32d1191964, 0x497270923b494939, 0xd9869aaf5fd9d943,
0xf2c31df931f2f2ef, 0xe34b48dba8e3e3ab, 0x5be22ab6b95b5b71, 0x8834920dbc88881a,
0x9aa4c8293e9a9a52, 0x262dbe4c0b262698, 0x328dfa64bf3232c8, 0xb0e94a7d59b0b0fa,
0xe91b6acff2e9e983, 0x0f78331e770f0f3c, 0xd5e6a6b733d5d573, 0x8074ba1df480803a,
0xbe997c6127bebec2, 0xcd26de87ebcdcd13, 0x34bde468893434d0, 0x487a75903248483d,
0xffab24e354ffffdb, 0x7af78ff48d7a7af5, 0x90f4ea3d6490907a, 0x5fc23ebe9d5f5f61,
0x201da0403d202080, 0x6867d5d00f6868bd, 0x1ad07234ca1a1a68, 0xae192c41b7aeae82,
0xb4c95e757db4b4ea, 0x549a19a8ce54544d, 0x93ece53b7f939376, 0x220daa442f222288,
0x6407e9c86364648d, 0xf1db12ff2af1f1e3, 0x73bfa2e6cc7373d1, 0x12905a2482121248,
0x403a5d807a40401d, 0x0840281048080820, 0xc356e89b95c3c32b, 0xec337bc5dfecec97,
0xdb9690ab4ddbdb4b, 0xa1611f5fc0a1a1be, 0x8d1c8307918d8d0e, 0x3df5c97ac83d3df4,
0x97ccf1335b979766, 0x0000000000000000, 0xcf36d483f9cfcf1b, 0x2b4587566e2b2bac,
0x7697b3ece17676c5, 0x8264b019e6828232, 0xd6fea9b128d6d67f, 0x1bd87736c31b1b6c,
0xb5c15b7774b5b5ee, 0xaf112943beafaf86, 0x6a77dfd41d6a6ab5, 0x50ba0da0ea50505d,
0x45124c8a57454509, 0xf3cb18fb38f3f3eb, 0x309df060ad3030c0, 0xef2b74c3c4efef9b,
0x3fe5c37eda3f3ffc, 0x55921caac7555549, 0xa2791059dba2a2b2, 0xea0365c9e9eaea8f,
0x650fecca6a656589, 0xbab9686903babad2, 0x2f65935e4a2f2fbc, 0xc04ee79d8ec0c027,
0xdebe81a160dede5f, 0x1ce06c38fc1c1c70, 0xfdbb2ee746fdfdd3, 0x4d52649a1f4d4d29,
0x92e4e03976929272, 0x758fbceafa7575c9, 0x06301e0c36060618, 0x8a249809ae8a8a12,
0xb2f940794bb2b2f2, 0xe66359d185e6e6bf, 0x0e70361c7e0e0e38, 0x1ff8633ee71f1f7c,
0x6237f7c455626295, 0xd4eea3b53ad4d477, 0xa829324d81a8a89a, 0x96c4f43152969662,
0xf99b3aef62f9f9c3, 0xc566f697a3c5c533, 0x2535b14a10252594, 0x59f220b2ab595979,
0x8454ae15d084842a, 0x72b7a7e4c57272d5, 0x39d5dd72ec3939e4, 0x4c5a6198164c4c2d,
0x5eca3bbc945e5e65, 0x78e785f09f7878fd, 0x38ddd870e53838e0, 0x8c148605988c8c0a,
0xd1c6b2bf17d1d163, 0xa5410b57e4a5a5ae, 0xe2434dd9a1e2e2af, 0x612ff8c24e616199,
0xb3f1457b42b3b3f6, 0x2115a54234212184, 0x9c94d625089c9c4a, 0x1ef0663cee1e1e78,
0x4322528661434311, 0xc776fc93b1c7c73b, 0xfcb32be54ffcfcd7, 0x0420140824040410,
0x51b208a2e3515159, 0x99bcc72f2599995e, 0x6d4fc4da226d6da9, 0x0d68391a650d0d34,
0xfa8335e979fafacf, 0xdfb684a369dfdf5b, 0x7ed79bfca97e7ee5, 0x243db44819242490,
0x3bc5d776fe3b3bec, 0xab313d4b9aabab96, 0xce3ed181f0cece1f, 0x1188552299111144,
0x8f0c8903838f8f06, 0x4e4a6b9c044e4e25, 0xb7d1517366b7b7e6, 0xeb0b60cbe0ebeb8b,
0x3cfdcc78c13c3cf0, 0x817cbf1ffd81813e, 0x94d4fe354094946a, 0xf7eb0cf31cf7f7fb,
0xb9a1676f18b9b9de, 0x13985f268b13134c, 0x2c7d9c58512c2cb0, 0xd3d6b8bb05d3d36b,
0xe76b5cd38ce7e7bb, 0x6e57cbdc396e6ea5, 0xc46ef395aac4c437, 0x03180f061b03030c,
0x568a13acdc565645, 0x441a49885e44440d, 0x7fdf9efea07f7fe1, 0xa921374f88a9a99e,
0x2a4d8254672a2aa8, 0xbbb16d6b0abbbbd6, 0xc146e29f87c1c123, 0x53a202a6f1535351,
0xdcae8ba572dcdc57, 0x0b582716530b0b2c, 0x9d9cd327019d9d4e, 0x6c47c1d82b6c6cad,
0x3195f562a43131c4, 0x7487b9e8f37474cd, 0xf6e309f115f6f6ff, 0x460a438c4c464605,
0xac092645a5acac8a, 0x893c970fb589891e, 0x14a04428b4141450, 0xe15b42dfbae1e1a3,
0x16b04e2ca6161658, 0x3acdd274f73a3ae8, 0x696fd0d2066969b9, 0x09482d1241090924,
0x70a7ade0d77070dd, 0xb6d954716fb6b6e2, 0xd0ceb7bd1ed0d067, 0xed3b7ec7d6eded93,
0xcc2edb85e2cccc17, 0x422a578468424215, 0x98b4c22d2c98985a, 0xa4490e55eda4a4aa,
0x285d8850752828a0, 0x5cda31b8865c5c6d, 0xf8933fed6bf8f8c7, 0x8644a411c2868622,
}
C6 := [256]u64 {
0x6018c07830d81818, 0x8c2305af46262323, 0x3fc67ef991b8c6c6, 0x87e8136fcdfbe8e8,
0x26874ca113cb8787, 0xdab8a9626d11b8b8, 0x0401080502090101, 0x214f426e9e0d4f4f,
0xd836adee6c9b3636, 0xa2a6590451ffa6a6, 0x6fd2debdb90cd2d2, 0xf3f5fb06f70ef5f5,
0xf979ef80f2967979, 0xa16f5fcede306f6f, 0x7e91fcef3f6d9191, 0x5552aa07a4f85252,
0x9d6027fdc0476060, 0xcabc89766535bcbc, 0x569baccd2b379b9b, 0x028e048c018a8e8e,
0xb6a371155bd2a3a3, 0x300c603c186c0c0c, 0xf17bff8af6847b7b, 0xd435b5e16a803535,
0x741de8693af51d1d, 0xa7e05347ddb3e0e0, 0x7bd7f6acb321d7d7, 0x2fc25eed999cc2c2,
0xb82e6d965c432e2e, 0x314b627a96294b4b, 0xdffea321e15dfefe, 0x41578216aed55757,
0x5415a8412abd1515, 0xc1779fb6eee87777, 0xdc37a5eb6e923737, 0xb3e57b56d79ee5e5,
0x469f8cd923139f9f, 0xe7f0d317fd23f0f0, 0x354a6a7f94204a4a, 0x4fda9e95a944dada,
0x7d58fa25b0a25858, 0x03c906ca8fcfc9c9, 0xa429558d527c2929, 0x280a5022145a0a0a,
0xfeb1e14f7f50b1b1, 0xbaa0691a5dc9a0a0, 0xb16b7fdad6146b6b, 0x2e855cab17d98585,
0xcebd8173673cbdbd, 0x695dd234ba8f5d5d, 0x4010805020901010, 0xf7f4f303f507f4f4,
0x0bcb16c08bddcbcb, 0xf83eedc67cd33e3e, 0x140528110a2d0505, 0x81671fe6ce786767,
0xb7e47353d597e4e4, 0x9c2725bb4e022727, 0x1941325882734141, 0x168b2c9d0ba78b8b,
0xa6a7510153f6a7a7, 0xe97dcf94fab27d7d, 0x6e95dcfb37499595, 0x47d88e9fad56d8d8,
0xcbfb8b30eb70fbfb, 0x9fee2371c1cdeeee, 0xed7cc791f8bb7c7c, 0x856617e3cc716666,
0x53dda68ea77bdddd, 0x5c17b84b2eaf1717, 0x014702468e454747, 0x429e84dc211a9e9e,
0x0fca1ec589d4caca, 0xb42d75995a582d2d, 0xc6bf9179632ebfbf, 0x1c07381b0e3f0707,
0x8ead012347acadad, 0x755aea2fb4b05a5a, 0x36836cb51bef8383, 0xcc3385ff66b63333,
0x91633ff2c65c6363, 0x0802100a04120202, 0x92aa39384993aaaa, 0xd971afa8e2de7171,
0x07c80ecf8dc6c8c8, 0x6419c87d32d11919, 0x39497270923b4949, 0x43d9869aaf5fd9d9,
0xeff2c31df931f2f2, 0xabe34b48dba8e3e3, 0x715be22ab6b95b5b, 0x1a8834920dbc8888,
0x529aa4c8293e9a9a, 0x98262dbe4c0b2626, 0xc8328dfa64bf3232, 0xfab0e94a7d59b0b0,
0x83e91b6acff2e9e9, 0x3c0f78331e770f0f, 0x73d5e6a6b733d5d5, 0x3a8074ba1df48080,
0xc2be997c6127bebe, 0x13cd26de87ebcdcd, 0xd034bde468893434, 0x3d487a7590324848,
0xdbffab24e354ffff, 0xf57af78ff48d7a7a, 0x7a90f4ea3d649090, 0x615fc23ebe9d5f5f,
0x80201da0403d2020, 0xbd6867d5d00f6868, 0x681ad07234ca1a1a, 0x82ae192c41b7aeae,
0xeab4c95e757db4b4, 0x4d549a19a8ce5454, 0x7693ece53b7f9393, 0x88220daa442f2222,
0x8d6407e9c8636464, 0xe3f1db12ff2af1f1, 0xd173bfa2e6cc7373, 0x4812905a24821212,
0x1d403a5d807a4040, 0x2008402810480808, 0x2bc356e89b95c3c3, 0x97ec337bc5dfecec,
0x4bdb9690ab4ddbdb, 0xbea1611f5fc0a1a1, 0x0e8d1c8307918d8d, 0xf43df5c97ac83d3d,
0x6697ccf1335b9797, 0x0000000000000000, 0x1bcf36d483f9cfcf, 0xac2b4587566e2b2b,
0xc57697b3ece17676, 0x328264b019e68282, 0x7fd6fea9b128d6d6, 0x6c1bd87736c31b1b,
0xeeb5c15b7774b5b5, 0x86af112943beafaf, 0xb56a77dfd41d6a6a, 0x5d50ba0da0ea5050,
0x0945124c8a574545, 0xebf3cb18fb38f3f3, 0xc0309df060ad3030, 0x9bef2b74c3c4efef,
0xfc3fe5c37eda3f3f, 0x4955921caac75555, 0xb2a2791059dba2a2, 0x8fea0365c9e9eaea,
0x89650fecca6a6565, 0xd2bab9686903baba, 0xbc2f65935e4a2f2f, 0x27c04ee79d8ec0c0,
0x5fdebe81a160dede, 0x701ce06c38fc1c1c, 0xd3fdbb2ee746fdfd, 0x294d52649a1f4d4d,
0x7292e4e039769292, 0xc9758fbceafa7575, 0x1806301e0c360606, 0x128a249809ae8a8a,
0xf2b2f940794bb2b2, 0xbfe66359d185e6e6, 0x380e70361c7e0e0e, 0x7c1ff8633ee71f1f,
0x956237f7c4556262, 0x77d4eea3b53ad4d4, 0x9aa829324d81a8a8, 0x6296c4f431529696,
0xc3f99b3aef62f9f9, 0x33c566f697a3c5c5, 0x942535b14a102525, 0x7959f220b2ab5959,
0x2a8454ae15d08484, 0xd572b7a7e4c57272, 0xe439d5dd72ec3939, 0x2d4c5a6198164c4c,
0x655eca3bbc945e5e, 0xfd78e785f09f7878, 0xe038ddd870e53838, 0x0a8c148605988c8c,
0x63d1c6b2bf17d1d1, 0xaea5410b57e4a5a5, 0xafe2434dd9a1e2e2, 0x99612ff8c24e6161,
0xf6b3f1457b42b3b3, 0x842115a542342121, 0x4a9c94d625089c9c, 0x781ef0663cee1e1e,
0x1143225286614343, 0x3bc776fc93b1c7c7, 0xd7fcb32be54ffcfc, 0x1004201408240404,
0x5951b208a2e35151, 0x5e99bcc72f259999, 0xa96d4fc4da226d6d, 0x340d68391a650d0d,
0xcffa8335e979fafa, 0x5bdfb684a369dfdf, 0xe57ed79bfca97e7e, 0x90243db448192424,
0xec3bc5d776fe3b3b, 0x96ab313d4b9aabab, 0x1fce3ed181f0cece, 0x4411885522991111,
0x068f0c8903838f8f, 0x254e4a6b9c044e4e, 0xe6b7d1517366b7b7, 0x8beb0b60cbe0ebeb,
0xf03cfdcc78c13c3c, 0x3e817cbf1ffd8181, 0x6a94d4fe35409494, 0xfbf7eb0cf31cf7f7,
0xdeb9a1676f18b9b9, 0x4c13985f268b1313, 0xb02c7d9c58512c2c, 0x6bd3d6b8bb05d3d3,
0xbbe76b5cd38ce7e7, 0xa56e57cbdc396e6e, 0x37c46ef395aac4c4, 0x0c03180f061b0303,
0x45568a13acdc5656, 0x0d441a49885e4444, 0xe17fdf9efea07f7f, 0x9ea921374f88a9a9,
0xa82a4d8254672a2a, 0xd6bbb16d6b0abbbb, 0x23c146e29f87c1c1, 0x5153a202a6f15353,
0x57dcae8ba572dcdc, 0x2c0b582716530b0b, 0x4e9d9cd327019d9d, 0xad6c47c1d82b6c6c,
0xc43195f562a43131, 0xcd7487b9e8f37474, 0xfff6e309f115f6f6, 0x05460a438c4c4646,
0x8aac092645a5acac, 0x1e893c970fb58989, 0x5014a04428b41414, 0xa3e15b42dfbae1e1,
0x5816b04e2ca61616, 0xe83acdd274f73a3a, 0xb9696fd0d2066969, 0x2409482d12410909,
0xdd70a7ade0d77070, 0xe2b6d954716fb6b6, 0x67d0ceb7bd1ed0d0, 0x93ed3b7ec7d6eded,
0x17cc2edb85e2cccc, 0x15422a5784684242, 0x5a98b4c22d2c9898, 0xaaa4490e55eda4a4,
0xa0285d8850752828, 0x6d5cda31b8865c5c, 0xc7f8933fed6bf8f8, 0x228644a411c28686,
}
C7 := [256]u64 {
0x186018c07830d818, 0x238c2305af462623, 0xc63fc67ef991b8c6, 0xe887e8136fcdfbe8,
0x8726874ca113cb87, 0xb8dab8a9626d11b8, 0x0104010805020901, 0x4f214f426e9e0d4f,
0x36d836adee6c9b36, 0xa6a2a6590451ffa6, 0xd26fd2debdb90cd2, 0xf5f3f5fb06f70ef5,
0x79f979ef80f29679, 0x6fa16f5fcede306f, 0x917e91fcef3f6d91, 0x525552aa07a4f852,
0x609d6027fdc04760, 0xbccabc89766535bc, 0x9b569baccd2b379b, 0x8e028e048c018a8e,
0xa3b6a371155bd2a3, 0x0c300c603c186c0c, 0x7bf17bff8af6847b, 0x35d435b5e16a8035,
0x1d741de8693af51d, 0xe0a7e05347ddb3e0, 0xd77bd7f6acb321d7, 0xc22fc25eed999cc2,
0x2eb82e6d965c432e, 0x4b314b627a96294b, 0xfedffea321e15dfe, 0x5741578216aed557,
0x155415a8412abd15, 0x77c1779fb6eee877, 0x37dc37a5eb6e9237, 0xe5b3e57b56d79ee5,
0x9f469f8cd923139f, 0xf0e7f0d317fd23f0, 0x4a354a6a7f94204a, 0xda4fda9e95a944da,
0x587d58fa25b0a258, 0xc903c906ca8fcfc9, 0x29a429558d527c29, 0x0a280a5022145a0a,
0xb1feb1e14f7f50b1, 0xa0baa0691a5dc9a0, 0x6bb16b7fdad6146b, 0x852e855cab17d985,
0xbdcebd8173673cbd, 0x5d695dd234ba8f5d, 0x1040108050209010, 0xf4f7f4f303f507f4,
0xcb0bcb16c08bddcb, 0x3ef83eedc67cd33e, 0x05140528110a2d05, 0x6781671fe6ce7867,
0xe4b7e47353d597e4, 0x279c2725bb4e0227, 0x4119413258827341, 0x8b168b2c9d0ba78b,
0xa7a6a7510153f6a7, 0x7de97dcf94fab27d, 0x956e95dcfb374995, 0xd847d88e9fad56d8,
0xfbcbfb8b30eb70fb, 0xee9fee2371c1cdee, 0x7ced7cc791f8bb7c, 0x66856617e3cc7166,
0xdd53dda68ea77bdd, 0x175c17b84b2eaf17, 0x47014702468e4547, 0x9e429e84dc211a9e,
0xca0fca1ec589d4ca, 0x2db42d75995a582d, 0xbfc6bf9179632ebf, 0x071c07381b0e3f07,
0xad8ead012347acad, 0x5a755aea2fb4b05a, 0x8336836cb51bef83, 0x33cc3385ff66b633,
0x6391633ff2c65c63, 0x020802100a041202, 0xaa92aa39384993aa, 0x71d971afa8e2de71,
0xc807c80ecf8dc6c8, 0x196419c87d32d119, 0x4939497270923b49, 0xd943d9869aaf5fd9,
0xf2eff2c31df931f2, 0xe3abe34b48dba8e3, 0x5b715be22ab6b95b, 0x881a8834920dbc88,
0x9a529aa4c8293e9a, 0x2698262dbe4c0b26, 0x32c8328dfa64bf32, 0xb0fab0e94a7d59b0,
0xe983e91b6acff2e9, 0x0f3c0f78331e770f, 0xd573d5e6a6b733d5, 0x803a8074ba1df480,
0xbec2be997c6127be, 0xcd13cd26de87ebcd, 0x34d034bde4688934, 0x483d487a75903248,
0xffdbffab24e354ff, 0x7af57af78ff48d7a, 0x907a90f4ea3d6490, 0x5f615fc23ebe9d5f,
0x2080201da0403d20, 0x68bd6867d5d00f68, 0x1a681ad07234ca1a, 0xae82ae192c41b7ae,
0xb4eab4c95e757db4, 0x544d549a19a8ce54, 0x937693ece53b7f93, 0x2288220daa442f22,
0x648d6407e9c86364, 0xf1e3f1db12ff2af1, 0x73d173bfa2e6cc73, 0x124812905a248212,
0x401d403a5d807a40, 0x0820084028104808, 0xc32bc356e89b95c3, 0xec97ec337bc5dfec,
0xdb4bdb9690ab4ddb, 0xa1bea1611f5fc0a1, 0x8d0e8d1c8307918d, 0x3df43df5c97ac83d,
0x976697ccf1335b97, 0x0000000000000000, 0xcf1bcf36d483f9cf, 0x2bac2b4587566e2b,
0x76c57697b3ece176, 0x82328264b019e682, 0xd67fd6fea9b128d6, 0x1b6c1bd87736c31b,
0xb5eeb5c15b7774b5, 0xaf86af112943beaf, 0x6ab56a77dfd41d6a, 0x505d50ba0da0ea50,
0x450945124c8a5745, 0xf3ebf3cb18fb38f3, 0x30c0309df060ad30, 0xef9bef2b74c3c4ef,
0x3ffc3fe5c37eda3f, 0x554955921caac755, 0xa2b2a2791059dba2, 0xea8fea0365c9e9ea,
0x6589650fecca6a65, 0xbad2bab9686903ba, 0x2fbc2f65935e4a2f, 0xc027c04ee79d8ec0,
0xde5fdebe81a160de, 0x1c701ce06c38fc1c, 0xfdd3fdbb2ee746fd, 0x4d294d52649a1f4d,
0x927292e4e0397692, 0x75c9758fbceafa75, 0x061806301e0c3606, 0x8a128a249809ae8a,
0xb2f2b2f940794bb2, 0xe6bfe66359d185e6, 0x0e380e70361c7e0e, 0x1f7c1ff8633ee71f,
0x62956237f7c45562, 0xd477d4eea3b53ad4, 0xa89aa829324d81a8, 0x966296c4f4315296,
0xf9c3f99b3aef62f9, 0xc533c566f697a3c5, 0x25942535b14a1025, 0x597959f220b2ab59,
0x842a8454ae15d084, 0x72d572b7a7e4c572, 0x39e439d5dd72ec39, 0x4c2d4c5a6198164c,
0x5e655eca3bbc945e, 0x78fd78e785f09f78, 0x38e038ddd870e538, 0x8c0a8c148605988c,
0xd163d1c6b2bf17d1, 0xa5aea5410b57e4a5, 0xe2afe2434dd9a1e2, 0x6199612ff8c24e61,
0xb3f6b3f1457b42b3, 0x21842115a5423421, 0x9c4a9c94d625089c, 0x1e781ef0663cee1e,
0x4311432252866143, 0xc73bc776fc93b1c7, 0xfcd7fcb32be54ffc, 0x0410042014082404,
0x515951b208a2e351, 0x995e99bcc72f2599, 0x6da96d4fc4da226d, 0x0d340d68391a650d,
0xfacffa8335e979fa, 0xdf5bdfb684a369df, 0x7ee57ed79bfca97e, 0x2490243db4481924,
0x3bec3bc5d776fe3b, 0xab96ab313d4b9aab, 0xce1fce3ed181f0ce, 0x1144118855229911,
0x8f068f0c8903838f, 0x4e254e4a6b9c044e, 0xb7e6b7d1517366b7, 0xeb8beb0b60cbe0eb,
0x3cf03cfdcc78c13c, 0x813e817cbf1ffd81, 0x946a94d4fe354094, 0xf7fbf7eb0cf31cf7,
0xb9deb9a1676f18b9, 0x134c13985f268b13, 0x2cb02c7d9c58512c, 0xd36bd3d6b8bb05d3,
0xe7bbe76b5cd38ce7, 0x6ea56e57cbdc396e, 0xc437c46ef395aac4, 0x030c03180f061b03,
0x5645568a13acdc56, 0x440d441a49885e44, 0x7fe17fdf9efea07f, 0xa99ea921374f88a9,
0x2aa82a4d8254672a, 0xbbd6bbb16d6b0abb, 0xc123c146e29f87c1, 0x535153a202a6f153,
0xdc57dcae8ba572dc, 0x0b2c0b582716530b, 0x9d4e9d9cd327019d, 0x6cad6c47c1d82b6c,
0x31c43195f562a431, 0x74cd7487b9e8f374, 0xf6fff6e309f115f6, 0x4605460a438c4c46,
0xac8aac092645a5ac, 0x891e893c970fb589, 0x145014a04428b414, 0xe1a3e15b42dfbae1,
0x165816b04e2ca616, 0x3ae83acdd274f73a, 0x69b9696fd0d20669, 0x092409482d124109,
0x70dd70a7ade0d770, 0xb6e2b6d954716fb6, 0xd067d0ceb7bd1ed0, 0xed93ed3b7ec7d6ed,
0xcc17cc2edb85e2cc, 0x4215422a57846842, 0x985a98b4c22d2c98, 0xa4aaa4490e55eda4,
0x28a0285d88507528, 0x5c6d5cda31b8865c, 0xf8c7f8933fed6bf8, 0x86228644a411c286,
}
RC := [ROUNDS + 1]u64 {
0x0000000000000000,
0x1823c6e887b8014f,
0x36a6d2f5796f9152,
0x60bc9b8ea30c7b35,
0x1de0d7c22e4bfe57,
0x157737e59ff04ada,
0x58c9290ab1a06b85,
0xbd5d10f4cb3e0567,
0xe427418ba77d95d8,
0xfbee7c66dd17479e,
0xca2dbf07ad5a8333,
}
transform :: proc (ctx: ^Whirlpool_Context) {
K, block, state, L: [8]u64
for i := 0; i < 8; i += 1 {block[i] = util.U64_BE(ctx.buffer[8 * i:])}
for i := 0; i < 8; i += 1 {
K[i] = ctx.hash[i]
state[i] = block[i] ~ K[i]
}
for r := 1; r <= ROUNDS; r += 1 {
for i := 0; i < 8; i += 1 {
L[i] = C0[byte(K[i % 8] >> 56)] ~
C1[byte(K[(i + 7) % 8] >> 48)] ~
C2[byte(K[(i + 6) % 8] >> 40)] ~
C3[byte(K[(i + 5) % 8] >> 32)] ~
C4[byte(K[(i + 4) % 8] >> 24)] ~
C5[byte(K[(i + 3) % 8] >> 16)] ~
C6[byte(K[(i + 2) % 8] >> 8)] ~
C7[byte(K[(i + 1) % 8])]
}
L[0] ~= RC[r]
for i := 0; i < 8; i += 1 {K[i] = L[i]}
for i := 0; i < 8; i += 1 {
L[i] = C0[byte(state[i % 8] >> 56)] ~
C1[byte(state[(i + 7) % 8] >> 48)] ~
C2[byte(state[(i + 6) % 8] >> 40)] ~
C3[byte(state[(i + 5) % 8] >> 32)] ~
C4[byte(state[(i + 4) % 8] >> 24)] ~
C5[byte(state[(i + 3) % 8] >> 16)] ~
C6[byte(state[(i + 2) % 8] >> 8)] ~
C7[byte(state[(i + 1) % 8])] ~
K[i % 8]
}
for i := 0; i < 8; i += 1 {state[i] = L[i]}
}
for i := 0; i < 8; i += 1 {ctx.hash[i] ~= state[i] ~ block[i]}
}
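
The eight lookup tables above are byte-wise rotations of a single table: Ci[x] equals C0[x] rotated right by 8·i bits, which is what lets `transform` feed successively lower bytes of each word into successive tables. A minimal sanity-check sketch, not part of the package; it assumes only the table names C0 through C7 already used in this file:

import "core:math/bits"

verify_whirlpool_tables :: proc() {
	tables := [8]^[256]u64{&C0, &C1, &C2, &C3, &C4, &C5, &C6, &C7}
	for t, i in tables {
		for x in 0..<256 {
			// rotr(v, 8*i) expressed as a left rotation
			assert(t[x] == bits.rotate_left64(C0[x], (64 - 8*i) % 64))
		}
	}
}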

View File

@@ -1,5 +1,5 @@
/*
Package endian implements sa simple translation between bytes and numbers with
Package endian implements a simple translation between bytes and numbers with
specific endian encodings.
buf: [100]u8

View File

@@ -1,5 +1,8 @@
package encoding_endian
import "core:intrinsics"
import "core:math/bits"
Byte_Order :: enum u8 {
Little,
Big,
@@ -7,147 +10,154 @@ Byte_Order :: enum u8 {
PLATFORM_BYTE_ORDER :: Byte_Order.Little when ODIN_ENDIAN == .Little else Byte_Order.Big
get_u16 :: proc(b: []byte, order: Byte_Order) -> (v: u16, ok: bool) {
unchecked_get_u16le :: #force_inline proc "contextless" (b: []byte) -> u16 {
return bits.from_le_u16(intrinsics.unaligned_load((^u16)(raw_data(b))))
}
unchecked_get_u32le :: #force_inline proc "contextless" (b: []byte) -> u32 {
return bits.from_le_u32(intrinsics.unaligned_load((^u32)(raw_data(b))))
}
unchecked_get_u64le :: #force_inline proc "contextless" (b: []byte) -> u64 {
return bits.from_le_u64(intrinsics.unaligned_load((^u64)(raw_data(b))))
}
unchecked_get_u16be :: #force_inline proc "contextless" (b: []byte) -> u16 {
return bits.from_be_u16(intrinsics.unaligned_load((^u16)(raw_data(b))))
}
unchecked_get_u32be :: #force_inline proc "contextless" (b: []byte) -> u32 {
return bits.from_be_u32(intrinsics.unaligned_load((^u32)(raw_data(b))))
}
unchecked_get_u64be :: #force_inline proc "contextless" (b: []byte) -> u64 {
return bits.from_be_u64(intrinsics.unaligned_load((^u64)(raw_data(b))))
}
get_u16 :: proc "contextless" (b: []byte, order: Byte_Order) -> (v: u16, ok: bool) {
if len(b) < 2 {
return 0, false
}
#no_bounds_check if order == .Little {
v = u16(b[0]) | u16(b[1])<<8
if order == .Little {
v = unchecked_get_u16le(b)
} else {
v = u16(b[1]) | u16(b[0])<<8
v = unchecked_get_u16be(b)
}
return v, true
}
get_u32 :: proc(b: []byte, order: Byte_Order) -> (v: u32, ok: bool) {
get_u32 :: proc "contextless" (b: []byte, order: Byte_Order) -> (v: u32, ok: bool) {
if len(b) < 4 {
return 0, false
}
#no_bounds_check if order == .Little {
v = u32(b[0]) | u32(b[1])<<8 | u32(b[2])<<16 | u32(b[3])<<24
if order == .Little {
v = unchecked_get_u32le(b)
} else {
v = u32(b[3]) | u32(b[2])<<8 | u32(b[1])<<16 | u32(b[0])<<24
v = unchecked_get_u32be(b)
}
return v, true
}
get_u64 :: proc(b: []byte, order: Byte_Order) -> (v: u64, ok: bool) {
get_u64 :: proc "contextless" (b: []byte, order: Byte_Order) -> (v: u64, ok: bool) {
if len(b) < 8 {
return 0, false
}
#no_bounds_check if order == .Little {
v = u64(b[0]) | u64(b[1])<<8 | u64(b[2])<<16 | u64(b[3])<<24 |
u64(b[4])<<32 | u64(b[5])<<40 | u64(b[6])<<48 | u64(b[7])<<56
if order == .Little {
v = unchecked_get_u64le(b)
} else {
v = u64(b[7]) | u64(b[6])<<8 | u64(b[5])<<16 | u64(b[4])<<24 |
u64(b[3])<<32 | u64(b[2])<<40 | u64(b[1])<<48 | u64(b[0])<<56
v = unchecked_get_u64be(b)
}
return v, true
}
get_i16 :: proc(b: []byte, order: Byte_Order) -> (i16, bool) {
get_i16 :: proc "contextless" (b: []byte, order: Byte_Order) -> (i16, bool) {
v, ok := get_u16(b, order)
return i16(v), ok
}
get_i32 :: proc(b: []byte, order: Byte_Order) -> (i32, bool) {
get_i32 :: proc "contextless" (b: []byte, order: Byte_Order) -> (i32, bool) {
v, ok := get_u32(b, order)
return i32(v), ok
}
get_i64 :: proc(b: []byte, order: Byte_Order) -> (i64, bool) {
get_i64 :: proc "contextless" (b: []byte, order: Byte_Order) -> (i64, bool) {
v, ok := get_u64(b, order)
return i64(v), ok
}
get_f16 :: proc(b: []byte, order: Byte_Order) -> (f16, bool) {
get_f16 :: proc "contextless" (b: []byte, order: Byte_Order) -> (f16, bool) {
v, ok := get_u16(b, order)
return transmute(f16)v, ok
}
get_f32 :: proc(b: []byte, order: Byte_Order) -> (f32, bool) {
get_f32 :: proc "contextless" (b: []byte, order: Byte_Order) -> (f32, bool) {
v, ok := get_u32(b, order)
return transmute(f32)v, ok
}
get_f64 :: proc(b: []byte, order: Byte_Order) -> (f64, bool) {
get_f64 :: proc "contextless" (b: []byte, order: Byte_Order) -> (f64, bool) {
v, ok := get_u64(b, order)
return transmute(f64)v, ok
}
unchecked_put_u16le :: #force_inline proc "contextless" (b: []byte, v: u16) {
intrinsics.unaligned_store((^u16)(raw_data(b)), bits.to_le_u16(v))
}
unchecked_put_u32le :: #force_inline proc "contextless" (b: []byte, v: u32) {
intrinsics.unaligned_store((^u32)(raw_data(b)), bits.to_le_u32(v))
}
unchecked_put_u64le :: #force_inline proc "contextless" (b: []byte, v: u64) {
intrinsics.unaligned_store((^u64)(raw_data(b)), bits.to_le_u64(v))
}
unchecked_put_u16be :: #force_inline proc "contextless" (b: []byte, v: u16) {
intrinsics.unaligned_store((^u16)(raw_data(b)), bits.to_be_u16(v))
}
unchecked_put_u32be :: #force_inline proc "contextless" (b: []byte, v: u32) {
intrinsics.unaligned_store((^u32)(raw_data(b)), bits.to_be_u32(v))
}
unchecked_put_u64be :: #force_inline proc "contextless" (b: []byte, v: u64) {
intrinsics.unaligned_store((^u64)(raw_data(b)), bits.to_be_u64(v))
}
put_u16 :: proc(b: []byte, order: Byte_Order, v: u16) -> bool {
put_u16 :: proc "contextless" (b: []byte, order: Byte_Order, v: u16) -> bool {
if len(b) < 2 {
return false
}
#no_bounds_check if order == .Little {
b[0] = byte(v)
b[1] = byte(v >> 8)
if order == .Little {
unchecked_put_u16le(b, v)
} else {
b[0] = byte(v >> 8)
b[1] = byte(v)
unchecked_put_u16be(b, v)
}
return true
}
put_u32 :: proc(b: []byte, order: Byte_Order, v: u32) -> bool {
put_u32 :: proc "contextless" (b: []byte, order: Byte_Order, v: u32) -> bool {
if len(b) < 4 {
return false
}
#no_bounds_check if order == .Little {
b[0] = byte(v)
b[1] = byte(v >> 8)
b[2] = byte(v >> 16)
b[3] = byte(v >> 24)
if order == .Little {
unchecked_put_u32le(b, v)
} else {
b[0] = byte(v >> 24)
b[1] = byte(v >> 16)
b[2] = byte(v >> 8)
b[3] = byte(v)
unchecked_put_u32be(b, v)
}
return true
}
put_u64 :: proc(b: []byte, order: Byte_Order, v: u64) -> bool {
put_u64 :: proc "contextless" (b: []byte, order: Byte_Order, v: u64) -> bool {
if len(b) < 8 {
return false
}
#no_bounds_check if order == .Little {
b[0] = byte(v >> 0)
b[1] = byte(v >> 8)
b[2] = byte(v >> 16)
b[3] = byte(v >> 24)
b[4] = byte(v >> 32)
b[5] = byte(v >> 40)
b[6] = byte(v >> 48)
b[7] = byte(v >> 56)
if order == .Little {
unchecked_put_u64le(b, v)
} else {
b[0] = byte(v >> 56)
b[1] = byte(v >> 48)
b[2] = byte(v >> 40)
b[3] = byte(v >> 32)
b[4] = byte(v >> 24)
b[5] = byte(v >> 16)
b[6] = byte(v >> 8)
b[7] = byte(v)
unchecked_put_u64be(b, v)
}
return true
}
put_i16 :: proc(b: []byte, order: Byte_Order, v: i16) -> bool {
put_i16 :: proc "contextless" (b: []byte, order: Byte_Order, v: i16) -> bool {
return put_u16(b, order, u16(v))
}
put_i32 :: proc(b: []byte, order: Byte_Order, v: i32) -> bool {
put_i32 :: proc "contextless" (b: []byte, order: Byte_Order, v: i32) -> bool {
return put_u32(b, order, u32(v))
}
put_i64 :: proc(b: []byte, order: Byte_Order, v: i64) -> bool {
put_i64 :: proc "contextless" (b: []byte, order: Byte_Order, v: i64) -> bool {
return put_u64(b, order, u64(v))
}
put_f16 :: proc(b: []byte, order: Byte_Order, v: f16) -> bool {
put_f16 :: proc "contextless" (b: []byte, order: Byte_Order, v: f16) -> bool {
return put_u16(b, order, transmute(u16)v)
}
put_f32 :: proc(b: []byte, order: Byte_Order, v: f32) -> bool {
put_f32 :: proc "contextless" (b: []byte, order: Byte_Order, v: f32) -> bool {
return put_u32(b, order, transmute(u32)v)
}
put_f64 :: proc(b: []byte, order: Byte_Order, v: f64) -> bool {
put_f64 :: proc "contextless" (b: []byte, order: Byte_Order, v: f64) -> bool {
return put_u64(b, order, transmute(u64)v)
}
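
A short usage sketch for the rewritten getters and putters (signatures as in the diff above; the example itself is illustrative). Since the procedures are now "contextless", they can also be called from code that has no context:

package example

import "core:encoding/endian"
import "core:fmt"

main :: proc() {
	buf: [4]byte
	_ = endian.put_u32(buf[:], .Big, 0xDEADBEEF) // buf = {0xDE, 0xAD, 0xBE, 0xEF}
	v, ok := endian.get_u32(buf[:], .Big)
	fmt.println(v == 0xDEADBEEF, ok)             // true true

	// a too-short slice fails gracefully instead of reading out of bounds
	_, ok = endian.get_u32(buf[:3], .Little)
	assert(!ok)
}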

View File

@@ -120,9 +120,9 @@ register_user_formatter :: proc(id: typeid, formatter: User_Formatter) -> Regist
//
// Returns: A formatted string.
//
aprint :: proc(args: ..any, sep := " ") -> string {
aprint :: proc(args: ..any, sep := " ", allocator := context.allocator) -> string {
str: strings.Builder
strings.builder_init(&str)
strings.builder_init(&str, allocator)
sbprint(&str, ..args, sep=sep)
return strings.to_string(str)
}
@@ -136,9 +136,9 @@ aprint :: proc(args: ..any, sep := " ") -> string {
//
// Returns: A formatted string with a newline character at the end.
//
aprintln :: proc(args: ..any, sep := " ") -> string {
aprintln :: proc(args: ..any, sep := " ", allocator := context.allocator) -> string {
str: strings.Builder
strings.builder_init(&str)
strings.builder_init(&str, allocator)
sbprintln(&str, ..args, sep=sep)
return strings.to_string(str)
}
@@ -1534,8 +1534,9 @@ stored_enum_value_to_string :: proc(enum_type: ^runtime.Type_Info, ev: runtime.T
// - fi: A pointer to the Info structure where the formatted bit set will be written.
// - v: The bit set value to be formatted.
// - name: An optional string for the name of the bit set (default is an empty string).
// - verb: An optional verb to adjust format.
//
fmt_bit_set :: proc(fi: ^Info, v: any, name: string = "") {
fmt_bit_set :: proc(fi: ^Info, v: any, name: string = "", verb: rune = 'v') {
is_bit_set_different_endian_to_platform :: proc(ti: ^runtime.Type_Info) -> bool {
if ti == nil {
return false
@@ -1559,7 +1560,7 @@ fmt_bit_set :: proc(fi: ^Info, v: any, name: string = "") {
case runtime.Type_Info_Named:
val := v
val.id = info.base.id
fmt_bit_set(fi, val, info.name)
fmt_bit_set(fi, val, info.name, verb)
case runtime.Type_Info_Bit_Set:
bits: u128
@@ -1567,26 +1568,52 @@ fmt_bit_set :: proc(fi: ^Info, v: any, name: string = "") {
do_byte_swap := is_bit_set_different_endian_to_platform(info.underlying)
as_arg := verb == 'b' || verb == 'o' || verb == 'd' || verb == 'i' || verb == 'z' || verb == 'x' || verb == 'X'
if as_arg && !fi.width_set {
fi.width_set = true
fi.width = int(bit_size)
}
switch bit_size {
case 0: bits = 0
case 8:
x := (^u8)(v.data)^
if as_arg {
fmt_arg(fi, x, verb)
return
}
bits = u128(x)
case 16:
x := (^u16)(v.data)^
if do_byte_swap { x = byte_swap(x) }
if as_arg {
fmt_arg(fi, x, verb)
return
}
bits = u128(x)
case 32:
x := (^u32)(v.data)^
if do_byte_swap { x = byte_swap(x) }
if as_arg {
fmt_arg(fi, x, verb)
return
}
bits = u128(x)
case 64:
x := (^u64)(v.data)^
if do_byte_swap { x = byte_swap(x) }
if as_arg {
fmt_arg(fi, x, verb)
return
}
bits = u128(x)
case 128:
x := (^u128)(v.data)^
if do_byte_swap { x = byte_swap(x) }
if as_arg {
fmt_arg(fi, x, verb)
return
}
bits = x
case: panic("unknown bit_size size")
}
@@ -1628,6 +1655,7 @@ fmt_bit_set :: proc(fi: ^Info, v: any, name: string = "") {
}
}
}
// Writes the specified number of indents to the provided Info structure
//
// Inputs:
@@ -2173,7 +2201,7 @@ fmt_named :: proc(fi: ^Info, v: any, verb: rune, info: runtime.Type_Info_Named)
case runtime.Type_Info_Struct:
fmt_struct(fi, v, verb, b, info.name)
case runtime.Type_Info_Bit_Set:
fmt_bit_set(fi, v)
fmt_bit_set(fi, v, verb = verb)
case:
fmt_value(fi, any{v.data, info.base.id}, verb)
}
@@ -2594,7 +2622,7 @@ fmt_value :: proc(fi: ^Info, v: any, verb: rune) {
reflect.write_typeid(fi.writer, id, &fi.n)
case runtime.Type_Info_Bit_Set:
fmt_bit_set(fi, v)
fmt_bit_set(fi, v, verb = verb)
case runtime.Type_Info_Relative_Pointer:
ptr := reflect.relative_pointer_to_absolute_raw(v.data, info.base_integer.id)
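
Taken together, the changes above let the `aprint` family take an explicit allocator and let bit_sets be formatted through integer verbs. A hedged sketch of both; the output shown in comments is indicative:

import "core:fmt"

Flags :: bit_set[0..<8; u8]

main :: proc() {
	f := Flags{1, 3}
	fmt.printf("%b\n", f) // formats the underlying u8, padded to the bit width: 1010

	s := fmt.aprint("hello", 42, sep = ", ", allocator = context.allocator)
	defer delete(s)
	fmt.println(s) // hello, 42
}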

View File

@@ -162,7 +162,14 @@ type_is_matrix :: proc($T: typeid) -> bool ---
type_has_nil :: proc($T: typeid) -> bool ---
type_is_specialization_of :: proc($T, $S: typeid) -> bool ---
type_is_variant_of :: proc($U, $V: typeid) -> bool where type_is_union(U) ---
type_union_tag_type :: proc($T: typeid) -> typeid where type_is_union(T) ---
type_union_tag_offset :: proc($T: typeid) -> uintptr where type_is_union(T) ---
type_union_base_tag_value :: proc($T: typeid) -> int where type_is_union(T) ---
type_union_variant_count :: proc($T: typeid) -> int where type_is_union(T) ---
type_variant_type_of :: proc($T: typeid, $index: int) -> typeid where type_is_union(T) ---
type_variant_index_of :: proc($U, $V: typeid) -> int where type_is_union(U) ---
type_has_field :: proc($T: typeid, $name: string) -> bool ---
type_field_type :: proc($T: typeid, $name: string) -> typeid ---
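
A sketch of how the new union intrinsics might be queried; the exact results depend on the union's layout and on a compiler version that ships these intrinsics:

import "core:intrinsics"
import "core:fmt"

Value :: union {int, f32, string}

main :: proc() {
	fmt.println(intrinsics.type_union_variant_count(Value)) // 3
	fmt.println(intrinsics.type_union_tag_type(Value))      // the integer type backing the tag
	fmt.println(intrinsics.type_union_tag_offset(Value))    // byte offset of the tag in the union
}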

View File

@@ -34,7 +34,7 @@ Error :: enum i32 {
// No_Progress is returned by some implementations of `io.Reader` when many calls
// to `read` have failed to return any data or error.
// This is usually a signed of a broken `io.Reader` implementation
// This is usually a sign of a broken `io.Reader` implementation
No_Progress,
Invalid_Whence,

View File

@@ -60,9 +60,9 @@ Logger_Proc :: runtime.Logger_Proc
/*
Logger :: struct {
procedure: Logger_Proc,
data: rawptr,
data: rawptr,
lowest_level: Level,
options: Logger_Options,
options: Logger_Options,
}
*/
Logger :: runtime.Logger

View File

@@ -2856,7 +2856,7 @@ internal_int_random :: proc(dest: ^Int, bits: int, r: ^rnd.Rand = nil, allocator
dest.digit[digits - 1] &= ((1 << uint(bits)) - 1)
}
dest.used = digits
return nil
return internal_clamp(dest)
}
internal_random :: proc { internal_int_random, }

View File

@@ -33,7 +33,9 @@ init_from_f64 :: proc(x: ^$T/Fixed($Backing, $Fraction_Width), val: f64) {
x.i = Backing(f * (1<<Fraction_Width))
x.i &= 1<<Fraction_Width - 1
x.i |= Backing(i) << Fraction_Width
if val < 0 do x.i *= -1
if val < 0 {
x.i *= -1
}
}
init_from_parts :: proc(x: ^$T/Fixed($Backing, $Fraction_Width), integer, fraction: Backing) {

View File

@@ -203,7 +203,58 @@ pow10_f64 :: proc "contextless" (n: f64) -> f64 {
return 0
}
@(require_results)
pow2_f64 :: proc "contextless" (#any_int exp: int) -> (res: f64) {
switch {
case exp >= -1022 && exp <= 1023: // Normal
return transmute(f64)(u64(exp + F64_BIAS) << F64_SHIFT)
case exp < -1075: // Underflow
return f64(0)
case exp == -1075: // Underflow.
// Note that pow(2, -1075) returns 0h1 on Windows and 0h0 on macOS & Linux.
return 0h00000000_00000000
case exp < -1022: // Denormal
x := u64(exp + (F64_SHIFT + 1) + F64_BIAS) << F64_SHIFT
return f64(1) / (1 << (F64_SHIFT + 1)) * transmute(f64)x
case exp > 1023: // Overflow, +Inf
return 0h7ff00000_00000000
}
unreachable()
}
@(require_results)
pow2_f32 :: proc "contextless" (#any_int exp: int) -> (res: f32) {
switch {
case exp >= -126 && exp <= 127: // Normal
return transmute(f32)(u32(exp + F32_BIAS) << F32_SHIFT)
case exp < -151: // Underflow
return f32(0)
case exp < -126: // Denormal
x := u32(exp + (F32_SHIFT + 1) + F32_BIAS) << F32_SHIFT
return f32(1) / (1 << (F32_SHIFT + 1)) * transmute(f32)x
case exp > 127: // Overflow, +Inf
return 0h7f80_0000
}
unreachable()
}
@(require_results)
pow2_f16 :: proc "contextless" (#any_int exp: int) -> (res: f16) {
switch {
case exp >= -14 && exp <= 15: // Normal
return transmute(f16)(u16(exp + F16_BIAS) << F16_SHIFT)
case exp < -25: // Underflow
return 0h0000
case exp == -25: // Underflow
return 0h0001
case exp < -14: // Denormal
x := u16(exp + (F16_SHIFT + 1) + F16_BIAS) << F16_SHIFT
return f16(1) / (1 << (F16_SHIFT + 1)) * transmute(f16)x
case exp > 15: // Overflow, +Inf
return 0h7c00
}
unreachable()
}
@(require_results)
ldexp_f64 :: proc "contextless" (val: f64, exp: int) -> f64 {
@@ -2261,17 +2312,17 @@ F32_NORMALIZE :: 0
F32_RADIX :: 2
F32_ROUNDS :: 1
F64_DIG :: 15 // # of decimal digits of precision
F64_EPSILON :: 2.2204460492503131e-016 // smallest such that 1.0+F64_EPSILON != 1.0
F64_MANT_DIG :: 53 // # of bits in mantissa
F64_MAX :: 1.7976931348623158e+308 // max value
F64_MAX_10_EXP :: 308 // max decimal exponent
F64_MAX_EXP :: 1024 // max binary exponent
F64_MIN :: 2.2250738585072014e-308 // min positive value
F64_MIN_10_EXP :: -307 // min decimal exponent
F64_MIN_EXP :: -1021 // min binary exponent
F64_RADIX :: 2 // exponent radix
F64_ROUNDS :: 1 // addition rounding: near
F64_DIG :: 15 // Number of representable decimal digits.
F64_EPSILON :: 2.2204460492503131e-016 // Smallest number such that `1.0 + F64_EPSILON != 1.0`.
F64_MANT_DIG :: 53 // Number of bits in the mantissa.
F64_MAX :: 1.7976931348623158e+308 // Maximum representable value.
F64_MAX_10_EXP :: 308 // Maximum base-10 exponent yielding normalized value.
F64_MAX_EXP :: 1024 // One greater than the maximum possible base-2 exponent yielding normalized value.
F64_MIN :: 2.2250738585072014e-308 // Minimum positive normalized value.
F64_MIN_10_EXP :: -307 // Minimum base-10 exponent yielding normalized value.
F64_MIN_EXP :: -1021 // One greater than the minimum possible base-2 exponent yielding normalized value.
F64_RADIX :: 2 // Exponent radix.
F64_ROUNDS :: 1 // Addition rounding: near.
F16_MASK :: 0x1f
@@ -2302,4 +2353,4 @@ INF_F64 :: f64(0h7FF0_0000_0000_0000)
NEG_INF_F64 :: f64(0hFFF0_0000_0000_0000)
SNAN_F64 :: f64(0h7FF0_0000_0000_0001)
QNAN_F64 :: f64(0h7FF8_0000_0000_0001)
QNAN_F64 :: f64(0h7FF8_0000_0000_0001)
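
The new `pow2_*` procedures build the float directly from exponent bits instead of going through a general power routine; a small sketch of the edge cases handled above:

import "core:math"
import "core:fmt"

main :: proc() {
	fmt.println(math.pow2_f64(10))    // 1024 (normal)
	fmt.println(math.pow2_f64(-1))    // 0.5
	fmt.println(math.pow2_f64(-1030)) // denormal, still exact
	fmt.println(math.pow2_f64(1024))  // +Inf (overflow)
	fmt.println(math.pow2_f64(-1080)) // 0 (underflow)
}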

View File

@@ -749,7 +749,9 @@ dynamic_pool_alloc_bytes :: proc(p: ^Dynamic_Pool, bytes: int) -> ([]byte, Alloc
n := bytes
extra := p.alignment - (n % p.alignment)
n += extra
if n > p.block_size do return nil, .Invalid_Argument
if n > p.block_size {
return nil, .Invalid_Argument
}
if n >= p.out_band_size {
assert(p.block_allocator.procedure != nil)
memory, err := p.block_allocator.procedure(p.block_allocator.data, Allocator_Mode.Alloc,

View File

@@ -462,7 +462,9 @@ split_port :: proc(endpoint_str: string) -> (addr_or_host: string, port: int, ok
// Joins an address or hostname with a port.
join_port :: proc(address_or_host: string, port: int, allocator := context.allocator) -> string {
addr_or_host, _, ok := split_port(address_or_host)
if !ok do return addr_or_host
if !ok {
return addr_or_host
}
b := strings.builder_make(allocator)

View File

@@ -148,7 +148,29 @@ recv_udp :: proc(socket: UDP_Socket, buf: []byte) -> (bytes_read: int, remote_en
return _recv_udp(socket, buf)
}
recv :: proc{recv_tcp, recv_udp}
/*
Receive data into a buffer from any socket.
Note: the `remote_endpoint` result is non-nil only if the socket is a UDP socket. For TCP sockets it
is always `nil`.
*/
recv_any :: proc(socket: Any_Socket, buf: []byte) -> (
bytes_read: int,
remote_endpoint: Maybe(Endpoint),
err: Network_Error,
) {
switch socktype in socket {
case TCP_Socket:
bytes_read, err := recv_tcp(socktype, buf)
return bytes_read, nil, err
case UDP_Socket:
bytes_read, endpoint, err := recv_udp(socktype, buf)
return bytes_read, endpoint, err
case: panic("Not supported")
}
}
recv :: proc{recv_tcp, recv_udp, recv_any}
/*
Repeatedly sends data until the entire buffer is sent.
@@ -168,7 +190,20 @@ send_udp :: proc(socket: UDP_Socket, buf: []byte, to: Endpoint) -> (bytes_writte
return _send_udp(socket, buf, to)
}
send :: proc{send_tcp, send_udp}
send_any :: proc(socket: Any_Socket, buf: []byte, to: Maybe(Endpoint) = nil) -> (
bytes_written: int,
err: Network_Error,
) {
switch socktype in socket {
case TCP_Socket:
return send_tcp(socktype, buf)
case UDP_Socket:
return send_udp(socktype, buf, to.(Endpoint))
case: panic("Not supported")
}
}
send :: proc{send_tcp, send_udp, send_any}
shutdown :: proc(socket: Any_Socket, manner: Shutdown_Manner) -> (err: Network_Error) {
return _shutdown(socket, manner)
@@ -180,4 +215,4 @@ set_option :: proc(socket: Any_Socket, option: Socket_Option, value: any, loc :=
set_blocking :: proc(socket: Any_Socket, should_block: bool) -> (err: Network_Error) {
return _set_blocking(socket, should_block)
}
}
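
With `recv_any` and `send_any` added to the proc groups, code that holds an `Any_Socket` no longer has to switch on the variant itself. A sketch; the procedure name is illustrative:

import "core:net"

// Echo one datagram or chunk back to wherever it came from.
echo_once :: proc(sock: net.Any_Socket, buf: []byte) -> net.Network_Error {
	n, remote, err := net.recv(sock, buf) // resolves to recv_any
	if err != nil {
		return err
	}
	_, send_err := net.send(sock, buf[:n], remote) // resolves to send_any
	return send_err
}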

View File

@@ -125,7 +125,7 @@ _create_socket :: proc(family: Address_Family, protocol: Socket_Protocol) -> (An
}
@(private)
_dial_tcp_from_endpoint :: proc(endpoint: Endpoint, options := default_tcp_options) -> (tcp_sock: TCP_Socket, err: Network_Error) {
_dial_tcp_from_endpoint :: proc(endpoint: Endpoint, options := default_tcp_options) -> (TCP_Socket, Network_Error) {
errno: linux.Errno
if endpoint.port == 0 {
return 0, .Port_Required
@@ -143,7 +143,7 @@ _dial_tcp_from_endpoint :: proc(endpoint: Endpoint, options := default_tcp_optio
reuse_addr: b32 = true
_ = linux.setsockopt(os_sock, linux.SOL_SOCKET, linux.Socket_Option.REUSEADDR, &reuse_addr)
addr := _unwrap_os_addr(endpoint)
errno = linux.connect(linux.Fd(tcp_sock), &addr)
errno = linux.connect(linux.Fd(os_sock), &addr)
if errno != .NONE {
return cast(TCP_Socket) os_sock, Dial_Error(errno)
}
@@ -333,7 +333,9 @@ _set_option :: proc(sock: Any_Socket, option: Socket_Option, value: any, loc :=
.Send_Timeout,
.Receive_Timeout:
t, ok := value.(time.Duration)
if !ok do panic("set_option() value must be a time.Duration here", loc)
if !ok {
panic("set_option() value must be a time.Duration here", loc)
}
micros := cast(i64) (time.duration_microseconds(t))
timeval_value.microseconds = cast(int) (micros % 1e6)

View File

@@ -24,7 +24,7 @@ import "core:encoding/hex"
split_url :: proc(url: string, allocator := context.allocator) -> (scheme, host, path: string, queries: map[string]string) {
s := url
i := strings.last_index(s, "://")
i := strings.index(s, "://")
if i >= 0 {
scheme = s[:i]
s = s[i+3:]
@@ -123,7 +123,9 @@ percent_encode :: proc(s: string, allocator := context.allocator) -> string {
percent_decode :: proc(encoded_string: string, allocator := context.allocator) -> (decoded_string: string, ok: bool) {
b := strings.builder_make(allocator)
strings.builder_grow(&b, len(encoded_string))
defer if !ok do strings.builder_destroy(&b)
defer if !ok {
strings.builder_destroy(&b)
}
s := encoded_string
@@ -137,7 +139,9 @@ percent_decode :: proc(encoded_string: string, allocator := context.allocator) -
strings.write_string(&b, s[:i])
s = s[i:]
if len(s) == 0 do return // percent without anything after it
if len(s) == 0 {
return // percent without anything after it
}
s = s[1:]
if s[0] == '%' {
@@ -177,7 +181,9 @@ base64url_encode :: proc(data: []byte, allocator := context.allocator) -> string
}
i := len(out)-1;
for ; i >= 0; i -= 1 {
if out[i] != '=' do break;
if out[i] != '=' {
break;
}
}
return string(out[:i+1]);
}
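
Switching `split_url` from `last_index` to `index` matters when "://" also occurs later in the URL; a sketch of the behaviour (the URL is made up):

import "core:net"
import "core:fmt"

main :: proc() {
	scheme, host, path, queries := net.split_url("https://example.com/redirect?to=ftp://other")
	defer delete(queries)
	fmt.println(scheme, host, path) // https example.com /redirect
}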

View File

@@ -7,7 +7,7 @@ import "core:reflect"
import "core:odin/tokenizer"
_ :: intrinsics
new :: proc($T: typeid, pos, end: tokenizer.Pos) -> ^T {
new_from_positions :: proc($T: typeid, pos, end: tokenizer.Pos) -> ^T {
n, _ := mem.new(T)
n.pos = pos
n.end = end
@@ -23,6 +23,15 @@ new :: proc($T: typeid, pos, end: tokenizer.Pos) -> ^T {
return n
}
new_from_pos_and_end_node :: proc($T: typeid, pos: tokenizer.Pos, end: ^Node) -> ^T {
return new(T, pos, end != nil ? end.end : pos)
}
new :: proc {
new_from_positions,
new_from_pos_and_end_node,
}
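
Making `new` a proc group means parser code can hand over a node whose `end` should be used when present, with a safe fallback when it is nil; the many `body.end` → `body` changes further down rely on this. A sketch of the two call forms, with hypothetical locals `tok` and `body`:

s1 := ast.new(ast.Bad_Stmt, tok.pos, end_pos(tok)) // explicit end position, as before
s2 := ast.new(ast.Defer_Stmt, tok.pos, body)       // end taken from body, or tok.pos if body is nil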
clone :: proc{
clone_node,
clone_expr,
@@ -107,226 +116,228 @@ clone_node :: proc(node: ^Node) -> ^Node {
reflect.set_union_value(ds, res_ptr_any)
}
if res.derived != nil do switch r in res.derived {
case ^Package, ^File:
case ^Bad_Expr:
case ^Ident:
case ^Implicit:
case ^Undef:
case ^Basic_Lit:
case ^Basic_Directive:
case ^Comment_Group:
if res.derived != nil {
switch r in res.derived {
case ^Package, ^File:
case ^Bad_Expr:
case ^Ident:
case ^Implicit:
case ^Undef:
case ^Basic_Lit:
case ^Basic_Directive:
case ^Comment_Group:
case ^Ellipsis:
r.expr = clone(r.expr)
case ^Proc_Lit:
r.type = auto_cast clone(r.type)
r.body = clone(r.body)
case ^Comp_Lit:
r.type = clone(r.type)
r.elems = clone(r.elems)
case ^Ellipsis:
r.expr = clone(r.expr)
case ^Proc_Lit:
r.type = auto_cast clone(r.type)
r.body = clone(r.body)
case ^Comp_Lit:
r.type = clone(r.type)
r.elems = clone(r.elems)
case ^Tag_Expr:
r.expr = clone(r.expr)
case ^Unary_Expr:
r.expr = clone(r.expr)
case ^Binary_Expr:
r.left = clone(r.left)
r.right = clone(r.right)
case ^Paren_Expr:
r.expr = clone(r.expr)
case ^Selector_Expr:
r.expr = clone(r.expr)
r.field = auto_cast clone(r.field)
case ^Implicit_Selector_Expr:
r.field = auto_cast clone(r.field)
case ^Selector_Call_Expr:
r.expr = clone(r.expr)
r.call = auto_cast clone(r.call)
case ^Index_Expr:
r.expr = clone(r.expr)
r.index = clone(r.index)
case ^Matrix_Index_Expr:
r.expr = clone(r.expr)
r.row_index = clone(r.row_index)
r.column_index = clone(r.column_index)
case ^Deref_Expr:
r.expr = clone(r.expr)
case ^Slice_Expr:
r.expr = clone(r.expr)
r.low = clone(r.low)
r.high = clone(r.high)
case ^Call_Expr:
r.expr = clone(r.expr)
r.args = clone(r.args)
case ^Field_Value:
r.field = clone(r.field)
r.value = clone(r.value)
case ^Ternary_If_Expr:
r.x = clone(r.x)
r.cond = clone(r.cond)
r.y = clone(r.y)
case ^Ternary_When_Expr:
r.x = clone(r.x)
r.cond = clone(r.cond)
r.y = clone(r.y)
case ^Or_Else_Expr:
r.x = clone(r.x)
r.y = clone(r.y)
case ^Or_Return_Expr:
r.expr = clone(r.expr)
case ^Or_Branch_Expr:
r.expr = clone(r.expr)
r.label = clone(r.label)
case ^Type_Assertion:
r.expr = clone(r.expr)
r.type = clone(r.type)
case ^Type_Cast:
r.type = clone(r.type)
r.expr = clone(r.expr)
case ^Auto_Cast:
r.expr = clone(r.expr)
case ^Inline_Asm_Expr:
r.param_types = clone(r.param_types)
r.return_type = clone(r.return_type)
r.constraints_string = clone(r.constraints_string)
r.asm_string = clone(r.asm_string)
case ^Tag_Expr:
r.expr = clone(r.expr)
case ^Unary_Expr:
r.expr = clone(r.expr)
case ^Binary_Expr:
r.left = clone(r.left)
r.right = clone(r.right)
case ^Paren_Expr:
r.expr = clone(r.expr)
case ^Selector_Expr:
r.expr = clone(r.expr)
r.field = auto_cast clone(r.field)
case ^Implicit_Selector_Expr:
r.field = auto_cast clone(r.field)
case ^Selector_Call_Expr:
r.expr = clone(r.expr)
r.call = auto_cast clone(r.call)
case ^Index_Expr:
r.expr = clone(r.expr)
r.index = clone(r.index)
case ^Matrix_Index_Expr:
r.expr = clone(r.expr)
r.row_index = clone(r.row_index)
r.column_index = clone(r.column_index)
case ^Deref_Expr:
r.expr = clone(r.expr)
case ^Slice_Expr:
r.expr = clone(r.expr)
r.low = clone(r.low)
r.high = clone(r.high)
case ^Call_Expr:
r.expr = clone(r.expr)
r.args = clone(r.args)
case ^Field_Value:
r.field = clone(r.field)
r.value = clone(r.value)
case ^Ternary_If_Expr:
r.x = clone(r.x)
r.cond = clone(r.cond)
r.y = clone(r.y)
case ^Ternary_When_Expr:
r.x = clone(r.x)
r.cond = clone(r.cond)
r.y = clone(r.y)
case ^Or_Else_Expr:
r.x = clone(r.x)
r.y = clone(r.y)
case ^Or_Return_Expr:
r.expr = clone(r.expr)
case ^Or_Branch_Expr:
r.expr = clone(r.expr)
r.label = clone(r.label)
case ^Type_Assertion:
r.expr = clone(r.expr)
r.type = clone(r.type)
case ^Type_Cast:
r.type = clone(r.type)
r.expr = clone(r.expr)
case ^Auto_Cast:
r.expr = clone(r.expr)
case ^Inline_Asm_Expr:
r.param_types = clone(r.param_types)
r.return_type = clone(r.return_type)
r.constraints_string = clone(r.constraints_string)
r.asm_string = clone(r.asm_string)
case ^Bad_Stmt:
// empty
case ^Empty_Stmt:
// empty
case ^Expr_Stmt:
r.expr = clone(r.expr)
case ^Tag_Stmt:
r.stmt = clone(r.stmt)
case ^Bad_Stmt:
// empty
case ^Empty_Stmt:
// empty
case ^Expr_Stmt:
r.expr = clone(r.expr)
case ^Tag_Stmt:
r.stmt = clone(r.stmt)
case ^Assign_Stmt:
r.lhs = clone(r.lhs)
r.rhs = clone(r.rhs)
case ^Block_Stmt:
r.label = clone(r.label)
r.stmts = clone(r.stmts)
case ^If_Stmt:
r.label = clone(r.label)
r.init = clone(r.init)
r.cond = clone(r.cond)
r.body = clone(r.body)
r.else_stmt = clone(r.else_stmt)
case ^When_Stmt:
r.cond = clone(r.cond)
r.body = clone(r.body)
r.else_stmt = clone(r.else_stmt)
case ^Return_Stmt:
r.results = clone(r.results)
case ^Defer_Stmt:
r.stmt = clone(r.stmt)
case ^For_Stmt:
r.label = clone(r.label)
r.init = clone(r.init)
r.cond = clone(r.cond)
r.post = clone(r.post)
r.body = clone(r.body)
case ^Range_Stmt:
r.label = clone(r.label)
r.vals = clone(r.vals)
r.expr = clone(r.expr)
r.body = clone(r.body)
case ^Inline_Range_Stmt:
r.label = clone(r.label)
r.val0 = clone(r.val0)
r.val1 = clone(r.val1)
r.expr = clone(r.expr)
r.body = clone(r.body)
case ^Case_Clause:
r.list = clone(r.list)
r.body = clone(r.body)
case ^Switch_Stmt:
r.label = clone(r.label)
r.init = clone(r.init)
r.cond = clone(r.cond)
r.body = clone(r.body)
case ^Type_Switch_Stmt:
r.label = clone(r.label)
r.tag = clone(r.tag)
r.expr = clone(r.expr)
r.body = clone(r.body)
case ^Branch_Stmt:
r.label = auto_cast clone(r.label)
case ^Using_Stmt:
r.list = clone(r.list)
case ^Bad_Decl:
case ^Value_Decl:
r.attributes = clone(r.attributes)
r.names = clone(r.names)
r.type = clone(r.type)
r.values = clone(r.values)
case ^Package_Decl:
case ^Import_Decl:
case ^Foreign_Block_Decl:
r.attributes = clone(r.attributes)
r.foreign_library = clone(r.foreign_library)
r.body = clone(r.body)
case ^Foreign_Import_Decl:
r.name = auto_cast clone(r.name)
case ^Proc_Group:
r.args = clone(r.args)
case ^Attribute:
r.elems = clone(r.elems)
case ^Field:
r.names = clone(r.names)
r.type = clone(r.type)
r.default_value = clone(r.default_value)
case ^Field_List:
r.list = clone(r.list)
case ^Typeid_Type:
r.specialization = clone(r.specialization)
case ^Helper_Type:
r.type = clone(r.type)
case ^Distinct_Type:
r.type = clone(r.type)
case ^Poly_Type:
r.type = auto_cast clone(r.type)
r.specialization = clone(r.specialization)
case ^Proc_Type:
r.params = auto_cast clone(r.params)
r.results = auto_cast clone(r.results)
case ^Pointer_Type:
r.elem = clone(r.elem)
r.tag = clone(r.tag)
case ^Multi_Pointer_Type:
r.elem = clone(r.elem)
case ^Array_Type:
r.len = clone(r.len)
r.elem = clone(r.elem)
case ^Dynamic_Array_Type:
r.elem = clone(r.elem)
case ^Struct_Type:
r.poly_params = auto_cast clone(r.poly_params)
r.align = clone(r.align)
r.fields = auto_cast clone(r.fields)
case ^Union_Type:
r.poly_params = auto_cast clone(r.poly_params)
r.align = clone(r.align)
r.variants = clone(r.variants)
case ^Enum_Type:
r.base_type = clone(r.base_type)
r.fields = clone(r.fields)
case ^Bit_Set_Type:
r.elem = clone(r.elem)
r.underlying = clone(r.underlying)
case ^Map_Type:
r.key = clone(r.key)
r.value = clone(r.value)
case ^Matrix_Type:
r.row_count = clone(r.row_count)
r.column_count = clone(r.column_count)
r.elem = clone(r.elem)
case ^Relative_Type:
r.tag = clone(r.tag)
r.type = clone(r.type)
case:
fmt.panicf("Unhandled node kind: %v", r)
case ^Assign_Stmt:
r.lhs = clone(r.lhs)
r.rhs = clone(r.rhs)
case ^Block_Stmt:
r.label = clone(r.label)
r.stmts = clone(r.stmts)
case ^If_Stmt:
r.label = clone(r.label)
r.init = clone(r.init)
r.cond = clone(r.cond)
r.body = clone(r.body)
r.else_stmt = clone(r.else_stmt)
case ^When_Stmt:
r.cond = clone(r.cond)
r.body = clone(r.body)
r.else_stmt = clone(r.else_stmt)
case ^Return_Stmt:
r.results = clone(r.results)
case ^Defer_Stmt:
r.stmt = clone(r.stmt)
case ^For_Stmt:
r.label = clone(r.label)
r.init = clone(r.init)
r.cond = clone(r.cond)
r.post = clone(r.post)
r.body = clone(r.body)
case ^Range_Stmt:
r.label = clone(r.label)
r.vals = clone(r.vals)
r.expr = clone(r.expr)
r.body = clone(r.body)
case ^Inline_Range_Stmt:
r.label = clone(r.label)
r.val0 = clone(r.val0)
r.val1 = clone(r.val1)
r.expr = clone(r.expr)
r.body = clone(r.body)
case ^Case_Clause:
r.list = clone(r.list)
r.body = clone(r.body)
case ^Switch_Stmt:
r.label = clone(r.label)
r.init = clone(r.init)
r.cond = clone(r.cond)
r.body = clone(r.body)
case ^Type_Switch_Stmt:
r.label = clone(r.label)
r.tag = clone(r.tag)
r.expr = clone(r.expr)
r.body = clone(r.body)
case ^Branch_Stmt:
r.label = auto_cast clone(r.label)
case ^Using_Stmt:
r.list = clone(r.list)
case ^Bad_Decl:
case ^Value_Decl:
r.attributes = clone(r.attributes)
r.names = clone(r.names)
r.type = clone(r.type)
r.values = clone(r.values)
case ^Package_Decl:
case ^Import_Decl:
case ^Foreign_Block_Decl:
r.attributes = clone(r.attributes)
r.foreign_library = clone(r.foreign_library)
r.body = clone(r.body)
case ^Foreign_Import_Decl:
r.name = auto_cast clone(r.name)
case ^Proc_Group:
r.args = clone(r.args)
case ^Attribute:
r.elems = clone(r.elems)
case ^Field:
r.names = clone(r.names)
r.type = clone(r.type)
r.default_value = clone(r.default_value)
case ^Field_List:
r.list = clone(r.list)
case ^Typeid_Type:
r.specialization = clone(r.specialization)
case ^Helper_Type:
r.type = clone(r.type)
case ^Distinct_Type:
r.type = clone(r.type)
case ^Poly_Type:
r.type = auto_cast clone(r.type)
r.specialization = clone(r.specialization)
case ^Proc_Type:
r.params = auto_cast clone(r.params)
r.results = auto_cast clone(r.results)
case ^Pointer_Type:
r.elem = clone(r.elem)
r.tag = clone(r.tag)
case ^Multi_Pointer_Type:
r.elem = clone(r.elem)
case ^Array_Type:
r.len = clone(r.len)
r.elem = clone(r.elem)
case ^Dynamic_Array_Type:
r.elem = clone(r.elem)
case ^Struct_Type:
r.poly_params = auto_cast clone(r.poly_params)
r.align = clone(r.align)
r.fields = auto_cast clone(r.fields)
case ^Union_Type:
r.poly_params = auto_cast clone(r.poly_params)
r.align = clone(r.align)
r.variants = clone(r.variants)
case ^Enum_Type:
r.base_type = clone(r.base_type)
r.fields = clone(r.fields)
case ^Bit_Set_Type:
r.elem = clone(r.elem)
r.underlying = clone(r.underlying)
case ^Map_Type:
r.key = clone(r.key)
r.value = clone(r.value)
case ^Matrix_Type:
r.row_count = clone(r.row_count)
r.column_count = clone(r.column_count)
r.elem = clone(r.elem)
case ^Relative_Type:
r.tag = clone(r.tag)
r.type = clone(r.type)
case:
fmt.panicf("Unhandled node kind: %v", r)
}
}
return res

View File

@@ -786,8 +786,11 @@ parse_if_stmt :: proc(p: ^Parser) -> ^ast.If_Stmt {
else_stmt = ast.new(ast.Bad_Stmt, p.curr_tok.pos, end_pos(p.curr_tok))
}
}
end := body.end
end: tokenizer.Pos
if body != nil {
end = body.end
}
if else_stmt != nil {
end = else_stmt.end
}
@@ -850,7 +853,7 @@ parse_for_stmt :: proc(p: ^Parser) -> ^ast.Stmt {
body = parse_body(p)
}
range_stmt := ast.new(ast.Range_Stmt, tok.pos, body.end)
range_stmt := ast.new(ast.Range_Stmt, tok.pos, body)
range_stmt.for_pos = tok.pos
range_stmt.in_pos = in_tok.pos
range_stmt.expr = rhs
@@ -910,7 +913,7 @@ parse_for_stmt :: proc(p: ^Parser) -> ^ast.Stmt {
rhs = assign_stmt.rhs[0]
}
range_stmt := ast.new(ast.Range_Stmt, tok.pos, body.end)
range_stmt := ast.new(ast.Range_Stmt, tok.pos, body)
range_stmt.for_pos = tok.pos
range_stmt.vals = vals
range_stmt.in_pos = assign_stmt.op.pos
@@ -920,7 +923,7 @@ parse_for_stmt :: proc(p: ^Parser) -> ^ast.Stmt {
}
cond_expr := convert_stmt_to_expr(p, cond, "boolean expression")
for_stmt := ast.new(ast.For_Stmt, tok.pos, body.end)
for_stmt := ast.new(ast.For_Stmt, tok.pos, body)
for_stmt.for_pos = tok.pos
for_stmt.init = init
for_stmt.cond = cond_expr
@@ -976,7 +979,7 @@ parse_switch_stmt :: proc(p: ^Parser) -> ^ast.Stmt {
lhs[0] = new_blank_ident(p, tok.pos)
rhs[0] = parse_expr(p, true)
as := ast.new(ast.Assign_Stmt, tok.pos, rhs[0].end)
as := ast.new(ast.Assign_Stmt, tok.pos, rhs[0])
as.lhs = lhs
as.op = in_tok
as.rhs = rhs
@@ -1010,14 +1013,14 @@ parse_switch_stmt :: proc(p: ^Parser) -> ^ast.Stmt {
body.stmts = clauses[:]
if is_type_switch {
ts := ast.new(ast.Type_Switch_Stmt, tok.pos, body.end)
ts := ast.new(ast.Type_Switch_Stmt, tok.pos, body)
ts.tag = tag
ts.body = body
ts.switch_pos = tok.pos
return ts
} else {
cond := convert_stmt_to_expr(p, tag, "switch expression")
ts := ast.new(ast.Switch_Stmt, tok.pos, body.end)
ts := ast.new(ast.Switch_Stmt, tok.pos, body)
ts.init = init
ts.cond = cond
ts.body = body
@@ -1044,7 +1047,7 @@ parse_attribute :: proc(p: ^Parser, tok: tokenizer.Token, open_kind, close_kind:
if p.curr_tok.kind == .Eq {
eq := expect_token(p, .Eq)
value := parse_value(p)
fv := ast.new(ast.Field_Value, elem.pos, value.end)
fv := ast.new(ast.Field_Value, elem.pos, value)
fv.field = elem
fv.sep = eq.pos
fv.value = value
@@ -1137,7 +1140,7 @@ parse_foreign_block :: proc(p: ^Parser, tok: tokenizer.Token) -> ^ast.Foreign_Bl
body.stmts = decls[:]
body.close = close.pos
decl := ast.new(ast.Foreign_Block_Decl, tok.pos, body.end)
decl := ast.new(ast.Foreign_Block_Decl, tok.pos, body)
decl.docs = docs
decl.tok = tok
decl.foreign_library = foreign_library
@@ -1248,7 +1251,7 @@ parse_unrolled_for_loop :: proc(p: ^Parser, inline_tok: tokenizer.Token) -> ^ast
return ast.new(ast.Bad_Stmt, inline_tok.pos, end_pos(p.prev_tok))
}
range_stmt := ast.new(ast.Inline_Range_Stmt, inline_tok.pos, body.end)
range_stmt := ast.new(ast.Inline_Range_Stmt, inline_tok.pos, body)
range_stmt.inline_pos = inline_tok.pos
range_stmt.for_pos = for_tok.pos
range_stmt.val0 = val0
@@ -1304,7 +1307,7 @@ parse_stmt :: proc(p: ^Parser) -> ^ast.Stmt {
case ^ast.Return_Stmt:
error(p, s.pos, "you cannot defer a return statement")
}
ds := ast.new(ast.Defer_Stmt, tok.pos, stmt.end)
ds := ast.new(ast.Defer_Stmt, tok.pos, stmt)
ds.stmt = stmt
return ds
@@ -1341,8 +1344,7 @@ parse_stmt :: proc(p: ^Parser) -> ^ast.Stmt {
if tok.kind != .Fallthrough && p.curr_tok.kind == .Ident {
label = parse_ident(p)
}
end := label.end if label != nil else end_pos(tok)
s := ast.new(ast.Branch_Stmt, tok.pos, end)
s := ast.new(ast.Branch_Stmt, tok.pos, label)
s.tok = tok
s.label = label
expect_semicolon(p, s)
@@ -1366,7 +1368,7 @@ parse_stmt :: proc(p: ^Parser) -> ^ast.Stmt {
if p.curr_tok.kind != .Colon {
end := list[len(list)-1]
expect_semicolon(p, end)
us := ast.new(ast.Using_Stmt, tok.pos, end.end)
us := ast.new(ast.Using_Stmt, tok.pos, end)
us.list = list
return us
}
@@ -1416,13 +1418,13 @@ parse_stmt :: proc(p: ^Parser) -> ^ast.Stmt {
bd.tok = tok
bd.name = name
ce := parse_call_expr(p, bd)
es := ast.new(ast.Expr_Stmt, ce.pos, ce.end)
es := ast.new(ast.Expr_Stmt, ce.pos, ce)
es.expr = ce
return es
case "force_inline", "force_no_inline":
expr := parse_inlining_operand(p, true, tag)
es := ast.new(ast.Expr_Stmt, expr.pos, expr.end)
es := ast.new(ast.Expr_Stmt, expr.pos, expr)
es.expr = expr
return es
case "unroll":
@@ -1444,7 +1446,8 @@ parse_stmt :: proc(p: ^Parser) -> ^ast.Stmt {
return ast.new(ast.Bad_Stmt, tok.pos, end_pos(tag))
case:
stmt := parse_stmt(p)
te := ast.new(ast.Tag_Stmt, tok.pos, stmt.pos)
end := stmt.pos if stmt != nil else end_pos(tok)
te := ast.new(ast.Tag_Stmt, tok.pos, end)
te.op = tok
te.name = name
te.stmt = stmt
@@ -1572,7 +1575,7 @@ convert_stmt_to_body :: proc(p: ^Parser, stmt: ^ast.Stmt) -> ^ast.Stmt {
error(p, stmt.pos, "expected a non-empty statement")
}
bs := ast.new(ast.Block_Stmt, stmt.pos, stmt.end)
bs := ast.new(ast.Block_Stmt, stmt.pos, stmt)
bs.open = stmt.pos
bs.stmts = make([]^ast.Stmt, 1)
bs.stmts[0] = stmt
@@ -1741,7 +1744,7 @@ parse_var_type :: proc(p: ^Parser, flags: ast.Field_Flags) -> ^ast.Expr {
error(p, tok.pos, "variadic field missing type after '..'")
type = ast.new(ast.Bad_Expr, tok.pos, end_pos(tok))
}
e := ast.new(ast.Ellipsis, type.pos, type.end)
e := ast.new(ast.Ellipsis, type.pos, type)
e.expr = type
return e
}
@@ -1808,7 +1811,7 @@ parse_ident_list :: proc(p: ^Parser, allow_poly_names: bool) -> []^ast.Expr {
if is_blank_ident(ident) {
error(p, ident.pos, "invalid polymorphic type definition with a blank identifier")
}
poly_name := ast.new(ast.Poly_Type, tok.pos, ident.end)
poly_name := ast.new(ast.Poly_Type, tok.pos, ident)
poly_name.type = ident
append(&list, poly_name)
} else {
@@ -2154,7 +2157,7 @@ parse_inlining_operand :: proc(p: ^Parser, lhs: bool, tok: tokenizer.Token) -> ^
e.inlining = pi
case:
error(p, tok.pos, "'%s' must be followed by a procedure literal or call", tok.text)
return ast.new(ast.Bad_Expr, tok.pos, expr.end)
return ast.new(ast.Bad_Expr, tok.pos, expr)
}
return expr
}
@@ -2204,7 +2207,7 @@ parse_operand :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
case .Distinct:
tok := advance_token(p)
type := parse_type(p)
dt := ast.new(ast.Distinct_Type, tok.pos, type.end)
dt := ast.new(ast.Distinct_Type, tok.pos, type)
dt.tok = tok.kind
dt.type = type
return dt
@@ -2215,7 +2218,7 @@ parse_operand :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
switch name.text {
case "type":
type := parse_type(p)
hp := ast.new(ast.Helper_Type, tok.pos, type.end)
hp := ast.new(ast.Helper_Type, tok.pos, type)
hp.tok = tok.kind
hp.type = type
return hp
@@ -2319,7 +2322,7 @@ parse_operand :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
tag_call := parse_call_expr(p, tag)
type := parse_type(p)
rt := ast.new(ast.Relative_Type, tok.pos, type.end)
rt := ast.new(ast.Relative_Type, tok.pos, type)
rt.tag = tag_call
rt.type = type
return rt
@@ -2328,7 +2331,8 @@ parse_operand :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
return parse_inlining_operand(p, lhs, name)
case:
expr := parse_expr(p, lhs)
te := ast.new(ast.Tag_Expr, tok.pos, expr.pos)
end := expr.pos if expr != nil else end_pos(tok)
te := ast.new(ast.Tag_Expr, tok.pos, end)
te.op = tok
te.name = name.text
te.expr = expr
@@ -2456,7 +2460,7 @@ parse_operand :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
case .Pointer:
tok := expect_token(p, .Pointer)
elem := parse_type(p)
ptr := ast.new(ast.Pointer_Type, tok.pos, elem.end)
ptr := ast.new(ast.Pointer_Type, tok.pos, elem)
ptr.pointer = tok.pos
ptr.elem = elem
return ptr
@@ -2470,7 +2474,7 @@ parse_operand :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
tok := expect_token(p, .Pointer)
close := expect_token(p, .Close_Bracket)
elem := parse_type(p)
t := ast.new(ast.Multi_Pointer_Type, open.pos, elem.end)
t := ast.new(ast.Multi_Pointer_Type, open.pos, elem)
t.open = open.pos
t.pointer = tok.pos
t.close = close.pos
@@ -2480,7 +2484,7 @@ parse_operand :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
tok := expect_token(p, .Dynamic)
close := expect_token(p, .Close_Bracket)
elem := parse_type(p)
da := ast.new(ast.Dynamic_Array_Type, open.pos, elem.end)
da := ast.new(ast.Dynamic_Array_Type, open.pos, elem)
da.open = open.pos
da.dynamic_pos = tok.pos
da.close = close.pos
@@ -2500,7 +2504,7 @@ parse_operand :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
}
close := expect_token(p, .Close_Bracket)
elem := parse_type(p)
at := ast.new(ast.Array_Type, open.pos, elem.end)
at := ast.new(ast.Array_Type, open.pos, elem)
at.open = open.pos
at.len = count
at.close = close.pos
@@ -2514,7 +2518,7 @@ parse_operand :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
expect_token(p, .Close_Bracket)
value := parse_type(p)
mt := ast.new(ast.Map_Type, tok.pos, value.end)
mt := ast.new(ast.Map_Type, tok.pos, value)
mt.tok_pos = tok.pos
mt.key = key
mt.value = value
@@ -2755,7 +2759,7 @@ parse_operand :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
expect_token(p, .Close_Bracket)
elem := parse_type(p)
mt := ast.new(ast.Matrix_Type, tok.pos, elem.end)
mt := ast.new(ast.Matrix_Type, tok.pos, elem)
mt.tok_pos = tok.pos
mt.row_count = row_count
mt.column_count = column_count
@@ -2893,7 +2897,7 @@ parse_elem_list :: proc(p: ^Parser) -> []^ast.Expr {
eq := expect_token(p, .Eq)
value := parse_value(p)
fv := ast.new(ast.Field_Value, elem.pos, value.end)
fv := ast.new(ast.Field_Value, elem.pos, value)
fv.field = elem
fv.sep = eq.pos
fv.value = value
@@ -2962,7 +2966,7 @@ parse_call_expr :: proc(p: ^Parser, operand: ^ast.Expr) -> ^ast.Expr {
}
value := parse_value(p)
fv := ast.new(ast.Field_Value, arg.pos, value.end)
fv := ast.new(ast.Field_Value, arg.pos, value)
fv.field = arg
fv.sep = eq.pos
fv.value = value
@@ -2993,7 +2997,7 @@ parse_call_expr :: proc(p: ^Parser, operand: ^ast.Expr) -> ^ast.Expr {
o := ast.unparen_expr(operand)
if se, ok := o.derived.(^ast.Selector_Expr); ok && se.op.kind == .Arrow_Right {
sce := ast.new(ast.Selector_Call_Expr, ce.pos, ce.end)
sce := ast.new(ast.Selector_Call_Expr, ce.pos, ce)
sce.expr = o
sce.call = ce
return sce
@@ -3101,7 +3105,7 @@ parse_atom_expr :: proc(p: ^Parser, value: ^ast.Expr, lhs: bool) -> (operand: ^a
case .Ident:
field := parse_ident(p)
sel := ast.new(ast.Selector_Expr, operand.pos, field.end)
sel := ast.new(ast.Selector_Expr, operand.pos, field)
sel.expr = operand
sel.op = tok
sel.field = field
@@ -3127,7 +3131,7 @@ parse_atom_expr :: proc(p: ^Parser, value: ^ast.Expr, lhs: bool) -> (operand: ^a
type.op = question
type.expr = nil
ta := ast.new(ast.Type_Assertion, operand.pos, type.end)
ta := ast.new(ast.Type_Assertion, operand.pos, type)
ta.expr = operand
ta.type = type
@@ -3145,7 +3149,7 @@ parse_atom_expr :: proc(p: ^Parser, value: ^ast.Expr, lhs: bool) -> (operand: ^a
case .Ident:
field := parse_ident(p)
sel := ast.new(ast.Selector_Expr, operand.pos, field.end)
sel := ast.new(ast.Selector_Expr, operand.pos, field)
sel.expr = operand
sel.op = tok
sel.field = field
@@ -3225,7 +3229,7 @@ parse_unary_expr :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
close := expect_token(p, .Close_Paren)
expr := parse_unary_expr(p, lhs)
tc := ast.new(ast.Type_Cast, tok.pos, expr.end)
tc := ast.new(ast.Type_Cast, tok.pos, expr)
tc.tok = tok
tc.open = open.pos
tc.type = type
@@ -3237,7 +3241,7 @@ parse_unary_expr :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
op := advance_token(p)
expr := parse_unary_expr(p, lhs)
ac := ast.new(ast.Auto_Cast, op.pos, expr.end)
ac := ast.new(ast.Auto_Cast, op.pos, expr)
ac.op = op
ac.expr = expr
return ac
@@ -3247,8 +3251,8 @@ parse_unary_expr :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
.And:
op := advance_token(p)
expr := parse_unary_expr(p, lhs)
ue := ast.new(ast.Unary_Expr, op.pos, expr.end)
ue := ast.new(ast.Unary_Expr, op.pos, expr)
ue.op = op
ue.expr = expr
return ue
@@ -3258,7 +3262,7 @@ parse_unary_expr :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
error(p, op.pos, "unary '%s' operator is not supported", op.text)
expr := parse_unary_expr(p, lhs)
ue := ast.new(ast.Unary_Expr, op.pos, expr.end)
ue := ast.new(ast.Unary_Expr, op.pos, expr)
ue.op = op
ue.expr = expr
return ue
@@ -3266,7 +3270,7 @@ parse_unary_expr :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
case .Period:
op := advance_token(p)
field := parse_ident(p)
ise := ast.new(ast.Implicit_Selector_Expr, op.pos, field.end)
ise := ast.new(ast.Implicit_Selector_Expr, op.pos, field)
ise.field = field
return ise
@@ -3407,7 +3411,7 @@ parse_simple_stmt :: proc(p: ^Parser, flags: Stmt_Allow_Flags) -> ^ast.Stmt {
error(p, p.curr_tok.pos, "no right-hand side in assignment statement")
return ast.new(ast.Bad_Stmt, start_tok.pos, end_pos(p.curr_tok))
}
stmt := ast.new(ast.Assign_Stmt, lhs[0].pos, rhs[len(rhs)-1].end)
stmt := ast.new(ast.Assign_Stmt, lhs[0].pos, rhs[len(rhs)-1])
stmt.lhs = lhs
stmt.op = op
stmt.rhs = rhs
@@ -3424,7 +3428,7 @@ parse_simple_stmt :: proc(p: ^Parser, flags: Stmt_Allow_Flags) -> ^ast.Stmt {
rhs := make([]^ast.Expr, 1)
rhs[0] = expr
stmt := ast.new(ast.Assign_Stmt, lhs[0].pos, rhs[len(rhs)-1].end)
stmt := ast.new(ast.Assign_Stmt, lhs[0].pos, rhs[len(rhs)-1])
stmt.lhs = lhs
stmt.op = op
stmt.rhs = rhs
@@ -3466,7 +3470,7 @@ parse_simple_stmt :: proc(p: ^Parser, flags: Stmt_Allow_Flags) -> ^ast.Stmt {
error(p, op.pos, "postfix '%s' statement is not supported", op.text)
}
es := ast.new(ast.Expr_Stmt, lhs[0].pos, lhs[0].end)
es := ast.new(ast.Expr_Stmt, lhs[0].pos, lhs[0])
es.expr = lhs[0]
return es
}

View File

@@ -149,7 +149,7 @@ read_console :: proc(handle: win32.HANDLE, b: []byte) -> (n: int, err: Errno) {
return
}
read :: proc(fd: Handle, data: []byte) -> (int, Errno) {
read :: proc(fd: Handle, data: []byte) -> (total_read: int, err: Errno) {
if len(data) == 0 {
return 0, ERROR_NONE
}
@@ -158,32 +158,32 @@ read :: proc(fd: Handle, data: []byte) -> (int, Errno) {
m: u32
is_console := win32.GetConsoleMode(handle, &m)
single_read_length: win32.DWORD
total_read: int
length := len(data)
// NOTE(Jeroen): `length` can't be cast to win32.DWORD here because it'll overflow if > 4 GiB and return 0 if exactly that.
to_read := min(i64(length), MAX_RW)
e: win32.BOOL
if is_console {
n, err := read_console(handle, data[total_read:][:to_read])
total_read += n
total_read, err = read_console(handle, data[total_read:][:to_read])
if err != 0 {
return int(total_read), err
return total_read, err
}
} else {
// NOTE(Jeroen): So we cast it here *after* we've ensured that `to_read` is at most MAX_RW (1 GiB)
e = win32.ReadFile(handle, &data[total_read], win32.DWORD(to_read), &single_read_length, nil)
bytes_read: win32.DWORD
if e := win32.ReadFile(handle, &data[total_read], win32.DWORD(to_read), &bytes_read, nil); e {
// Successful read can mean two things, including EOF, see:
// https://learn.microsoft.com/en-us/windows/win32/fileio/testing-for-the-end-of-a-file
if bytes_read == 0 {
return 0, ERROR_HANDLE_EOF
} else {
return int(bytes_read), ERROR_NONE
}
} else {
return 0, Errno(win32.GetLastError())
}
}
if single_read_length <= 0 || !e {
err := Errno(win32.GetLastError())
return int(total_read), err
}
total_read += int(single_read_length)
return int(total_read), ERROR_NONE
return total_read, ERROR_NONE
}
seek :: proc(fd: Handle, offset: i64, whence: int) -> (i64, Errno) {

View File

@@ -27,9 +27,7 @@ _file_stream_proc :: proc(stream_data: rawptr, mode: io.Stream_Mode, p: []byte,
case .Read:
n_int, os_err = read(fd, p)
n = i64(n_int)
if os_err != 0 {
err = .Unknown
}
case .Read_At:
when !(ODIN_OS == .FreeBSD || ODIN_OS == .OpenBSD) {
n_int, os_err = read_at(fd, p, offset)
@@ -57,6 +55,11 @@ _file_stream_proc :: proc(stream_data: rawptr, mode: io.Stream_Mode, p: []byte,
}
}
if err == nil && os_err != 0 {
when ODIN_OS == .Windows {
if os_err == ERROR_HANDLE_EOF {
return n, .EOF
}
}
err = .Unknown
}
return
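
With the mapping above, a stream over an `os.Handle` reports end-of-file as `io.Error.EOF` rather than the catch-all `.Unknown`. A usage sketch, assuming `os.stream_from_handle` and a placeholder file name:

import "core:io"
import "core:os"

main :: proc() {
	fd, _ := os.open("example.txt")
	defer os.close(fd)
	s := os.stream_from_handle(fd)
	buf: [256]byte
	for {
		n, err := io.read(s, buf[:])
		_ = buf[:n] // process the chunk here
		if n == 0 || err == .EOF {
			break // clean end of file
		}
		if err != nil {
			break // genuine error
		}
	}
}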

View File

@@ -109,7 +109,7 @@ remove_range :: proc(array: ^$D/[dynamic]$T, lo, hi: int, loc := #caller_locatio
// `pop` will remove and return the end value of dynamic array `array` and reduces the length of `array` by 1.
//
// Note: If the dynamic array as no elements (`len(array) == 0`), this procedure will panic.
// Note: If the dynamic array has no elements (`len(array) == 0`), this procedure will panic.
@builtin
pop :: proc(array: ^$T/[dynamic]$E, loc := #caller_location) -> (res: E) #no_bounds_check {
assert(len(array) > 0, loc=loc)

View File

@@ -287,7 +287,7 @@ append_soa_elem :: proc(array: ^$T/#soa[dynamic]$E, arg: E, loc := #caller_locat
footer := raw_soa_footer(array)
if size_of(E) > 0 && cap(array)-len(array) > 0 {
ti := type_info_of(typeid_of(T))
ti := type_info_of(T)
ti = type_info_base(ti)
si := &ti.variant.(Type_Info_Struct)
field_count: uintptr

View File

@@ -35,7 +35,7 @@ nil_allocator :: proc() -> Allocator {
when ODIN_OS == .Freestanding {
default_allocator_proc :: nil_allocator_proc
default_allocator :: nil_allocator
}
}
@@ -78,9 +78,7 @@ panic_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
panic_allocator :: proc() -> Allocator {
return Allocator{
procedure = nil_allocator_proc,
procedure = panic_allocator_proc,
data = nil,
}
}

core/runtime/docs.odin (new file, 179 lines)
View File

@@ -0,0 +1,179 @@
package runtime
/*
package runtime has numerous entities (declarations) which are required by the compiler to function.
## Basic types and calls (and anything they rely on)
Source_Code_Location
Context
Allocator
Logger
__init_context
_cleanup_runtime
## cstring calls
cstring_to_string
cstring_len
## Required when RTTI is enabled (the vast majority of targets)
Type_Info
type_table
__type_info_of
## Hashing
default_hasher
default_hasher_cstring
default_hasher_string
## Pseudo-CRT procedures required due to LLVM but useful in general
memset
memcpy
memmove
## Procedures required by the LLVM backend
umodti3
udivti3
modti3
divti3
fixdfti
fixunsdfti
fixunsdfdi
floattidf
floattidf_unsigned
truncsfhf2
truncdfhf2
gnu_h2f_ieee
gnu_f2h_ieee
extendhfsf2
__ashlti3 // wasm specific
__multi3 // wasm specific
## Required when an entry point is defined (i.e. 'main')
args__
## When -no-crt is defined (and not a wasm target) (mostly due to LLVM)
_tls_index
_fltused
## Bounds checking procedures (when not disabled with -no-bounds-check)
bounds_check_error
matrix_bounds_check_error
slice_expr_error_hi
slice_expr_error_lo_hi
multi_pointer_slice_expr_error
## Type assertion check
type_assertion_check
type_assertion_check2 // takes in typeid
## Arithmetic
quo_complex32
quo_complex64
quo_complex128
mul_quaternion64
mul_quaternion128
mul_quaternion256
quo_quaternion64
quo_quaternion128
quo_quaternion256
abs_complex32
abs_complex64
abs_complex128
abs_quaternion64
abs_quaternion128
abs_quaternion256
## Comparison
memory_equal
memory_compare
memory_compare_zero
cstring_eq
cstring_ne
cstring_lt
cstring_gt
cstring_le
cstring_ge
string_eq
string_ne
string_lt
string_gt
string_le
string_ge
complex32_eq
complex32_ne
complex64_eq
complex64_ne
complex128_eq
complex128_ne
quaternion64_eq
quaternion64_ne
quaternion128_eq
quaternion128_ne
quaternion256_eq
quaternion256_ne
## Map specific calls
map_seed_from_map_data
__dynamic_map_check_grow // static map calls
map_insert_hash_dynamic // static map calls
__dynamic_map_get // dynamic map calls
__dynamic_map_set // dynamic map calls
## Dynamic literals ([dynamic]T and map[K]V) (can be disabled with -no-dynamic-literals)
__dynamic_array_reserve
__dynamic_array_append
__dynamic_map_reserve
## Objective-C specific
objc_lookUpClass
sel_registerName
objc_allocateClassPair
## for-in `string` type
string_decode_rune
string_decode_last_rune // #reverse for
*/

View File

@@ -44,7 +44,7 @@ _ :: intrinsics
MAP_LOAD_FACTOR :: 75
// Minimum log2 capacity.
MAP_MIN_LOG2_CAPACITY :: 6 // 64 elements
MAP_MIN_LOG2_CAPACITY :: 3 // 8 elements
// Has to be less than 100% though.
#assert(MAP_LOAD_FACTOR < 100)
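Concretely: the minimum capacity drops from 1 << 6 = 64 slots to 1 << 3 = 8, and with MAP_LOAD_FACTOR = 75 a fresh map now grows once it holds more than 8 * 75 / 100 = 6 entries.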

View File

@@ -26,8 +26,13 @@ when ODIN_BUILD_MODE == .Dynamic {
// to retrieve argc and argv from the stack
when ODIN_ARCH == .amd64 {
@require foreign import entry "entry_unix_no_crt_amd64.asm"
SYS_exit :: 60
} else when ODIN_ARCH == .i386 {
@require foreign import entry "entry_unix_no_crt_i386.asm"
SYS_exit :: 1
} else when ODIN_OS == .Darwin && ODIN_ARCH == .arm64 {
@require foreign import entry "entry_unix_no_crt_darwin_arm64.asm"
SYS_exit :: 1
}
@(link_name="_start_odin", linkage="strong", require)
_start_odin :: proc "c" (argc: i32, argv: [^]cstring) -> ! {
@@ -36,11 +41,7 @@ when ODIN_BUILD_MODE == .Dynamic {
#force_no_inline _startup_runtime()
intrinsics.__entry_point()
#force_no_inline _cleanup_runtime()
when ODIN_ARCH == .amd64 {
intrinsics.syscall(/*SYS_exit = */60)
} else when ODIN_ARCH == .i386 {
intrinsics.syscall(/*SYS_exit = */1)
}
intrinsics.syscall(SYS_exit, 0)
unreachable()
}
} else {

View File

@@ -0,0 +1,20 @@
.section __TEXT,__text
; NOTE(laytan): this should ideally be the -minimum-os-version flag but there is no nice way of preprocessing assembly in Odin.
; 10 seems to be the lowest it goes, and it doesn't appear to interfere with any targeted OS version, so this seems fine.
.build_version macos, 10, 0
.extern __start_odin
.global _main
.align 2
_main:
mov x5, sp ; use x5 as the stack pointer
ldr x0, [x5] ; get argc into x0 (kernel passes 32-bit int argc as 64-bits on stack to keep alignment)
ldr x1, [x5, #8] ; get argv into x1
and sp, x5, #~15 ; force 16-byte alignment of the stack
bl __start_odin ; call into Odin entry point
ret ; should never get here

View File

@@ -1,4 +1,4 @@
//+build !freestanding !wasi !windows !js
//+build !freestanding !wasi !windows !js !darwin
package runtime
import "core:os"

View File

@@ -0,0 +1,12 @@
//+build darwin
package runtime
import "core:intrinsics"
_os_write :: proc "contextless" (data: []byte) -> (int, _OS_Errno) {
ret := intrinsics.syscall(0x2000004, 1, uintptr(raw_data(data)), uintptr(len(data)))
if ret < 0 {
return 0, _OS_Errno(-ret)
}
return int(ret), 0
}
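For context: Darwin encodes the syscall class in the high bits of the number, so 0x2000004 is the BSD class base 0x2000000 plus SYS_write (4), and the hard-coded 1 is the stdout file descriptor.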

View File

@@ -26,7 +26,9 @@ package heap
make :: proc(data: []$T, less: proc(a, b: T) -> bool) {
// amortize length lookup
length := len(data)
if length <= 1 do return
if length <= 1 {
return
}
// start from data parent, no need to consider children
for start := (length - 2) / 2; start >= 0; start -= 1 {
@@ -53,7 +55,9 @@ push :: proc(data: []$T, less: proc(a, b: T) -> bool) {
*/
pop :: proc(data: []$T, less: proc(a, b: T) -> bool) {
length := len(data)
if length <= 1 do return
if length <= 1 {
return
}
last := length
@@ -206,7 +210,9 @@ sift_up :: proc(data: []$T, less: proc(a, b: T) -> bool) {
// amortize length lookup
length := len(data)
if length <= 1 do return
if length <= 1 {
return
}
last := length
length = (length - 2) / 2
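The brace conversion above changes no behaviour. For reference, a usage sketch of this slice-based API (assumptions: the import path is core:container/heap, and pop follows the usual convention of swapping the top element to the back of the slice):
```
// assumes: import "core:container/heap"
less :: proc(a, b: int) -> bool { return a < b } // min-heap ordering

heap_demo :: proc() {
	data: [dynamic]int
	defer delete(data)
	append(&data, 5, 1, 4, 2, 3)
	heap.make(data[:], less) // heapify in place

	append(&data, 0)         // push: append the new element first,
	heap.push(data[:], less) // then restore the heap property

	heap.pop(data[:], less)  // assuming pop swaps the top to the back,
	smallest := pop(&data)   // shrink the dynamic array to take it
	assert(smallest == 0)
}
```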

View File

@@ -49,7 +49,7 @@ to_bytes :: proc "contextless" (s: []$T) -> []byte {
```
```
small_items := []byte{1, 0, 0, 0, 0, 0, 0, 0,
2, 0, 0, 0}
2, 0, 0, 0}
large_items := slice.reinterpret([]i64, small_items)
assert(len(large_items) == 1) // only enough bytes to make 1 x i64; two would need at least 8 bytes.
```
@@ -78,7 +78,7 @@ swap_between :: proc(a, b: $T/[]$E) {
n := builtin.min(len(a), len(b))
if n >= 0 {
ptr_swap_overlapping(&a[0], &b[0], size_of(E)*n)
}
}
}
@@ -117,46 +117,95 @@ linear_search_proc :: proc(array: $A/[]$T, f: proc(T) -> bool) -> (index: int, f
return -1, false
}
/*
Binary search searches the given slice for the given element.
If the slice is not sorted, the returned index is unspecified and meaningless.
If the value is found then the returned int is the index of the matching element.
If there are multiple matches, then any one of the matches could be returned.
If the value is not found then the returned int is the index where a matching
element could be inserted while maintaining sorted order.
# Examples
Looks up a series of four elements. The first is found, with a
uniquely determined position; the second and third are not
found; the fourth could match any position in `[1, 4]`.
```
index: int
found: bool
s := []i32{0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55}
index, found = slice.binary_search(s, 13)
assert(index == 9 && found == true)
index, found = slice.binary_search(s, 4)
assert(index == 7 && found == false)
index, found = slice.binary_search(s, 100)
assert(index == 13 && found == false)
index, found = slice.binary_search(s, 1)
assert(index >= 1 && index <= 4 && found == true)
```
For slices of more complex types see: binary_search_by
*/
@(require_results)
binary_search :: proc(array: $A/[]$T, key: T) -> (index: int, found: bool)
where intrinsics.type_is_ordered(T) #no_bounds_check {
n := len(array)
switch n {
case 0:
return -1, false
case 1:
if array[0] == key {
return 0, true
}
return -1, false
}
lo, hi := 0, n-1
for array[hi] != array[lo] && key >= array[lo] && key <= array[hi] {
when intrinsics.type_is_ordered_numeric(T) {
// NOTE(bill): This is technically interpolation search
m := lo + int((key - array[lo]) * T(hi - lo) / (array[hi] - array[lo]))
} else {
m := lo + (hi - lo)/2
}
where intrinsics.type_is_ordered(T) #no_bounds_check
{
// I would like to use binary_search_by(array, key, cmp) here, but the compiler rejects it:
// Cannot assign value 'cmp' of type 'proc($E, $E) -> Ordering' to 'proc(i32, i32) -> Ordering' in argument
return binary_search_by(array, key, proc(key: T, element: T) -> Ordering {
switch {
case array[m] < key:
lo = m + 1
case key < array[m]:
hi = m - 1
case:
return m, true
case element < key: return .Less
case element > key: return .Greater
case: return .Equal
}
}
if key == array[lo] {
return lo, true
}
return -1, false
})
}
@(require_results)
binary_search_by :: proc(array: $A/[]$T, key: T, f: proc(T, T) -> Ordering) -> (index: int, found: bool)
where intrinsics.type_is_ordered(T) #no_bounds_check
{
// INVARIANTS:
// - 0 <= left <= (left + size = right) <= len(array)
// - f returns .Less for everything in array[:left]
// - f returns .Greater for everything in array[right:]
size := len(array)
left := 0
right := size
for left < right {
mid := left + size / 2
// Steps to verify this is in-bounds:
// 1. We note that `size` is strictly positive due to the loop condition
// 2. Therefore `size/2 < size`
// 3. Adding `left` to both sides yields `(left + size/2) < (left + size)`
// 4. We know from the invariant that `left + size <= len(array)`
// 5. Therefore `left + size/2 < len(array)`
cmp := f(key, array[mid])
left = mid + 1 if cmp == .Less else left
right = mid if cmp == .Greater else right
switch cmp {
case .Equal: return mid, true
case .Less: left = mid + 1
case .Greater: right = mid
}
size = right - left
}
return left, false
}
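A small usage sketch of the comparator form (assumes `import "core:slice"`), searching a descending-sorted slice by reversing the comparison; note the comparator's argument order matches the call f(key, array[mid]) above:
```
s := []i32{55, 34, 21, 13, 8, 5, 3, 2, 1}
index, found := slice.binary_search_by(s, i32(13), proc(key, element: i32) -> slice.Ordering {
	switch {
	case element > key: return .Less    // in descending order, larger elements sort earlier
	case element < key: return .Greater
	case:               return .Equal
	}
})
assert(found && index == 3)
```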
@(require_results)
equal :: proc(a, b: $T/[]$E) -> bool where intrinsics.type_is_comparable(E) {
@@ -463,6 +512,40 @@ min_max :: proc(s: $S/[]$T) -> (min, max: T, ok: bool) where intrinsics.type_is_
return
}
// Find the index of the (first) minimum element in a slice.
@(require_results)
min_index :: proc(s: $S/[]$T) -> (min_index: int, ok: bool) where intrinsics.type_is_ordered(T) #optional_ok {
if len(s) == 0 {
return -1, false
}
min_index = 0
min_value := s[0]
for v, i in s[1:] {
if v < min_value {
min_value = v
min_index = i+1
}
}
return min_index, true
}
// Find the index of the (first) maximum element in a slice.
@(require_results)
max_index :: proc(s: $S/[]$T) -> (max_index: int, ok: bool) where intrinsics.type_is_ordered(T) #optional_ok {
if len(s) == 0 {
return -1, false
}
max_index = 0
max_value := s[0]
for v, i in s[1:] {
if v > max_value {
max_value = v
max_index = i+1
}
}
return max_index, true
}
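A quick sketch of the new helpers (assumes `import "core:slice"`; ties resolve to the first occurrence, per the doc comments):
```
values := []int{42, 7, 13, 7, 99}
i, _ := slice.min_index(values) // i == 1: the first 7
j, _ := slice.max_index(values) // j == 4: 99
assert(i == 1 && j == 4)
```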
@(require_results)
any_of :: proc(s: $S/[]$T, value: T) -> bool where intrinsics.type_is_comparable(T) {
for v in s {

View File

@@ -1,4 +1,3 @@
//+private
package strings
import "core:unicode/utf8"

View File

@@ -780,10 +780,8 @@ Perf_Event_Sample_Type_Bits :: enum {
WEIGHT_STRUCT = 24,
}
/*
Describes field sets to include in mmaped page
*/
Perf_Read_Format :: enum {
/// Describes field sets to include in the mmapped page
Perf_Read_Format_Bits :: enum {
TOTAL_TIME_ENABLED = 0,
TOTAL_TIME_RUNNING = 1,
ID = 2,

View File

@@ -185,7 +185,7 @@ mprotect :: proc "contextless" (addr: rawptr, size: uint, prot: Mem_Protection)
Available since Linux 1.0.
*/
munmap :: proc "contextless" (addr: rawptr, size: uint) -> (Errno) {
ret := syscall(SYS_mmap, addr, size)
ret := syscall(SYS_munmap, addr, size)
return Errno(-ret)
}
@@ -504,7 +504,8 @@ where
T == Sock_Addr_In6 ||
T == Sock_Addr_Any
{
ret := syscall(SYS_accept4, sock, addr, size_of(T), transmute(int) sockflags)
addr_len: i32 = size_of(T)
ret := syscall(SYS_accept4, sock, addr, &addr_len, transmute(int) sockflags)
return errno_unwrap(ret, Fd)
}
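This matches accept4(2), whose third parameter is a `socklen_t *addrlen` in-out pointer that the kernel writes the actual address length through; the previous call passed the size by value where a pointer was expected.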

View File

@@ -386,6 +386,8 @@ Perf_Flags :: bit_set[Perf_Flags_Bits; uint]
Perf_Event_Flags :: distinct bit_set[Perf_Event_Flags_Bits; u64]
Perf_Read_Format :: distinct bit_set[Perf_Read_Format_Bits; u64]
Perf_Cap_Flags :: distinct bit_set[Perf_Cap_Flags_Bits; u64]
Perf_Event_Sample_Type :: bit_set[Perf_Event_Sample_Type_Bits; u64]
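Being a distinct bit_set, read formats now compose like the other perf flag types, e.g. (a hypothetical snippet, assuming the package is imported as `linux`):
```
read_format := linux.Perf_Read_Format{.TOTAL_TIME_ENABLED, .TOTAL_TIME_RUNNING, .ID}
```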

View File

@@ -0,0 +1,250 @@
// +build windows
package sys_windows
foreign import "system:Dbghelp.lib"
RVA :: DWORD
MINIDUMP_LOCATION_DESCRIPTOR :: struct {
DataSize: ULONG32,
Rva: RVA,
}
MINIDUMP_DIRECTORY :: struct {
StreamType: ULONG32,
Location: MINIDUMP_LOCATION_DESCRIPTOR,
}
MINIDUMP_EXCEPTION_INFORMATION :: struct {
ThreadId: DWORD,
ExceptionPointers: ^EXCEPTION_POINTERS,
ClientPointers: BOOL,
}
MINIDUMP_MEMORY_INFO :: struct {
BaseAddress: ULONG64,
AllocationBase: ULONG64,
AllocationProtect: ULONG32,
__alignment1: ULONG32,
RegionSize: ULONG64,
State: ULONG32,
Protect: ULONG32,
Type: ULONG32,
__alignment2: ULONG32,
}
MINIDUMP_USER_STREAM :: struct {
Type: ULONG32,
BufferSize: ULONG,
Buffer: PVOID,
}
MINIDUMP_USER_STREAM_INFORMATION :: struct {
UserStreamCount: ULONG,
UserStreamArray: ^MINIDUMP_USER_STREAM,
}
MINIDUMP_CALLBACK_ROUTINE :: #type proc "stdcall" (
CallbackParam: PVOID,
CallbackInput: ^MINIDUMP_CALLBACK_INPUT,
CallbackOutput: ^MINIDUMP_CALLBACK_OUTPUT,
) -> BOOL
MINIDUMP_CALLBACK_INFORMATION :: struct {
CallbackRoutine: MINIDUMP_CALLBACK_ROUTINE,
CallbackParam: PVOID,
}
MINIDUMP_CALLBACK_INPUT :: struct {
ProcessId: ULONG,
ProcessHandle: HANDLE,
CallbackType: ULONG,
using _: struct #raw_union {
Status: HRESULT,
Thread: MINIDUMP_THREAD_CALLBACK,
ThreadEx: MINIDUMP_THREAD_EX_CALLBACK,
Module: MINIDUMP_MODULE_CALLBACK,
IncludeThread: MINIDUMP_INCLUDE_THREAD_CALLBACK,
IncludeModule: MINIDUMP_INCLUDE_MODULE_CALLBACK,
Io: MINIDUMP_IO_CALLBACK,
ReadMemoryFailure: MINIDUMP_READ_MEMORY_FAILURE_CALLBACK,
SecondaryFlags: ULONG,
VmQuery: MINIDUMP_VM_QUERY_CALLBACK,
VmPreRead: MINIDUMP_VM_PRE_READ_CALLBACK,
VmPostRead: MINIDUMP_VM_POST_READ_CALLBACK,
},
}
_MINIDUMP_ARM64_PAD :: ULONG when ODIN_ARCH == .arm64 else struct {}
MINIDUMP_THREAD_CALLBACK :: struct {
ThreadId: ULONG,
ThreadHandle: HANDLE,
Pad: _MINIDUMP_ARM64_PAD,
Context: CONTEXT,
SizeOfContext: ULONG,
StackBase: ULONG64,
StackEnd: ULONG64,
}
MINIDUMP_THREAD_EX_CALLBACK :: struct {
ThreadId: ULONG,
ThreadHandle: HANDLE,
Pad: _MINIDUMP_ARM64_PAD,
Context: CONTEXT,
SizeOfContext: ULONG,
StackBase: ULONG64,
StackEnd: ULONG64,
BackingStoreBase: ULONG64,
BackingStoreEnd: ULONG64,
}
MINIDUMP_INCLUDE_THREAD_CALLBACK :: struct {
ThreadId: ULONG,
}
// NOTE(jakubtomsu): From verrsrc.h
VS_FIXEDFILEINFO :: struct {
dwSignature: DWORD, /* e.g. 0xfeef04bd */
dwStrucVersion: DWORD, /* e.g. 0x00000042 = "0.42" */
dwFileVersionMS: DWORD, /* e.g. 0x00030075 = "3.75" */
dwFileVersionLS: DWORD, /* e.g. 0x00000031 = "0.31" */
dwProductVersionMS: DWORD, /* e.g. 0x00030010 = "3.10" */
dwProductVersionLS: DWORD, /* e.g. 0x00000031 = "0.31" */
dwFileFlagsMask: DWORD, /* = 0x3F for version "0.42" */
dwFileFlags: DWORD, /* e.g. VFF_DEBUG | VFF_PRERELEASE */
dwFileOS: DWORD, /* e.g. VOS_DOS_WINDOWS16 */
dwFileType: DWORD, /* e.g. VFT_DRIVER */
dwFileSubtype: DWORD, /* e.g. VFT2_DRV_KEYBOARD */
dwFileDateMS: DWORD, /* e.g. 0 */
dwFileDateLS: DWORD, /* e.g. 0 */
}
MINIDUMP_MODULE_CALLBACK :: struct {
FullPath: ^WCHAR,
BaseOfImage: ULONG64,
SizeOfImage: ULONG,
CheckSum: ULONG,
TimeDateStamp: ULONG,
VersionInfo: VS_FIXEDFILEINFO,
CvRecord: PVOID,
SizeOfCvRecord: ULONG,
MiscRecord: PVOID,
SizeOfMiscRecord: ULONG,
}
MINIDUMP_INCLUDE_MODULE_CALLBACK :: struct {
BaseOfImage: u64,
}
MINIDUMP_IO_CALLBACK :: struct {
Handle: HANDLE,
Offset: ULONG64,
Buffer: PVOID,
BufferBytes: ULONG,
}
MINIDUMP_READ_MEMORY_FAILURE_CALLBACK :: struct {
Offset: ULONG64,
Bytes: ULONG,
FailureStatus: HRESULT,
}
MINIDUMP_VM_QUERY_CALLBACK :: struct {
Offset: ULONG64,
}
MINIDUMP_VM_PRE_READ_CALLBACK :: struct {
Offset: ULONG64,
Buffer: PVOID,
Size: ULONG,
}
MINIDUMP_VM_POST_READ_CALLBACK :: struct {
Offset: ULONG64,
Buffer: PVOID,
Size: ULONG,
Completed: ULONG,
Status: HRESULT,
}
MINIDUMP_CALLBACK_OUTPUT :: struct {
using _: struct #raw_union {
ModuleWriteFlags: ULONG,
ThreadWriteFlags: ULONG,
SecondaryFlags: ULONG,
using _: struct {
MemoryBase: ULONG64,
MemorySize: ULONG,
},
using _: struct {
CheckCancel: BOOL,
Cancel: BOOL,
},
Handle: HANDLE,
using _: struct {
VmRegion: MINIDUMP_MEMORY_INFO,
Continue: BOOL,
},
using _: struct {
VmQueryStatus: HRESULT,
VmQueryResult: MINIDUMP_MEMORY_INFO,
},
using _: struct {
VmReadStatus: HRESULT,
VmReadBytesCompleted: ULONG,
},
Status: HRESULT,
},
}
MINIDUMP_TYPE :: enum u32 {
Normal = 0x00000000,
WithDataSegs = 0x00000001,
WithFullMemory = 0x00000002,
WithHandleData = 0x00000004,
FilterMemory = 0x00000008,
ScanMemory = 0x00000010,
WithUnloadedModules = 0x00000020,
WithIndirectlyReferencedMemory = 0x00000040,
FilterModulePaths = 0x00000080,
WithProcessThreadData = 0x00000100,
WithPrivateReadWriteMemory = 0x00000200,
WithoutOptionalData = 0x00000400,
WithFullMemoryInfo = 0x00000800,
WithThreadInfo = 0x00001000,
WithCodeSegs = 0x00002000,
WithoutAuxiliaryState = 0x00004000,
WithFullAuxiliaryState = 0x00008000,
WithPrivateWriteCopyMemory = 0x00010000,
IgnoreInaccessibleMemory = 0x00020000,
WithTokenInformation = 0x00040000,
WithModuleHeaders = 0x00080000,
FilterTriage = 0x00100000,
WithAvxXStateContext = 0x00200000,
WithIptTrace = 0x00400000,
ScanInaccessiblePartialPages = 0x00800000,
FilterWriteCombinedMemory = 0x01000000,
ValidTypeFlags = 0x01ffffff,
}
@(default_calling_convention = "stdcall")
foreign Dbghelp {
MiniDumpWriteDump :: proc(
hProcess: HANDLE,
ProcessId: DWORD,
hFile: HANDLE,
DumpType: MINIDUMP_TYPE,
ExceptionParam: ^MINIDUMP_EXCEPTION_INFORMATION,
UserStreamParam: ^MINIDUMP_USER_STREAM_INFORMATION,
CallbackParam: ^MINIDUMP_CALLBACK_INFORMATION,
) -> BOOL ---
MiniDumpReadDumpStream :: proc(
BaseOfDump: PVOID,
StreamNumber: ULONG,
Dir: ^^MINIDUMP_DIRECTORY,
StreamPointer: ^PVOID,
StreamSize: ^ULONG,
) -> BOOL ---
}

View File

@@ -43,5 +43,6 @@ foreign dwmapi {
DwmFlush :: proc() -> HRESULT ---
DwmIsCompositionEnabled :: proc(pfEnabled: ^BOOL) -> HRESULT ---
DwmExtendFrameIntoClientArea :: proc(hWnd: HWND, pMarInset: PMARGINS) -> HRESULT ---
DwmGetWindowAttribute :: proc(hWnd: HWND, dwAttribute: DWORD, pvAttribute: PVOID, cbAttribute: DWORD) -> HRESULT ---
DwmSetWindowAttribute :: proc(hWnd: HWND, dwAttribute: DWORD, pvAttribute: LPCVOID, cbAttribute: DWORD) -> HRESULT ---
}

View File

@@ -10,6 +10,8 @@ foreign gdi32 {
DeleteObject :: proc(ho: HGDIOBJ) -> BOOL ---
SetBkColor :: proc(hdc: HDC, color: COLORREF) -> COLORREF ---
CreateCompatibleDC :: proc(hdc: HDC) -> HDC ---
CreateDIBPatternBrush :: proc(h: HGLOBAL, iUsage: UINT) -> HBRUSH ---
CreateDIBitmap :: proc(
@@ -81,6 +83,11 @@ foreign gdi32 {
GetTextMetricsW :: proc(hdc: HDC, lptm: LPTEXTMETRICW) -> BOOL ---
CreateSolidBrush :: proc(color: COLORREF) -> HBRUSH ---
GetObjectW :: proc(h: HANDLE, c: c_int, pv: LPVOID) -> int ---
CreateCompatibleBitmap :: proc(hdc: HDC, cx, cy: c_int) -> HBITMAP ---
BitBlt :: proc(hdc: HDC, x, y, cx, cy: c_int, hdcSrc: HDC, x1, y1: c_int, rop: DWORD) -> BOOL ---
GetDIBits :: proc(hdc: HDC, hbm: HBITMAP, start, cLines: UINT, lpvBits: LPVOID, lpbmi: ^BITMAPINFO, usage: UINT) -> int ---
}
RGB :: #force_inline proc "contextless" (r, g, b: u8) -> COLORREF {

View File

@@ -172,6 +172,7 @@ foreign kernel32 {
TolerableDelay: ULONG,
) -> BOOL ---
WaitForSingleObject :: proc(hHandle: HANDLE, dwMilliseconds: DWORD) -> DWORD ---
WaitForSingleObjectEx :: proc(hHandle: HANDLE, dwMilliseconds: DWORD, bAlterable: BOOL) -> DWORD ---
Sleep :: proc(dwMilliseconds: DWORD) ---
GetProcessId :: proc(handle: HANDLE) -> DWORD ---
CopyFileW :: proc(
@@ -290,6 +291,14 @@ foreign kernel32 {
hTemplateFile: HANDLE,
) -> HANDLE ---
GetFileTime :: proc(
hFile: HANDLE,
lpCreationTime: LPFILETIME,
lpLastAccessTime: LPFILETIME,
lpLastWriteTime: LPFILETIME,
) -> BOOL ---
CompareFileTime :: proc(lpFileTime1: LPFILETIME, lpFileTime2: LPFILETIME) -> LONG ---
FindFirstFileW :: proc(fileName: LPCWSTR, findFileData: LPWIN32_FIND_DATAW) -> HANDLE ---
FindNextFileW :: proc(findFile: HANDLE, findFileData: LPWIN32_FIND_DATAW) -> BOOL ---
FindClose :: proc(findFile: HANDLE) -> BOOL ---
@@ -320,6 +329,13 @@ foreign kernel32 {
bWaitAll: BOOL,
dwMilliseconds: DWORD,
) -> DWORD ---
WaitForMultipleObjectsEx :: proc(
nCount: DWORD,
lpHandles: ^HANDLE,
bWaitAll: BOOL,
dwMilliseconds: DWORD,
bAlterable: BOOL,
) -> DWORD ---
CreateNamedPipeW :: proc(
lpName: LPCWSTR,
dwOpenMode: DWORD,
@@ -346,6 +362,9 @@ foreign kernel32 {
LocalReAlloc :: proc(mem: LPVOID, bytes: SIZE_T, flags: UINT) -> LPVOID ---
LocalFree :: proc(mem: LPVOID) -> LPVOID ---
GlobalAlloc :: proc(flags: UINT, bytes: SIZE_T) -> LPVOID ---
GlobalReAlloc :: proc(mem: LPVOID, bytes: SIZE_T, flags: UINT) -> LPVOID ---
GlobalFree :: proc(mem: LPVOID) -> LPVOID ---
ReadDirectoryChangesW :: proc(
hDirectory: HANDLE,
@@ -414,7 +433,7 @@ foreign kernel32 {
GetConsoleWindow :: proc() -> HWND ---
GetConsoleScreenBufferInfo :: proc(hConsoleOutput: HANDLE, lpConsoleScreenBufferInfo: PCONSOLE_SCREEN_BUFFER_INFO) -> BOOL ---
SetConsoleScreenBufferSize :: proc(hConsoleOutput: HANDLE, dwSize: COORD) -> BOOL ---
SetConsoleWindowInfo :: proc(hConsoleOutput: HANDLE, bAbsolute : BOOL, lpConsoleWindow: ^SMALL_RECT) -> BOOL ---
SetConsoleWindowInfo :: proc(hConsoleOutput: HANDLE, bAbsolute: BOOL, lpConsoleWindow: ^SMALL_RECT) -> BOOL ---
GetConsoleCursorInfo :: proc(hConsoleOutput: HANDLE, lpConsoleCursorInfo: PCONSOLE_CURSOR_INFO) -> BOOL ---
SetConsoleCursorInfo :: proc(hConsoleOutput: HANDLE, lpConsoleCursorInfo: PCONSOLE_CURSOR_INFO) -> BOOL ---

View File

@@ -0,0 +1,145 @@
// +build windows
package sys_windows
FOLDERID_NetworkFolder :: GUID {0xD20BEEC4, 0x5CA8, 0x4905, {0xAE, 0x3B, 0xBF, 0x25, 0x1E, 0xA0, 0x9B, 0x53}}
FOLDERID_ComputerFolder :: GUID {0x0AC0837C, 0xBBF8, 0x452A, {0x85, 0x0D, 0x79, 0xD0, 0x8E, 0x66, 0x7C, 0xA7}}
FOLDERID_InternetFolder :: GUID {0x4D9F7874, 0x4E0C, 0x4904, {0x96, 0x7B, 0x40, 0xB0, 0xD2, 0x0C, 0x3E, 0x4B}}
FOLDERID_ControlPanelFolder :: GUID {0x82A74AEB, 0xAEB4, 0x465C, {0xA0, 0x14, 0xD0, 0x97, 0xEE, 0x34, 0x6D, 0x63}}
FOLDERID_PrintersFolder :: GUID {0x76FC4E2D, 0xD6AD, 0x4519, {0xA6, 0x63, 0x37, 0xBD, 0x56, 0x06, 0x81, 0x85}}
FOLDERID_SyncManagerFolder :: GUID {0x43668BF8, 0xC14E, 0x49B2, {0x97, 0xC9, 0x74, 0x77, 0x84, 0xD7, 0x84, 0xB7}}
FOLDERID_SyncSetupFolder :: GUID {0xf214138 , 0xb1d3, 0x4a90, {0xbb, 0xa9, 0x27, 0xcb, 0xc0, 0xc5, 0x38, 0x9a}}
FOLDERID_ConflictFolder :: GUID {0x4bfefb45, 0x347d, 0x4006, {0xa5, 0xbe, 0xac, 0x0c, 0xb0, 0x56, 0x71, 0x92}}
FOLDERID_SyncResultsFolder :: GUID {0x289a9a43, 0xbe44, 0x4057, {0xa4, 0x1b, 0x58, 0x7a, 0x76, 0xd7, 0xe7, 0xf9}}
FOLDERID_RecycleBinFolder :: GUID {0xB7534046, 0x3ECB, 0x4C18, {0xBE, 0x4E, 0x64, 0xCD, 0x4C, 0xB7, 0xD6, 0xAC}}
FOLDERID_ConnectionsFolder :: GUID {0x6F0CD92B, 0x2E97, 0x45D1, {0x88, 0xFF, 0xB0, 0xD1, 0x86, 0xB8, 0xDE, 0xDD}}
FOLDERID_Fonts :: GUID {0xFD228CB7, 0xAE11, 0x4AE3, {0x86, 0x4C, 0x16, 0xF3, 0x91, 0x0A, 0xB8, 0xFE}}
FOLDERID_Desktop :: GUID {0xB4BFCC3A, 0xDB2C, 0x424C, {0xB0, 0x29, 0x7F, 0xE9, 0x9A, 0x87, 0xC6, 0x41}}
FOLDERID_Startup :: GUID {0xB97D20BB, 0xF46A, 0x4C97, {0xBA, 0x10, 0x5E, 0x36, 0x08, 0x43, 0x08, 0x54}}
FOLDERID_Programs :: GUID {0xA77F5D77, 0x2E2B, 0x44C3, {0xA6, 0xA2, 0xAB, 0xA6, 0x01, 0x05, 0x4A, 0x51}}
FOLDERID_StartMenu :: GUID {0x625B53C3, 0xAB48, 0x4EC1, {0xBA, 0x1F, 0xA1, 0xEF, 0x41, 0x46, 0xFC, 0x19}}
FOLDERID_Recent :: GUID {0xAE50C081, 0xEBD2, 0x438A, {0x86, 0x55, 0x8A, 0x09, 0x2E, 0x34, 0x98, 0x7A}}
FOLDERID_SendTo :: GUID {0x8983036C, 0x27C0, 0x404B, {0x8F, 0x08, 0x10, 0x2D, 0x10, 0xDC, 0xFD, 0x74}}
FOLDERID_Documents :: GUID {0xFDD39AD0, 0x238F, 0x46AF, {0xAD, 0xB4, 0x6C, 0x85, 0x48, 0x03, 0x69, 0xC7}}
FOLDERID_Favorites :: GUID {0x1777F761, 0x68AD, 0x4D8A, {0x87, 0xBD, 0x30, 0xB7, 0x59, 0xFA, 0x33, 0xDD}}
FOLDERID_NetHood :: GUID {0xC5ABBF53, 0xE17F, 0x4121, {0x89, 0x00, 0x86, 0x62, 0x6F, 0xC2, 0xC9, 0x73}}
FOLDERID_PrintHood :: GUID {0x9274BD8D, 0xCFD1, 0x41C3, {0xB3, 0x5E, 0xB1, 0x3F, 0x55, 0xA7, 0x58, 0xF4}}
FOLDERID_Templates :: GUID {0xA63293E8, 0x664E, 0x48DB, {0xA0, 0x79, 0xDF, 0x75, 0x9E, 0x05, 0x09, 0xF7}}
FOLDERID_CommonStartup :: GUID {0x82A5EA35, 0xD9CD, 0x47C5, {0x96, 0x29, 0xE1, 0x5D, 0x2F, 0x71, 0x4E, 0x6E}}
FOLDERID_CommonPrograms :: GUID {0x0139D44E, 0x6AFE, 0x49F2, {0x86, 0x90, 0x3D, 0xAF, 0xCA, 0xE6, 0xFF, 0xB8}}
FOLDERID_CommonStartMenu :: GUID {0xA4115719, 0xD62E, 0x491D, {0xAA, 0x7C, 0xE7, 0x4B, 0x8B, 0xE3, 0xB0, 0x67}}
FOLDERID_PublicDesktop :: GUID {0xC4AA340D, 0xF20F, 0x4863, {0xAF, 0xEF, 0xF8, 0x7E, 0xF2, 0xE6, 0xBA, 0x25}}
FOLDERID_ProgramData :: GUID {0x62AB5D82, 0xFDC1, 0x4DC3, {0xA9, 0xDD, 0x07, 0x0D, 0x1D, 0x49, 0x5D, 0x97}}
FOLDERID_CommonTemplates :: GUID {0xB94237E7, 0x57AC, 0x4347, {0x91, 0x51, 0xB0, 0x8C, 0x6C, 0x32, 0xD1, 0xF7}}
FOLDERID_PublicDocuments :: GUID {0xED4824AF, 0xDCE4, 0x45A8, {0x81, 0xE2, 0xFC, 0x79, 0x65, 0x08, 0x36, 0x34}}
FOLDERID_RoamingAppData :: GUID {0x3EB685DB, 0x65F9, 0x4CF6, {0xA0, 0x3A, 0xE3, 0xEF, 0x65, 0x72, 0x9F, 0x3D}}
FOLDERID_LocalAppData :: GUID {0xF1B32785, 0x6FBA, 0x4FCF, {0x9D, 0x55, 0x7B, 0x8E, 0x7F, 0x15, 0x70, 0x91}}
FOLDERID_LocalAppDataLow :: GUID {0xA520A1A4, 0x1780, 0x4FF6, {0xBD, 0x18, 0x16, 0x73, 0x43, 0xC5, 0xAF, 0x16}}
FOLDERID_InternetCache :: GUID {0x352481E8, 0x33BE, 0x4251, {0xBA, 0x85, 0x60, 0x07, 0xCA, 0xED, 0xCF, 0x9D}}
FOLDERID_Cookies :: GUID {0x2B0F765D, 0xC0E9, 0x4171, {0x90, 0x8E, 0x08, 0xA6, 0x11, 0xB8, 0x4F, 0xF6}}
FOLDERID_History :: GUID {0xD9DC8A3B, 0xB784, 0x432E, {0xA7, 0x81, 0x5A, 0x11, 0x30, 0xA7, 0x59, 0x63}}
FOLDERID_System :: GUID {0x1AC14E77, 0x02E7, 0x4E5D, {0xB7, 0x44, 0x2E, 0xB1, 0xAE, 0x51, 0x98, 0xB7}}
FOLDERID_SystemX86 :: GUID {0xD65231B0, 0xB2F1, 0x4857, {0xA4, 0xCE, 0xA8, 0xE7, 0xC6, 0xEA, 0x7D, 0x27}}
FOLDERID_Windows :: GUID {0xF38BF404, 0x1D43, 0x42F2, {0x93, 0x05, 0x67, 0xDE, 0x0B, 0x28, 0xFC, 0x23}}
FOLDERID_Profile :: GUID {0x5E6C858F, 0x0E22, 0x4760, {0x9A, 0xFE, 0xEA, 0x33, 0x17, 0xB6, 0x71, 0x73}}
FOLDERID_Pictures :: GUID {0x33E28130, 0x4E1E, 0x4676, {0x83, 0x5A, 0x98, 0x39, 0x5C, 0x3B, 0xC3, 0xBB}}
FOLDERID_ProgramFilesX86 :: GUID {0x7C5A40EF, 0xA0FB, 0x4BFC, {0x87, 0x4A, 0xC0, 0xF2, 0xE0, 0xB9, 0xFA, 0x8E}}
FOLDERID_ProgramFilesCommonX86 :: GUID {0xDE974D24, 0xD9C6, 0x4D3E, {0xBF, 0x91, 0xF4, 0x45, 0x51, 0x20, 0xB9, 0x17}}
FOLDERID_ProgramFilesX64 :: GUID {0x6d809377, 0x6af0, 0x444b, {0x89, 0x57, 0xa3, 0x77, 0x3f, 0x02, 0x20, 0x0e}}
FOLDERID_ProgramFilesCommonX64 :: GUID {0x6365d5a7, 0xf0d , 0x45e5, {0x87, 0xf6, 0xd, 0xa5, 0x6b, 0x6a, 0x4f, 0x7d }}
FOLDERID_ProgramFiles :: GUID {0x905e63b6, 0xc1bf, 0x494e, {0xb2, 0x9c, 0x65, 0xb7, 0x32, 0xd3, 0xd2, 0x1a}}
FOLDERID_ProgramFilesCommon :: GUID {0xF7F1ED05, 0x9F6D, 0x47A2, {0xAA, 0xAE, 0x29, 0xD3, 0x17, 0xC6, 0xF0, 0x66}}
FOLDERID_UserProgramFiles :: GUID {0x5cd7aee2, 0x2219, 0x4a67, {0xb8, 0x5d, 0x6c, 0x9c, 0xe1, 0x56, 0x60, 0xcb}}
FOLDERID_UserProgramFilesCommon :: GUID {0xbcbd3057, 0xca5c, 0x4622, {0xb4, 0x2d, 0xbc, 0x56, 0xdb, 0x0a, 0xe5, 0x16}}
FOLDERID_AdminTools :: GUID {0x724EF170, 0xA42D, 0x4FEF, {0x9F, 0x26, 0xB6, 0x0E, 0x84, 0x6F, 0xBA, 0x4F}}
FOLDERID_CommonAdminTools :: GUID {0xD0384E7D, 0xBAC3, 0x4797, {0x8F, 0x14, 0xCB, 0xA2, 0x29, 0xB3, 0x92, 0xB5}}
FOLDERID_Music :: GUID {0x4BD8D571, 0x6D19, 0x48D3, {0xBE, 0x97, 0x42, 0x22, 0x20, 0x08, 0x0E, 0x43}}
FOLDERID_Videos :: GUID {0x18989B1D, 0x99B5, 0x455B, {0x84, 0x1C, 0xAB, 0x7C, 0x74, 0xE4, 0xDD, 0xFC}}
FOLDERID_Ringtones :: GUID {0xC870044B, 0xF49E, 0x4126, {0xA9, 0xC3, 0xB5, 0x2A, 0x1F, 0xF4, 0x11, 0xE8}}
FOLDERID_PublicPictures :: GUID {0xB6EBFB86, 0x6907, 0x413C, {0x9A, 0xF7, 0x4F, 0xC2, 0xAB, 0xF0, 0x7C, 0xC5}}
FOLDERID_PublicMusic :: GUID {0x3214FAB5, 0x9757, 0x4298, {0xBB, 0x61, 0x92, 0xA9, 0xDE, 0xAA, 0x44, 0xFF}}
FOLDERID_PublicVideos :: GUID {0x2400183A, 0x6185, 0x49FB, {0xA2, 0xD8, 0x4A, 0x39, 0x2A, 0x60, 0x2B, 0xA3}}
FOLDERID_PublicRingtones :: GUID {0xE555AB60, 0x153B, 0x4D17, {0x9F, 0x04, 0xA5, 0xFE, 0x99, 0xFC, 0x15, 0xEC}}
FOLDERID_ResourceDir :: GUID {0x8AD10C31, 0x2ADB, 0x4296, {0xA8, 0xF7, 0xE4, 0x70, 0x12, 0x32, 0xC9, 0x72}}
FOLDERID_LocalizedResourcesDir :: GUID {0x2A00375E, 0x224C, 0x49DE, {0xB8, 0xD1, 0x44, 0x0D, 0xF7, 0xEF, 0x3D, 0xDC}}
FOLDERID_CommonOEMLinks :: GUID {0xC1BAE2D0, 0x10DF, 0x4334, {0xBE, 0xDD, 0x7A, 0xA2, 0x0B, 0x22, 0x7A, 0x9D}}
FOLDERID_CDBurning :: GUID {0x9E52AB10, 0xF80D, 0x49DF, {0xAC, 0xB8, 0x43, 0x30, 0xF5, 0x68, 0x78, 0x55}}
FOLDERID_UserProfiles :: GUID {0x0762D272, 0xC50A, 0x4BB0, {0xA3, 0x82, 0x69, 0x7D, 0xCD, 0x72, 0x9B, 0x80}}
FOLDERID_Playlists :: GUID {0xDE92C1C7, 0x837F, 0x4F69, {0xA3, 0xBB, 0x86, 0xE6, 0x31, 0x20, 0x4A, 0x23}}
FOLDERID_SamplePlaylists :: GUID {0x15CA69B3, 0x30EE, 0x49C1, {0xAC, 0xE1, 0x6B, 0x5E, 0xC3, 0x72, 0xAF, 0xB5}}
FOLDERID_SampleMusic :: GUID {0xB250C668, 0xF57D, 0x4EE1, {0xA6, 0x3C, 0x29, 0x0E, 0xE7, 0xD1, 0xAA, 0x1F}}
FOLDERID_SamplePictures :: GUID {0xC4900540, 0x2379, 0x4C75, {0x84, 0x4B, 0x64, 0xE6, 0xFA, 0xF8, 0x71, 0x6B}}
FOLDERID_SampleVideos :: GUID {0x859EAD94, 0x2E85, 0x48AD, {0xA7, 0x1A, 0x09, 0x69, 0xCB, 0x56, 0xA6, 0xCD}}
FOLDERID_PhotoAlbums :: GUID {0x69D2CF90, 0xFC33, 0x4FB7, {0x9A, 0x0C, 0xEB, 0xB0, 0xF0, 0xFC, 0xB4, 0x3C}}
FOLDERID_Public :: GUID {0xDFDF76A2, 0xC82A, 0x4D63, {0x90, 0x6A, 0x56, 0x44, 0xAC, 0x45, 0x73, 0x85}}
FOLDERID_ChangeRemovePrograms :: GUID {0xdf7266ac, 0x9274, 0x4867, {0x8d, 0x55, 0x3b, 0xd6, 0x61, 0xde, 0x87, 0x2d}}
FOLDERID_AppUpdates :: GUID {0xa305ce99, 0xf527, 0x492b, {0x8b, 0x1a, 0x7e, 0x76, 0xfa, 0x98, 0xd6, 0xe4}}
FOLDERID_AddNewPrograms :: GUID {0xde61d971, 0x5ebc, 0x4f02, {0xa3, 0xa9, 0x6c, 0x82, 0x89, 0x5e, 0x5c, 0x04}}
FOLDERID_Downloads :: GUID {0x374de290, 0x123f, 0x4565, {0x91, 0x64, 0x39, 0xc4, 0x92, 0x5e, 0x46, 0x7b}}
FOLDERID_PublicDownloads :: GUID {0x3d644c9b, 0x1fb8, 0x4f30, {0x9b, 0x45, 0xf6, 0x70, 0x23, 0x5f, 0x79, 0xc0}}
FOLDERID_SavedSearches :: GUID {0x7d1d3a04, 0xdebb, 0x4115, {0x95, 0xcf, 0x2f, 0x29, 0xda, 0x29, 0x20, 0xda}}
FOLDERID_QuickLaunch :: GUID {0x52a4f021, 0x7b75, 0x48a9, {0x9f, 0x6b, 0x4b, 0x87, 0xa2, 0x10, 0xbc, 0x8f}}
FOLDERID_Contacts :: GUID {0x56784854, 0xc6cb, 0x462b, {0x81, 0x69, 0x88, 0xe3, 0x50, 0xac, 0xb8, 0x82}}
FOLDERID_SidebarParts :: GUID {0xa75d362e, 0x50fc, 0x4fb7, {0xac, 0x2c, 0xa8, 0xbe, 0xaa, 0x31, 0x44, 0x93}}
FOLDERID_SidebarDefaultParts :: GUID {0x7b396e54, 0x9ec5, 0x4300, {0xbe, 0xa , 0x24, 0x82, 0xeb, 0xae, 0x1a, 0x26}}
FOLDERID_PublicGameTasks :: GUID {0xdebf2536, 0xe1a8, 0x4c59, {0xb6, 0xa2, 0x41, 0x45, 0x86, 0x47, 0x6a, 0xea}}
FOLDERID_GameTasks :: GUID {0x54fae61 , 0x4dd8, 0x4787, {0x80, 0xb6, 0x9 , 0x2 , 0x20, 0xc4, 0xb7, 0x0 }}
FOLDERID_SavedGames :: GUID {0x4c5c32ff, 0xbb9d, 0x43b0, {0xb5, 0xb4, 0x2d, 0x72, 0xe5, 0x4e, 0xaa, 0xa4}}
FOLDERID_Games :: GUID {0xcac52c1a, 0xb53d, 0x4edc, {0x92, 0xd7, 0x6b, 0x2e, 0x8a, 0xc1, 0x94, 0x34}}
FOLDERID_SEARCH_MAPI :: GUID {0x98ec0e18, 0x2098, 0x4d44, {0x86, 0x44, 0x66, 0x97, 0x93, 0x15, 0xa2, 0x81}}
FOLDERID_SEARCH_CSC :: GUID {0xee32e446, 0x31ca, 0x4aba, {0x81, 0x4f, 0xa5, 0xeb, 0xd2, 0xfd, 0x6d, 0x5e}}
FOLDERID_Links :: GUID {0xbfb9d5e0, 0xc6a9, 0x404c, {0xb2, 0xb2, 0xae, 0x6d, 0xb6, 0xaf, 0x49, 0x68}}
FOLDERID_UsersFiles :: GUID {0xf3ce0f7c, 0x4901, 0x4acc, {0x86, 0x48, 0xd5, 0xd4, 0x4b, 0x04, 0xef, 0x8f}}
FOLDERID_UsersLibraries :: GUID {0xa302545d, 0xdeff, 0x464b, {0xab, 0xe8, 0x61, 0xc8, 0x64, 0x8d, 0x93, 0x9b}}
FOLDERID_SearchHome :: GUID {0x190337d1, 0xb8ca, 0x4121, {0xa6, 0x39, 0x6d, 0x47, 0x2d, 0x16, 0x97, 0x2a}}
FOLDERID_OriginalImages :: GUID {0x2C36C0AA, 0x5812, 0x4b87, {0xbf, 0xd0, 0x4c, 0xd0, 0xdf, 0xb1, 0x9b, 0x39}}
FOLDERID_DocumentsLibrary :: GUID {0x7b0db17d, 0x9cd2, 0x4a93, {0x97, 0x33, 0x46, 0xcc, 0x89, 0x02, 0x2e, 0x7c}}
FOLDERID_MusicLibrary :: GUID {0x2112ab0a, 0xc86a, 0x4ffe, {0xa3, 0x68, 0xd , 0xe9, 0x6e, 0x47, 0x1 , 0x2e}}
FOLDERID_PicturesLibrary :: GUID {0xa990ae9f, 0xa03b, 0x4e80, {0x94, 0xbc, 0x99, 0x12, 0xd7, 0x50, 0x41, 0x4 }}
FOLDERID_VideosLibrary :: GUID {0x491e922f, 0x5643, 0x4af4, {0xa7, 0xeb, 0x4e, 0x7a, 0x13, 0x8d, 0x81, 0x74}}
FOLDERID_RecordedTVLibrary :: GUID {0x1a6fdba2, 0xf42d, 0x4358, {0xa7, 0x98, 0xb7, 0x4d, 0x74, 0x59, 0x26, 0xc5}}
FOLDERID_HomeGroup :: GUID {0x52528a6b, 0xb9e3, 0x4add, {0xb6, 0xd , 0x58, 0x8c, 0x2d, 0xba, 0x84, 0x2d}}
FOLDERID_HomeGroupCurrentUser :: GUID {0x9b74b6a3, 0xdfd , 0x4f11, {0x9e, 0x78, 0x5f, 0x78, 0x0 , 0xf2, 0xe7, 0x72}}
FOLDERID_DeviceMetadataStore :: GUID {0x5ce4a5e9, 0xe4eb, 0x479d, {0xb8, 0x9f, 0x13, 0x0c, 0x02, 0x88, 0x61, 0x55}}
FOLDERID_Libraries :: GUID {0x1b3ea5dc, 0xb587, 0x4786, {0xb4, 0xef, 0xbd, 0x1d, 0xc3, 0x32, 0xae, 0xae}}
FOLDERID_PublicLibraries :: GUID {0x48daf80b, 0xe6cf, 0x4f4e, {0xb8, 0x00, 0x0e, 0x69, 0xd8, 0x4e, 0xe3, 0x84}}
FOLDERID_UserPinned :: GUID {0x9e3995ab, 0x1f9c, 0x4f13, {0xb8, 0x27, 0x48, 0xb2, 0x4b, 0x6c, 0x71, 0x74}}
FOLDERID_ImplicitAppShortcuts :: GUID {0xbcb5256f, 0x79f6, 0x4cee, {0xb7, 0x25, 0xdc, 0x34, 0xe4, 0x2 , 0xfd, 0x46}}
FOLDERID_AccountPictures :: GUID {0x008ca0b1, 0x55b4, 0x4c56, {0xb8, 0xa8, 0x4d, 0xe4, 0xb2, 0x99, 0xd3, 0xbe}}
FOLDERID_PublicUserTiles :: GUID {0x0482af6c, 0x08f1, 0x4c34, {0x8c, 0x90, 0xe1, 0x7e, 0xc9, 0x8b, 0x1e, 0x17}}
FOLDERID_AppsFolder :: GUID {0x1e87508d, 0x89c2, 0x42f0, {0x8a, 0x7e, 0x64, 0x5a, 0x0f, 0x50, 0xca, 0x58}}
FOLDERID_StartMenuAllPrograms :: GUID {0xf26305ef, 0x6948, 0x40b9, {0xb2, 0x55, 0x81, 0x45, 0x3d, 0x9 , 0xc7, 0x85}}
FOLDERID_CommonStartMenuPlaces :: GUID {0xa440879f, 0x87a0, 0x4f7d, {0xb7, 0x0 , 0x2 , 0x7 , 0xb9, 0x66, 0x19, 0x4a}}
FOLDERID_ApplicationShortcuts :: GUID {0xa3918781, 0xe5f2, 0x4890, {0xb3, 0xd9, 0xa7, 0xe5, 0x43, 0x32, 0x32, 0x8c}}
FOLDERID_RoamingTiles :: GUID {0xbcfc5a , 0xed94, 0x4e48, {0x96, 0xa1, 0x3f, 0x62, 0x17, 0xf2, 0x19, 0x90}}
FOLDERID_RoamedTileImages :: GUID {0xaaa8d5a5, 0xf1d6, 0x4259, {0xba, 0xa8, 0x78, 0xe7, 0xef, 0x60, 0x83, 0x5e}}
FOLDERID_Screenshots :: GUID {0xb7bede81, 0xdf94, 0x4682, {0xa7, 0xd8, 0x57, 0xa5, 0x26, 0x20, 0xb8, 0x6f}}
FOLDERID_CameraRoll :: GUID {0xab5fb87b, 0x7ce2, 0x4f83, {0x91, 0x5d, 0x55, 0x8 , 0x46, 0xc9, 0x53, 0x7b}}
FOLDERID_SkyDrive :: GUID {0xa52bba46, 0xe9e1, 0x435f, {0xb3, 0xd9, 0x28, 0xda, 0xa6, 0x48, 0xc0, 0xf6}}
FOLDERID_OneDrive :: GUID {0xa52bba46, 0xe9e1, 0x435f, {0xb3, 0xd9, 0x28, 0xda, 0xa6, 0x48, 0xc0, 0xf6}}
FOLDERID_SkyDriveDocuments :: GUID {0x24d89e24, 0x2f19, 0x4534, {0x9d, 0xde, 0x6a, 0x66, 0x71, 0xfb, 0xb8, 0xfe}}
FOLDERID_SkyDrivePictures :: GUID {0x339719b5, 0x8c47, 0x4894, {0x94, 0xc2, 0xd8, 0xf7, 0x7a, 0xdd, 0x44, 0xa6}}
FOLDERID_SkyDriveMusic :: GUID {0xc3f2459e, 0x80d6, 0x45dc, {0xbf, 0xef, 0x1f, 0x76, 0x9f, 0x2b, 0xe7, 0x30}}
FOLDERID_SkyDriveCameraRoll :: GUID {0x767e6811, 0x49cb, 0x4273, {0x87, 0xc2, 0x20, 0xf3, 0x55, 0xe1, 0x08, 0x5b}}
FOLDERID_SearchHistory :: GUID {0x0d4c3db6, 0x03a3, 0x462f, {0xa0, 0xe6, 0x08, 0x92, 0x4c, 0x41, 0xb5, 0xd4}}
FOLDERID_SearchTemplates :: GUID {0x7e636bfe, 0xdfa9, 0x4d5e, {0xb4, 0x56, 0xd7, 0xb3, 0x98, 0x51, 0xd8, 0xa9}}
FOLDERID_CameraRollLibrary :: GUID {0x2b20df75, 0x1eda, 0x4039, {0x80, 0x97, 0x38, 0x79, 0x82, 0x27, 0xd5, 0xb7}}
FOLDERID_SavedPictures :: GUID {0x3b193882, 0xd3ad, 0x4eab, {0x96, 0x5a, 0x69, 0x82, 0x9d, 0x1f, 0xb5, 0x9f}}
FOLDERID_SavedPicturesLibrary :: GUID {0xe25b5812, 0xbe88, 0x4bd9, {0x94, 0xb0, 0x29, 0x23, 0x34, 0x77, 0xb6, 0xc3}}
FOLDERID_RetailDemo :: GUID {0x12d4c69e, 0x24ad, 0x4923, {0xbe, 0x19, 0x31, 0x32, 0x1c, 0x43, 0xa7, 0x67}}
FOLDERID_Device :: GUID {0x1C2AC1DC, 0x4358, 0x4B6C, {0x97, 0x33, 0xAF, 0x21, 0x15, 0x65, 0x76, 0xF0}}
FOLDERID_DevelopmentFiles :: GUID {0xdbe8e08e, 0x3053, 0x4bbc, {0xb1, 0x83, 0x2a, 0x7b, 0x2b, 0x19, 0x1e, 0x59}}
FOLDERID_Objects3D :: GUID {0x31c0dd25, 0x9439, 0x4f12, {0xbf, 0x41, 0x7f, 0xf4, 0xed, 0xa3, 0x87, 0x22}}
FOLDERID_AppCaptures :: GUID {0xedc0fe71, 0x98d8, 0x4f4a, {0xb9, 0x20, 0xc8, 0xdc, 0x13, 0x3c, 0xb1, 0x65}}
FOLDERID_LocalDocuments :: GUID {0xf42ee2d3, 0x909f, 0x4907, {0x88, 0x71, 0x4c, 0x22, 0xfc, 0x0b, 0xf7, 0x56}}
FOLDERID_LocalPictures :: GUID {0x0ddd015d, 0xb06c, 0x45d5, {0x8c, 0x4c, 0xf5, 0x97, 0x13, 0x85, 0x46, 0x39}}
FOLDERID_LocalVideos :: GUID {0x35286a68, 0x3c57, 0x41a1, {0xbb, 0xb1, 0x0e, 0xae, 0x73, 0xd7, 0x6c, 0x95}}
FOLDERID_LocalMusic :: GUID {0xa0c69a99, 0x21c8, 0x4671, {0x87, 0x03, 0x79, 0x34, 0x16, 0x2f, 0xcf, 0x1d}}
FOLDERID_LocalDownloads :: GUID {0x7d83ee9b, 0x2244, 0x4e70, {0xb1, 0xf5, 0x53, 0x93, 0x04, 0x2a, 0xf1, 0xe4}}
FOLDERID_RecordedCalls :: GUID {0x2f8b40c2, 0x83ed, 0x48ee, {0xb3, 0x83, 0xa1, 0xf1, 0x57, 0xec, 0x6f, 0x9a}}
FOLDERID_AllAppMods :: GUID {0x7ad67899, 0x66af, 0x43ba, {0x91, 0x56, 0x6a, 0xad, 0x42, 0xe6, 0xc5, 0x96}}
FOLDERID_CurrentAppMods :: GUID {0x3db40b20, 0x2a30, 0x4dbe, {0x91, 0x7e, 0x77, 0x1d, 0xd2, 0x1d, 0xd0, 0x99}}
FOLDERID_AppDataDesktop :: GUID {0xb2c5e279, 0x7add, 0x439f, {0xb2, 0x8c, 0xc4, 0x1f, 0xe1, 0xbb, 0xf6, 0x72}}
FOLDERID_AppDataDocuments :: GUID {0x7be16610, 0x1f7f, 0x44ac, {0xbf, 0xf0, 0x83, 0xe1, 0x5f, 0x2f, 0xfc, 0xa1}}
FOLDERID_AppDataFavorites :: GUID {0x7cfbefbc, 0xde1f, 0x45aa, {0xb8, 0x43, 0xa5, 0x42, 0xac, 0x53, 0x6c, 0xc9}}
FOLDERID_AppDataProgramData :: GUID {0x559d40a3, 0xa036, 0x40fa, {0xaf, 0x61, 0x84, 0xcb, 0x43, 0xa , 0x4d, 0x34}}
FOLDERID_LocalStorage :: GUID {0xB3EB08D3, 0xA1F3, 0x496B, {0x86, 0x5A, 0x42, 0xB5, 0x36, 0xCD, 0xA0, 0xEC}}

View File

@@ -22,9 +22,13 @@ foreign shell32 {
) -> c_int ---
SHFileOperationW :: proc(lpFileOp: LPSHFILEOPSTRUCTW) -> c_int ---
SHGetFolderPathW :: proc(hwnd: HWND, csidl: c_int, hToken: HANDLE, dwFlags: DWORD, pszPath: LPWSTR) -> HRESULT ---
SHAppBarMessage :: proc(dwMessage: DWORD, pData: PAPPBARDATA) -> UINT_PTR ---
SHAppBarMessage :: proc(dwMessage: DWORD, pData: PAPPBARDATA) -> UINT_PTR ---
Shell_NotifyIconW :: proc(dwMessage: DWORD, lpData: ^NOTIFYICONDATAW) -> BOOL ---
SHGetKnownFolderIDList :: proc(rfid: REFKNOWNFOLDERID, dwFlags: /* KNOWN_FOLDER_FLAG */ DWORD, hToken: HANDLE, ppidl: rawptr) -> HRESULT ---
SHSetKnownFolderPath :: proc(rfid: REFKNOWNFOLDERID, dwFlags: /* KNOWN_FOLDER_FLAG */ DWORD, hToken: HANDLE, pszPath: PCWSTR ) -> HRESULT ---
SHGetKnownFolderPath :: proc(rfid: REFKNOWNFOLDERID, dwFlags: /* KNOWN_FOLDER_FLAG */ DWORD, hToken: HANDLE, ppszPath: ^LPWSTR) -> HRESULT ---
}
APPBARDATA :: struct {
@@ -36,16 +40,16 @@ APPBARDATA :: struct {
lParam: LPARAM,
}
PAPPBARDATA :: ^APPBARDATA
ABM_NEW :: 0x00000000
ABM_REMOVE :: 0x00000001
ABM_QUERYPOS :: 0x00000002
ABM_SETPOS :: 0x00000003
ABM_GETSTATE :: 0x00000004
ABM_GETTASKBARPOS :: 0x00000005
ABM_ACTIVATE :: 0x00000006
ABM_ACTIVATE :: 0x00000006
ABM_GETAUTOHIDEBAR :: 0x00000007
ABM_SETAUTOHIDEBAR :: 0x00000008
ABM_SETAUTOHIDEBAR :: 0x00000008
ABM_WINDOWPOSCHANGED :: 0x0000009
ABM_SETSTATE :: 0x0000000a
ABN_STATECHANGE :: 0x0000000
@@ -58,3 +62,32 @@ ABE_LEFT :: 0
ABE_TOP :: 1
ABE_RIGHT :: 2
ABE_BOTTOM :: 3
KNOWNFOLDERID :: GUID
REFKNOWNFOLDERID :: ^KNOWNFOLDERID
KNOWN_FOLDER_FLAG :: enum u32 {
DEFAULT = 0x00000000,
// if NTDDI_VERSION >= NTDDI_WIN10_RS3
FORCE_APP_DATA_REDIRECTION = 0x00080000,
// if NTDDI_VERSION >= NTDDI_WIN10_RS2
RETURN_FILTER_REDIRECTION_TARGET = 0x00040000,
FORCE_PACKAGE_REDIRECTION = 0x00020000,
NO_PACKAGE_REDIRECTION = 0x00010000,
FORCE_APPCONTAINER_REDIRECTION = 0x00020000,
// if NTDDI_VERSION >= NTDDI_WIN7
NO_APPCONTAINER_REDIRECTION = 0x00010000,
CREATE = 0x00008000,
DONT_VERIFY = 0x00004000,
DONT_UNEXPAND = 0x00002000,
NO_ALIAS = 0x00001000,
INIT = 0x00000800,
DEFAULT_PATH = 0x00000400,
NOT_PARENT_RELATIVE = 0x00000200,
SIMPLE_IDLIST = 0x00000100,
ALIAS_ONLY = 0x80000000,
}
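A hypothetical call sketch for the new SHGetKnownFolderPath binding; error handling and freeing the returned buffer (CoTaskMemFree) are elided:
```
// assumes: import win32 "core:sys/windows"
rfid := win32.FOLDERID_Downloads // local copy, so its address can be taken
path_w: win32.LPWSTR
if win32.SHGetKnownFolderPath(&rfid, 0, nil, &path_w) == 0 { // 0 == S_OK
	// path_w is a NUL-terminated UTF-16 path allocated by the shell
}
```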

View File

@@ -86,6 +86,8 @@ INT16 :: i16
INT32 :: i32
INT64 :: i64
ULONG32 :: u32
LONG32 :: i32
ULONG64 :: u64
LONG64 :: i64
@@ -1969,6 +1971,16 @@ BITMAPINFO :: struct {
bmiColors: [1]RGBQUAD,
}
BITMAP :: struct {
bmType: LONG,
bmWidth: LONG,
bmHeight: LONG,
bmWidthBytes: LONG,
bmPlanes: WORD,
bmBitsPixel: WORD,
bmBits: LPVOID,
}
// pixel types
PFD_TYPE_RGBA :: 0
PFD_TYPE_COLORINDEX :: 1

View File

@@ -136,6 +136,7 @@ foreign user32 {
GetKeyboardState :: proc(lpKeyState: PBYTE) -> BOOL ---
MapVirtualKeyW :: proc(uCode: UINT, uMapType: UINT) -> UINT ---
ToUnicode :: proc(nVirtKey: UINT, wScanCode: UINT, lpKeyState: ^BYTE, pwszBuff: LPWSTR, cchBuff: c_int, wFlags: UINT) -> c_int ---
SetWindowsHookExW :: proc(idHook: c_int, lpfn: HOOKPROC, hmod: HINSTANCE, dwThreadId: DWORD) -> HHOOK ---
UnhookWindowsHookEx :: proc(hhk: HHOOK) -> BOOL ---
@@ -160,6 +161,8 @@ foreign user32 {
MonitorFromRect :: proc(lprc: LPRECT, dwFlags: Monitor_From_Flags) -> HMONITOR ---
MonitorFromWindow :: proc(hwnd: HWND, dwFlags: Monitor_From_Flags) -> HMONITOR ---
EnumDisplayMonitors :: proc(hdc: HDC, lprcClip: LPRECT, lpfnEnum: Monitor_Enum_Proc, dwData: LPARAM) -> BOOL ---
EnumWindows :: proc(lpEnumFunc: Window_Enum_Proc, lParam: LPARAM) -> BOOL ---
SetThreadDpiAwarenessContext :: proc(dpiContext: DPI_AWARENESS_CONTEXT) -> DPI_AWARENESS_CONTEXT ---
GetThreadDpiAwarenessContext :: proc() -> DPI_AWARENESS_CONTEXT ---
@@ -310,6 +313,7 @@ Monitor_From_Flags :: enum DWORD {
}
Monitor_Enum_Proc :: #type proc "stdcall" (HMONITOR, HDC, LPRECT, LPARAM) -> BOOL
Window_Enum_Proc :: #type proc "stdcall" (HWND, LPARAM) -> BOOL
USER_DEFAULT_SCREEN_DPI :: 96
DPI_AWARENESS_CONTEXT :: distinct HANDLE

View File

@@ -63,7 +63,9 @@ Translation :: enum u32 {
Soft_Line_End,
}
// init the state with the given timeout and set the respective allocators
// - undo_state_allocator backs the dynamic undo|redo stacks
// - undo_text_allocator allocates only the stored text states
init :: proc(s: ^State, undo_text_allocator, undo_state_allocator: runtime.Allocator, undo_timeout := DEFAULT_UNDO_TIMEOUT) {
s.undo_timeout = undo_timeout
@@ -74,6 +76,7 @@ init :: proc(s: ^State, undo_text_allocator, undo_state_allocator: runtime.Alloc
s.redo.allocator = undo_state_allocator
}
// clear undo|redo strings and delete their stacks
destroy :: proc(s: ^State) {
undo_clear(s, &s.undo)
undo_clear(s, &s.redo)
@@ -82,7 +85,6 @@ destroy :: proc(s: ^State) {
s.builder = nil
}
// Call at the beginning of each frame
begin :: proc(s: ^State, id: u64, builder: ^strings.Builder) {
assert(builder != nil)
@@ -92,11 +94,7 @@ begin :: proc(s: ^State, id: u64, builder: ^strings.Builder) {
s.id = id
s.selection = {len(builder.buf), 0}
s.builder = builder
s.current_time = time.tick_now()
if s.undo_timeout <= 0 {
s.undo_timeout = DEFAULT_UNDO_TIMEOUT
}
set_text(s, string(s.builder.buf[:]))
update_time(s)
undo_clear(s, &s.undo)
undo_clear(s, &s.redo)
}
@@ -107,12 +105,37 @@ end :: proc(s: ^State) {
s.builder = nil
}
set_text :: proc(s: ^State, text: string) {
strings.builder_reset(s.builder)
strings.write_string(s.builder, text)
// update current time so "insert" can check for timeouts
update_time :: proc(s: ^State) {
s.current_time = time.tick_now()
if s.undo_timeout <= 0 {
s.undo_timeout = DEFAULT_UNDO_TIMEOUT
}
}
// set up the builder, selection and undo|redo state once, allowing the selection to be retained
setup_once :: proc(s: ^State, builder: ^strings.Builder) {
s.builder = builder
s.selection = { len(builder.buf), 0 }
undo_clear(s, &s.undo)
undo_clear(s, &s.redo)
}
// clear the builder & selection and the undo|redo stacks
// returns true when the builder had content to be cleared
clear_all :: proc(s: ^State) -> (cleared: bool) {
if s.builder != nil && len(s.builder.buf) > 0 {
clear(&s.builder.buf)
s.selection = {}
cleared = true
}
undo_clear(s, &s.undo)
undo_clear(s, &s.redo)
return
}
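Pulling the lifecycle procs together, a minimal flow might look like this (a sketch; assumes the package is core:text/edit imported as `edit`):
```
import "core:strings"
import edit "core:text/edit"

text_edit_demo :: proc() {
	state: edit.State
	builder := strings.builder_make()
	defer strings.builder_destroy(&builder)

	edit.init(&state, context.allocator, context.allocator)
	defer edit.destroy(&state)

	// once per frame:
	edit.begin(&state, 1, &builder)
	edit.input_text(&state, "Hellope")
	edit.move_to(&state, .Start)
	edit.end(&state)
}
```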
// push current text state to the wanted undo|redo stack
undo_state_push :: proc(s: ^State, undo: ^[dynamic]^Undo_State) -> mem.Allocator_Error {
text := string(s.builder.buf[:])
item := (^Undo_State)(mem.alloc(size_of(Undo_State) + len(text), align_of(Undo_State), s.undo_text_allocator) or_return)
@@ -125,18 +148,21 @@ undo_state_push :: proc(s: ^State, undo: ^[dynamic]^Undo_State) -> mem.Allocator
return nil
}
// pop undo|redo state - push to redo|undo - set selection & text
undo :: proc(s: ^State, undo, redo: ^[dynamic]^Undo_State) {
if len(undo) > 0 {
undo_state_push(s, redo)
item := pop(undo)
s.selection = item.selection
#no_bounds_check {
set_text(s, string(item.text[:item.len]))
strings.builder_reset(s.builder)
strings.write_string(s.builder, string(item.text[:item.len]))
}
free(item, s.undo_text_allocator)
}
}
// iteratively clear the undo|redo stack, freeing each allocated text state
undo_clear :: proc(s: ^State, undo: ^[dynamic]^Undo_State) {
for len(undo) > 0 {
item := pop(undo)
@@ -144,6 +170,7 @@ undo_clear :: proc(s: ^State, undo: ^[dynamic]^Undo_State) {
}
}
// clear redo stack and check if the undo timeout gets hit
undo_check :: proc(s: ^State) {
undo_clear(s, &s.redo)
if time.tick_diff(s.last_edit_time, s.current_time) > s.undo_timeout {
@@ -152,8 +179,7 @@ undo_check :: proc(s: ^State) {
s.last_edit_time = s.current_time
}
// insert text into the edit state - deletes the current selection
input_text :: proc(s: ^State, text: string) {
if len(text) == 0 {
return
@@ -166,6 +192,7 @@ input_text :: proc(s: ^State, text: string) {
s.selection = {offset, offset}
}
// insert slice of runes into the edit state - deletes the current selection
input_runes :: proc(s: ^State, text: []rune) {
if len(text) == 0 {
return
@@ -182,43 +209,55 @@ input_runes :: proc(s: ^State, text: []rune) {
s.selection = {offset, offset}
}
// insert a single rune into the edit state - deletes the current selection
input_rune :: proc(s: ^State, r: rune) {
if has_selection(s) {
selection_delete(s)
}
offset := s.selection[0]
b, w := utf8.encode_rune(r)
insert(s, offset, string(b[:w]))
offset += w
s.selection = {offset, offset}
}
// insert text at the given byte index - checks the undo timeout first
insert :: proc(s: ^State, at: int, text: string) {
undo_check(s)
inject_at(&s.builder.buf, at, text)
}
// remove the given byte range, usually the current selection
remove :: proc(s: ^State, lo, hi: int) {
undo_check(s)
remove_range(&s.builder.buf, lo, hi)
}
// true if the selection head and tail don't match, i.e. multiple characters are selected
has_selection :: proc(s: ^State) -> bool {
return s.selection[0] != s.selection[1]
}
// return the clamped lo/hi of the current selection;
// selection[0] moves around and can be ahead of selection[1],
// which makes this useful when rendering needs left->right order
sorted_selection :: proc(s: ^State) -> (lo, hi: int) {
lo = min(s.selection[0], s.selection[1])
hi = max(s.selection[0], s.selection[1])
lo = clamp(lo, 0, len(s.builder.buf))
hi = clamp(hi, 0, len(s.builder.buf))
s.selection[0] = lo
s.selection[1] = hi
return
}
// delete the current selection range and set the proper selection afterwards
selection_delete :: proc(s: ^State) {
lo, hi := sorted_selection(s)
remove(s, lo, hi)
s.selection = {lo, lo}
}
translate_position :: proc(s: ^State, pos: int, t: Translation) -> int {
// translates the caret position
translate_position :: proc(s: ^State, t: Translation) -> int {
is_continuation_byte :: proc(b: byte) -> bool {
return b >= 0x80 && b < 0xc0
}
@@ -227,9 +266,7 @@ translate_position :: proc(s: ^State, pos: int, t: Translation) -> int {
}
buf := s.builder.buf[:]
pos := pos
pos = clamp(pos, 0, len(buf))
pos := clamp(s.selection[0], 0, len(buf))
switch t {
case .Start:
@@ -280,6 +317,7 @@ translate_position :: proc(s: ^State, pos: int, t: Translation) -> int {
return clamp(pos, 0, len(buf))
}
// Moves the position of the caret (both sides of the selection)
move_to :: proc(s: ^State, t: Translation) {
if t == .Left && has_selection(s) {
lo, _ := sorted_selection(s)
@@ -288,32 +326,36 @@ move_to :: proc(s: ^State, t: Translation) {
_, hi := sorted_selection(s)
s.selection = {hi, hi}
} else {
pos := translate_position(s, s.selection[0], t)
pos := translate_position(s, t)
s.selection = {pos, pos}
}
}
// Moves only the head of the selection and leaves the tail unaffected
select_to :: proc(s: ^State, t: Translation) {
s.selection[0] = translate_position(s, s.selection[0], t)
s.selection[0] = translate_position(s, t)
}
// Deletes everything between the caret and resultant position
delete_to :: proc(s: ^State, t: Translation) {
if has_selection(s) {
selection_delete(s)
} else {
lo := s.selection[0]
hi := translate_position(s, lo, t)
hi := translate_position(s, t)
lo, hi = min(lo, hi), max(lo, hi)
remove(s, lo, hi)
s.selection = {lo, lo}
}
}
// return the currently selected text
current_selected_text :: proc(s: ^State) -> string {
lo, hi := sorted_selection(s)
return string(s.builder.buf[lo:hi])
}
// copy & delete the current selection when copy() succeeds
cut :: proc(s: ^State) -> bool {
if copy(s) {
selection_delete(s)
@@ -322,6 +364,8 @@ cut :: proc(s: ^State) -> bool {
return false
}
// try and copy the currently selected text to the clipboard
// State.set_clipboard needs to be assigned
copy :: proc(s: ^State) -> bool {
if s.set_clipboard != nil {
return s.set_clipboard(s.clipboard_user_data, current_selected_text(s))
@@ -329,6 +373,8 @@ copy :: proc(s: ^State) -> bool {
return s.set_clipboard != nil
}
// insert whatever get_clipboard returns at the caret
// State.get_clipboard needs to be assigned
paste :: proc(s: ^State) -> bool {
if s.get_clipboard != nil {
input_text(s, s.get_clipboard(s.clipboard_user_data) or_return)

View File

@@ -116,26 +116,21 @@ run_with_data :: proc(data: rawptr, fn: proc(data: rawptr), init_context: Maybe(
}
run_with_poly_data :: proc(data: $T, fn: proc(data: T), init_context: Maybe(runtime.Context) = nil, priority := Thread_Priority.Normal)
where size_of(T) <= size_of(rawptr) {
where size_of(T) <= size_of(rawptr) * MAX_USER_ARGUMENTS {
create_and_start_with_poly_data(data, fn, init_context, priority, true)
}
run_with_poly_data2 :: proc(arg1: $T1, arg2: $T2, fn: proc(T1, T2), init_context: Maybe(runtime.Context) = nil, priority := Thread_Priority.Normal)
where size_of(T1) <= size_of(rawptr),
size_of(T2) <= size_of(rawptr) {
where size_of(T1) + size_of(T2) <= size_of(rawptr) * MAX_USER_ARGUMENTS {
create_and_start_with_poly_data2(arg1, arg2, fn, init_context, priority, true)
}
run_with_poly_data3 :: proc(arg1: $T1, arg2: $T2, arg3: $T3, fn: proc(arg1: T1, arg2: T2, arg3: T3), init_context: Maybe(runtime.Context) = nil, priority := Thread_Priority.Normal)
where size_of(T1) <= size_of(rawptr),
size_of(T2) <= size_of(rawptr),
size_of(T3) <= size_of(rawptr) {
where size_of(T1) + size_of(T2) + size_of(T3) <= size_of(rawptr) * MAX_USER_ARGUMENTS {
create_and_start_with_poly_data3(arg1, arg2, arg3, fn, init_context, priority, true)
}
run_with_poly_data4 :: proc(arg1: $T1, arg2: $T2, arg3: $T3, arg4: $T4, fn: proc(arg1: T1, arg2: T2, arg3: T3, arg4: T4), init_context: Maybe(runtime.Context) = nil, priority := Thread_Priority.Normal)
where size_of(T1) <= size_of(rawptr),
size_of(T2) <= size_of(rawptr),
size_of(T3) <= size_of(rawptr) {
where size_of(T1) + size_of(T2) + size_of(T3) + size_of(T4) <= size_of(rawptr) * MAX_USER_ARGUMENTS {
create_and_start_with_poly_data4(arg1, arg2, arg3, arg4, fn, init_context, priority, true)
}
@@ -147,7 +142,9 @@ create_and_start :: proc(fn: proc(), init_context: Maybe(runtime.Context) = nil,
}
t := create(thread_proc, priority)
t.data = rawptr(fn)
if self_cleanup do t.flags += {.Self_Cleanup}
if self_cleanup {
t.flags += {.Self_Cleanup}
}
t.init_context = init_context
start(t)
return t
@@ -167,14 +164,16 @@ create_and_start_with_data :: proc(data: rawptr, fn: proc(data: rawptr), init_co
t.data = rawptr(fn)
t.user_index = 1
t.user_args = data
if self_cleanup do t.flags += {.Self_Cleanup}
if self_cleanup {
t.flags += {.Self_Cleanup}
}
t.init_context = init_context
start(t)
return t
}
create_and_start_with_poly_data :: proc(data: $T, fn: proc(data: T), init_context: Maybe(runtime.Context) = nil, priority := Thread_Priority.Normal, self_cleanup := false) -> ^Thread
where size_of(T) <= size_of(rawptr) {
where size_of(T) <= size_of(rawptr) * MAX_USER_ARGUMENTS {
thread_proc :: proc(t: ^Thread) {
fn := cast(proc(T))t.data
assert(t.user_index >= 1)
@@ -184,88 +183,118 @@ create_and_start_with_poly_data :: proc(data: $T, fn: proc(data: T), init_contex
t := create(thread_proc, priority)
t.data = rawptr(fn)
t.user_index = 1
data := data
mem.copy(&t.user_args[0], &data, size_of(data))
if self_cleanup do t.flags += {.Self_Cleanup}
mem.copy(&t.user_args[0], &data, size_of(T))
if self_cleanup {
t.flags += {.Self_Cleanup}
}
t.init_context = init_context
start(t)
return t
}
create_and_start_with_poly_data2 :: proc(arg1: $T1, arg2: $T2, fn: proc(T1, T2), init_context: Maybe(runtime.Context) = nil, priority := Thread_Priority.Normal, self_cleanup := false) -> ^Thread
where size_of(T1) <= size_of(rawptr),
size_of(T2) <= size_of(rawptr) {
where size_of(T1) + size_of(T2) <= size_of(rawptr) * MAX_USER_ARGUMENTS {
thread_proc :: proc(t: ^Thread) {
fn := cast(proc(T1, T2))t.data
assert(t.user_index >= 2)
arg1 := (^T1)(&t.user_args[0])^
arg2 := (^T2)(&t.user_args[1])^
user_args := mem.slice_to_bytes(t.user_args[:])
arg1 := (^T1)(raw_data(user_args))^
arg2 := (^T2)(raw_data(user_args[size_of(T1):]))^
fn(arg1, arg2)
}
t := create(thread_proc, priority)
t.data = rawptr(fn)
t.user_index = 2
arg1, arg2 := arg1, arg2
mem.copy(&t.user_args[0], &arg1, size_of(arg1))
mem.copy(&t.user_args[1], &arg2, size_of(arg2))
if self_cleanup do t.flags += {.Self_Cleanup}
user_args := mem.slice_to_bytes(t.user_args[:])
n := copy(user_args, mem.ptr_to_bytes(&arg1))
_ = copy(user_args[n:], mem.ptr_to_bytes(&arg2))
if self_cleanup {
t.flags += {.Self_Cleanup}
}
t.init_context = init_context
start(t)
return t
}
create_and_start_with_poly_data3 :: proc(arg1: $T1, arg2: $T2, arg3: $T3, fn: proc(arg1: T1, arg2: T2, arg3: T3), init_context: Maybe(runtime.Context) = nil, priority := Thread_Priority.Normal, self_cleanup := false) -> ^Thread
where size_of(T1) <= size_of(rawptr),
size_of(T2) <= size_of(rawptr),
size_of(T3) <= size_of(rawptr) {
where size_of(T1) + size_of(T2) + size_of(T3) <= size_of(rawptr) * MAX_USER_ARGUMENTS {
thread_proc :: proc(t: ^Thread) {
fn := cast(proc(T1, T2, T3))t.data
assert(t.user_index >= 3)
arg1 := (^T1)(&t.user_args[0])^
arg2 := (^T2)(&t.user_args[1])^
arg3 := (^T3)(&t.user_args[2])^
user_args := mem.slice_to_bytes(t.user_args[:])
arg1 := (^T1)(raw_data(user_args))^
arg2 := (^T2)(raw_data(user_args[size_of(T1):]))^
arg3 := (^T3)(raw_data(user_args[size_of(T1) + size_of(T2):]))^
fn(arg1, arg2, arg3)
}
t := create(thread_proc, priority)
t.data = rawptr(fn)
t.user_index = 3
arg1, arg2, arg3 := arg1, arg2, arg3
mem.copy(&t.user_args[0], &arg1, size_of(arg1))
mem.copy(&t.user_args[1], &arg2, size_of(arg2))
mem.copy(&t.user_args[2], &arg3, size_of(arg3))
if self_cleanup do t.flags += {.Self_Cleanup}
user_args := mem.slice_to_bytes(t.user_args[:])
n := copy(user_args, mem.ptr_to_bytes(&arg1))
n += copy(user_args[n:], mem.ptr_to_bytes(&arg2))
_ = copy(user_args[n:], mem.ptr_to_bytes(&arg3))
if self_cleanup {
t.flags += {.Self_Cleanup}
}
t.init_context = init_context
start(t)
return t
}
create_and_start_with_poly_data4 :: proc(arg1: $T1, arg2: $T2, arg3: $T3, arg4: $T4, fn: proc(arg1: T1, arg2: T2, arg3: T3, arg4: T4), init_context: Maybe(runtime.Context) = nil, priority := Thread_Priority.Normal, self_cleanup := false) -> ^Thread
where size_of(T1) <= size_of(rawptr),
size_of(T2) <= size_of(rawptr),
size_of(T3) <= size_of(rawptr) {
where size_of(T1) + size_of(T2) + size_of(T3) + size_of(T4) <= size_of(rawptr) * MAX_USER_ARGUMENTS {
thread_proc :: proc(t: ^Thread) {
fn := cast(proc(T1, T2, T3, T4))t.data
assert(t.user_index >= 4)
arg1 := (^T1)(&t.user_args[0])^
arg2 := (^T2)(&t.user_args[1])^
arg3 := (^T3)(&t.user_args[2])^
arg4 := (^T4)(&t.user_args[3])^
user_args := mem.slice_to_bytes(t.user_args[:])
arg1 := (^T1)(raw_data(user_args))^
arg2 := (^T2)(raw_data(user_args[size_of(T1):]))^
arg3 := (^T3)(raw_data(user_args[size_of(T1) + size_of(T2):]))^
arg4 := (^T4)(raw_data(user_args[size_of(T1) + size_of(T2) + size_of(T3):]))^
fn(arg1, arg2, arg3, arg4)
}
t := create(thread_proc, priority)
t.data = rawptr(fn)
t.user_index = 4
arg1, arg2, arg3, arg4 := arg1, arg2, arg3, arg4
mem.copy(&t.user_args[0], &arg1, size_of(arg1))
mem.copy(&t.user_args[1], &arg2, size_of(arg2))
mem.copy(&t.user_args[2], &arg3, size_of(arg3))
mem.copy(&t.user_args[3], &arg4, size_of(arg4))
if self_cleanup do t.flags += {.Self_Cleanup}
user_args := mem.slice_to_bytes(t.user_args[:])
n := copy(user_args, mem.ptr_to_bytes(&arg1))
n += copy(user_args[n:], mem.ptr_to_bytes(&arg2))
n += copy(user_args[n:], mem.ptr_to_bytes(&arg3))
_ = copy(user_args[n:], mem.ptr_to_bytes(&arg4))
if self_cleanup {
t.flags += {.Self_Cleanup}
}
t.init_context = init_context
start(t)
return t
}
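With the where-clauses relaxed above, the argument block is budgeted as a whole: any mix of argument types fits as long as their packed sizes stay within size_of(rawptr) * MAX_USER_ARGUMENTS bytes. A hypothetical sketch, passing a 16-byte value the old per-argument size_of(rawptr) limit would have rejected:
```
package main

import "core:fmt"
import "core:thread"

Vec4 :: [4]f32 // 16 bytes: bigger than one rawptr, but within the packed user_args budget

scale_and_print :: proc(v: Vec4, scale: f32) {
	fmt.println(v * scale) // array arithmetic scales each component
}

main :: proc() {
	t := thread.create_and_start_with_poly_data2(Vec4{1, 2, 3, 4}, f32(2), scale_and_print)
	thread.join(t)
	thread.destroy(t)
}
```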
_select_context_for_thread :: proc(init_context: Maybe(runtime.Context)) -> runtime.Context {
ctx, ok := init_context.?
if !ok {

View File

@@ -23,31 +23,18 @@ import list "core:container/intrusive/list"
import topological_sort "core:container/topological_sort"
import crypto "core:crypto"
import blake "core:crypto/blake"
import blake2b "core:crypto/blake2b"
import blake2s "core:crypto/blake2s"
import chacha20 "core:crypto/chacha20"
import chacha20poly1305 "core:crypto/chacha20poly1305"
import gost "core:crypto/gost"
import groestl "core:crypto/groestl"
import haval "core:crypto/haval"
import jh "core:crypto/jh"
import keccak "core:crypto/keccak"
import md2 "core:crypto/md2"
import md4 "core:crypto/md4"
import md5 "core:crypto/md5"
import keccak "core:crypto/legacy/keccak"
import md5 "core:crypto/legacy/md5"
import sha1 "core:crypto/legacy/sha1"
import poly1305 "core:crypto/poly1305"
import ripemd "core:crypto/ripemd"
import sha1 "core:crypto/sha1"
import sha2 "core:crypto/sha2"
import sha3 "core:crypto/sha3"
import shake "core:crypto/shake"
import sm3 "core:crypto/sm3"
import streebog "core:crypto/streebog"
import tiger "core:crypto/tiger"
import tiger2 "core:crypto/tiger2"
import crypto_util "core:crypto/util"
import whirlpool "core:crypto/whirlpool"
import x25519 "core:crypto/x25519"
import pe "core:debug/pe"
@@ -150,31 +137,18 @@ _ :: lru
_ :: list
_ :: topological_sort
_ :: crypto
_ :: blake
_ :: blake2b
_ :: blake2s
_ :: chacha20
_ :: chacha20poly1305
_ :: gost
_ :: groestl
_ :: haval
_ :: jh
_ :: keccak
_ :: md2
_ :: md4
_ :: md5
_ :: poly1305
_ :: ripemd
_ :: sha1
_ :: sha2
_ :: sha3
_ :: shake
_ :: sm3
_ :: streebog
_ :: tiger
_ :: tiger2
_ :: crypto_util
_ :: whirlpool
_ :: x25519
_ :: pe
_ :: dynlib

View File

@@ -2,21 +2,14 @@ package all
import botan_bindings "vendor:botan/bindings"
import botan_blake2b "vendor:botan/blake2b"
import gost "vendor:botan/gost"
import keccak "vendor:botan/keccak"
import md4 "vendor:botan/md4"
import md5 "vendor:botan/md5"
import ripemd "vendor:botan/ripemd"
import sha1 "vendor:botan/sha1"
import keccak "vendor:botan/legacy/keccak"
import md5 "vendor:botan/legacy/md5"
import sha1 "vendor:botan/legacy/sha1"
import sha2 "vendor:botan/sha2"
import sha3 "vendor:botan/sha3"
import shake "vendor:botan/shake"
import siphash "vendor:botan/siphash"
import skein512 "vendor:botan/skein512"
import sm3 "vendor:botan/sm3"
import streebog "vendor:botan/streebog"
import tiger "vendor:botan/tiger"
import whirlpool "vendor:botan/whirlpool"
import cgltf "vendor:cgltf"
// import commonmark "vendor:commonmark"
@@ -46,23 +39,18 @@ import nvg "vendor:nanovg"
import nvg_gl "vendor:nanovg/gl"
import fontstash "vendor:fontstash"
import xlib "vendor:x11/xlib"
_ :: botan_bindings
_ :: botan_blake2b
_ :: gost
_ :: keccak
_ :: md4
_ :: md5
_ :: ripemd
_ :: sha1
_ :: sha2
_ :: sha3
_ :: shake
_ :: siphash
_ :: skein512
_ :: sm3
_ :: streebog
_ :: tiger
_ :: whirlpool
_ :: cgltf
@@ -90,4 +78,6 @@ _ :: lua_5_4
_ :: nvg
_ :: nvg_gl
_ :: fontstash
_ :: fontstash
_ :: xlib

View File

@@ -44,7 +44,13 @@ the_basics :: proc() {
fmt.println("\n# the basics")
{ // The Basics
fmt.println("Hellope")
// os.args holds the path to the current executable and any arguments passed to it.
if len(os.args) < 3 {
fmt.printf("Hellope from %v.\n", os.args[0])
} else {
fmt.printf("%v, %v! from %v.\n", os.args[1], os.args[2], os.args[0])
}
// Lexical elements and literals
// A comment

View File

@@ -82,6 +82,23 @@ gb_global String target_arch_names[TargetArch_COUNT] = {
str_lit("wasm64p32"),
};
gb_global String target_microarch_list[TargetArch_COUNT] = {
// TargetArch_Invalid,
str_lit("Invalid!"),
// TargetArch_amd64,
str_lit("alderlake,amdfam10,athlon-fx,athlon64,athlon64-sse3,atom_sse4_2,atom_sse4_2_movbe,barcelona,bdver1,bdver2,bdver3,bdver4,broadwell,btver1,btver2,cannonlake,cascadelake,cooperlake,core-avx-i,core-avx2,core2,core_2_duo_sse4_1,core_2_duo_ssse3,core_2nd_gen_avx,core_3rd_gen_avx,core_4th_gen_avx,core_4th_gen_avx_tsx,core_5th_gen_avx,core_5th_gen_avx_tsx,core_aes_pclmulqdq,core_i7_sse4_2,corei7,corei7-avx,generic,goldmont,goldmont-plus,goldmont_plus,grandridge,graniterapids,graniterapids-d,graniterapids_d,haswell,icelake-client,icelake-server,icelake_client,icelake_server,ivybridge,k8,k8-sse3,knl,knm,meteorlake,mic_avx512,native,nehalem,nocona,opteron,opteron-sse3,penryn,raptorlake,rocketlake,sandybridge,sapphirerapids,sierraforest,silvermont,skx,skylake,skylake-avx512,skylake_avx512,slm,tigerlake,tremont,westmere,x86-64,x86-64-v2,x86-64-v3,x86-64-v4,znver1,znver2,znver3,znver4"),
// TargetArch_i386,
str_lit("athlon,athlon-4,athlon-mp,athlon-tbird,athlon-xp,atom,bonnell,c3,c3-2,generic,geode,i386,i486,i586,i686,k6,k6-2,k6-3,lakemont,native,pentium,pentium-m,pentium-mmx,pentium2,pentium3,pentium3m,pentium4,pentium4m,pentium_4,pentium_4_sse3,pentium_ii,pentium_iii,pentium_iii_no_xmm_regs,pentium_m,pentium_mmx,pentium_pro,pentiumpro,prescott,winchip-c6,winchip2,yonah"),
// TargetArch_arm32,
str_lit("arm1020e,arm1020t,arm1022e,arm10e,arm10tdmi,arm1136j-s,arm1136jf-s,arm1156t2-s,arm1156t2f-s,arm1176jz-s,arm1176jzf-s,arm710t,arm720t,arm7tdmi,arm7tdmi-s,arm8,arm810,arm9,arm920,arm920t,arm922t,arm926ej-s,arm940t,arm946e-s,arm966e-s,arm968e-s,arm9e,arm9tdmi,cortex-a12,cortex-a15,cortex-a17,cortex-a32,cortex-a35,cortex-a5,cortex-a53,cortex-a55,cortex-a57,cortex-a7,cortex-a710,cortex-a72,cortex-a73,cortex-a75,cortex-a76,cortex-a76ae,cortex-a77,cortex-a78,cortex-a78c,cortex-a8,cortex-a9,cortex-m0,cortex-m0plus,cortex-m1,cortex-m23,cortex-m3,cortex-m33,cortex-m35p,cortex-m4,cortex-m55,cortex-m7,cortex-m85,cortex-r4,cortex-r4f,cortex-r5,cortex-r52,cortex-r7,cortex-r8,cortex-x1,cortex-x1c,cyclone,ep9312,exynos-m3,exynos-m4,exynos-m5,generic,iwmmxt,krait,kryo,mpcore,mpcorenovfp,native,neoverse-n1,neoverse-n2,neoverse-v1,sc000,sc300,strongarm,strongarm110,strongarm1100,strongarm1110,swift,xscale"),
// TargetArch_arm64,
str_lit("a64fx,ampere1,ampere1a,apple-a10,apple-a11,apple-a12,apple-a13,apple-a14,apple-a15,apple-a16,apple-a7,apple-a8,apple-a9,apple-latest,apple-m1,apple-m2,apple-s4,apple-s5,carmel,cortex-a34,cortex-a35,cortex-a510,cortex-a53,cortex-a55,cortex-a57,cortex-a65,cortex-a65ae,cortex-a710,cortex-a715,cortex-a72,cortex-a73,cortex-a75,cortex-a76,cortex-a76ae,cortex-a77,cortex-a78,cortex-a78c,cortex-r82,cortex-x1,cortex-x1c,cortex-x2,cortex-x3,cyclone,exynos-m3,exynos-m4,exynos-m5,falkor,generic,kryo,native,neoverse-512tvb,neoverse-e1,neoverse-n1,neoverse-n2,neoverse-v1,neoverse-v2,saphira,thunderx,thunderx2t99,thunderx3t110,thunderxt81,thunderxt83,thunderxt88,tsv110"),
// TargetArch_wasm32,
str_lit("generic"),
// TargetArch_wasm64p32,
str_lit("generic"),
};
gb_global String target_endian_names[TargetEndian_COUNT] = {
str_lit("little"),
str_lit("big"),
@@ -109,8 +126,6 @@ gb_global TargetEndianKind target_endians[TargetArch_COUNT] = {
gb_global String const ODIN_VERSION = str_lit(ODIN_VERSION_RAW);
struct TargetMetrics {
TargetOsKind os;
TargetArchKind arch;
@@ -623,7 +638,6 @@ gb_internal TargetArchKind get_target_arch_from_string(String str) {
return TargetArch_Invalid;
}
gb_internal bool is_excluded_target_filename(String name) {
String original_name = name;
name = remove_extension_from_path(name);

View File

@@ -2088,6 +2088,8 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As
t = default_type(t);
add_type_info_type(c, t);
GB_ASSERT(t_type_info_ptr != nullptr);
add_type_info_type(c, t_type_info_ptr);
if (is_operand_value(o) && is_type_typeid(t)) {
add_package_dependency(c, "runtime", "__type_info_of");
@@ -5117,6 +5119,202 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As
}
break;
case BuiltinProc_type_union_tag_type:
{
if (operand->mode != Addressing_Type) {
error(operand->expr, "Expected a type for '%.*s'", LIT(builtin_name));
operand->mode = Addressing_Invalid;
operand->type = t_invalid;
return false;
}
Type *u = operand->type;
if (!is_type_union(u)) {
error(operand->expr, "Expected a union type for '%.*s'", LIT(builtin_name));
operand->mode = Addressing_Invalid;
operand->type = t_invalid;
return false;
}
u = base_type(u);
GB_ASSERT(u->kind == Type_Union);
operand->mode = Addressing_Type;
operand->type = union_tag_type(u);
}
break;
case BuiltinProc_type_union_tag_offset:
{
if (operand->mode != Addressing_Type) {
error(operand->expr, "Expected a type for '%.*s'", LIT(builtin_name));
operand->mode = Addressing_Invalid;
operand->type = t_invalid;
return false;
}
Type *u = operand->type;
if (!is_type_union(u)) {
error(operand->expr, "Expected a union type for '%.*s'", LIT(builtin_name));
operand->mode = Addressing_Invalid;
operand->type = t_invalid;
return false;
}
u = base_type(u);
GB_ASSERT(u->kind == Type_Union);
// NOTE(jakubtomsu): forces calculation of variant_block_size
type_size_of(u);
i64 tag_offset = u->Union.variant_block_size;
GB_ASSERT(tag_offset > 0);
operand->mode = Addressing_Constant;
operand->type = t_untyped_integer;
operand->value = exact_value_i64(tag_offset);
}
break;
case BuiltinProc_type_union_base_tag_value:
{
if (operand->mode != Addressing_Type) {
error(operand->expr, "Expected a type for '%.*s'", LIT(builtin_name));
operand->mode = Addressing_Invalid;
operand->type = t_invalid;
return false;
}
Type *u = operand->type;
if (!is_type_union(u)) {
error(operand->expr, "Expected a union type for '%.*s'", LIT(builtin_name));
operand->mode = Addressing_Invalid;
operand->type = t_invalid;
return false;
}
u = base_type(u);
GB_ASSERT(u->kind == Type_Union);
operand->mode = Addressing_Constant;
operand->type = t_untyped_integer;
operand->value = exact_value_i64(u->Union.kind == UnionType_no_nil ? 0 : 1);
} break;
case BuiltinProc_type_union_variant_count:
{
if (operand->mode != Addressing_Type) {
error(operand->expr, "Expected a type for '%.*s'", LIT(builtin_name));
operand->mode = Addressing_Invalid;
operand->type = t_invalid;
return false;
}
Type *u = operand->type;
if (!is_type_union(u)) {
error(operand->expr, "Expected a union type for '%.*s'", LIT(builtin_name));
operand->mode = Addressing_Invalid;
operand->type = t_invalid;
return false;
}
u = base_type(u);
GB_ASSERT(u->kind == Type_Union);
operand->mode = Addressing_Constant;
operand->type = t_untyped_integer;
operand->value = exact_value_i64(u->Union.variants.count);
} break;
case BuiltinProc_type_variant_type_of:
{
if (operand->mode != Addressing_Type) {
error(operand->expr, "Expected a type for '%.*s'", LIT(builtin_name));
operand->mode = Addressing_Invalid;
operand->type = t_invalid;
return false;
}
Type *u = operand->type;
if (!is_type_union(u)) {
error(operand->expr, "Expected a union type for '%.*s'", LIT(builtin_name));
operand->mode = Addressing_Invalid;
operand->type = t_invalid;
return false;
}
u = base_type(u);
GB_ASSERT(u->kind == Type_Union);
Operand x = {};
check_expr_or_type(c, &x, ce->args[1]);
if (!is_type_integer(x.type) || x.mode != Addressing_Constant) {
error(call, "Expected a constant integer for '%.*s", LIT(builtin_name));
operand->mode = Addressing_Type;
operand->type = t_invalid;
return false;
}
i64 index = big_int_to_i64(&x.value.value_integer);
if (index < 0 || index >= u->Union.variants.count) {
error(call, "Variant tag out of bounds index for '%.*s", LIT(builtin_name));
operand->mode = Addressing_Type;
operand->type = t_invalid;
return false;
}
operand->mode = Addressing_Type;
operand->type = u->Union.variants[index];
}
break;
case BuiltinProc_type_variant_index_of:
{
if (operand->mode != Addressing_Type) {
error(operand->expr, "Expected a type for '%.*s'", LIT(builtin_name));
operand->mode = Addressing_Invalid;
operand->type = t_invalid;
return false;
}
Type *u = operand->type;
if (!is_type_union(u)) {
error(operand->expr, "Expected a union type for '%.*s'", LIT(builtin_name));
operand->mode = Addressing_Invalid;
operand->type = t_invalid;
return false;
}
Type *v = check_type(c, ce->args[1]);
u = base_type(u);
GB_ASSERT(u->kind == Type_Union);
i64 index = -1;
for_array(i, u->Union.variants) {
Type *vt = u->Union.variants[i];
if (union_variant_index_types_equal(v, vt)) {
index = i64(i);
break;
}
}
if (index < 0) {
error(operand->expr, "Expected a variant type for '%.*s'", LIT(builtin_name));
operand->mode = Addressing_Invalid;
operand->type = t_invalid;
return false;
}
operand->mode = Addressing_Constant;
operand->type = t_untyped_integer;
operand->value = exact_value_i64(index);
}
break;
case BuiltinProc_type_struct_field_count:
operand->value = exact_value_i64(0);
if (operand->mode != Addressing_Type) {

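Taken together, the new union builtins can be exercised from user code roughly as follows, a sketch assuming they are exposed through core:intrinsics under the builtin names above (My_Union and the constant names are illustrative):

package example

import "core:intrinsics"

My_Union :: union {i32, f64, string}

// The tag type is an integer type sized for the variant count.
Tag :: intrinsics.type_union_tag_type(My_Union)
#assert(intrinsics.type_union_variant_count(My_Union) == 3)
// A default (nil-able) union reserves tag value 0 for nil, so its
// variants' tag values start at 1.
#assert(intrinsics.type_union_base_tag_value(My_Union) == 1)
// Variant indices are zero-based positions in the variant list...
#assert(intrinsics.type_variant_index_of(My_Union, f64) == 1)
// ...and map back to the variant's type (f64 here).
Second_Variant :: intrinsics.type_variant_type_of(My_Union, 1)
// Byte offset of the tag, i.e. the size of the variant block.
TAG_OFFSET :: intrinsics.type_union_tag_offset(My_Union)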
View File

@@ -2339,7 +2339,7 @@ gb_internal void check_unary_expr(CheckerContext *c, Operand *o, Token op, Ast *
ast_node(ue, UnaryExpr, node);
if (ast_node_expect(ue->expr, Ast_IndexExpr)) {
ast_node(ie, IndexExpr, ue->expr);
Type *soa_type = type_of_expr(ie->expr);
Type *soa_type = type_deref(type_of_expr(ie->expr));
GB_ASSERT(is_type_soa_struct(soa_type));
o->type = alloc_type_soa_pointer(soa_type);
} else {
@@ -2768,6 +2768,11 @@ gb_internal void check_shift(CheckerContext *c, Operand *x, Operand *y, Ast *nod
gb_string_free(to_type);
x->mode = Addressing_Invalid;
}
} else if (!is_type_integer(x->type)) {
gbString x_str = expr_to_string(x->expr);
error(node, "Non-integer shifted operand '%s' is not allowed", x_str);
gb_string_free(x_str);
x->mode = Addressing_Invalid;
}
// x->value = x_val;
return;
@@ -3558,6 +3563,30 @@ gb_internal void check_binary_expr(CheckerContext *c, Operand *x, Ast *node, Typ
return;
}
switch (op.kind) {
case Token_Quo:
case Token_Mod:
case Token_ModMod:
case Token_QuoEq:
case Token_ModEq:
case Token_ModModEq:
if (is_type_integer(y->type) && !is_type_untyped(y->type) &&
is_type_float(x->type) && is_type_untyped(x->type)) {
char const *suggestion = "\tSuggestion: Try explicitly casting the constant value for clarity";
gbString t = type_to_string(y->type);
if (x->value.kind != ExactValue_Invalid) {
gbString s = exact_value_to_string(x->value);
warning(node, "Dividing an untyped float '%s' by '%s' will perform integer division\n%s", s, t, suggestion);
gb_string_free(s);
} else {
warning(node, "Dividing an untyped float by '%s' will perform integer division\n%s", t, suggestion);
}
gb_string_free(t);
}
break;
}
convert_to_typed(c, x, y->type);
if (x->mode == Addressing_Invalid) {
return;
@@ -5577,9 +5606,6 @@ gb_internal CallArgumentError check_call_arguments_internal(CheckerContext *c, A
for (isize i = 0; i < pt->param_count; i++) {
if (!visited[i]) {
Entity *e = pt->params->Tuple.variables[i];
if (is_blank_ident(e->token)) {
continue;
}
if (e->kind == Entity_Variable) {
if (e->Variable.param_value.kind != ParameterValue_Invalid) {
ordered_operands[i].mode = Addressing_Value;
@@ -5623,6 +5649,14 @@ gb_internal CallArgumentError check_call_arguments_internal(CheckerContext *c, A
} else {
if (show_error) {
check_assignment(c, o, param_type, str_lit("procedure argument"));
Type *src = base_type(o->type);
Type *dst = base_type(param_type);
if (is_type_slice(src) && are_types_identical(src->Slice.elem, dst)) {
gbString a = expr_to_string(o->expr);
error_line("\tSuggestion: Did you mean to pass the slice into the variadic parameter with ..%s?\n\n", a);
gb_string_free(a);
}
}
err = CallArgumentError_WrongTypes;
}
@@ -7391,7 +7425,7 @@ gb_internal bool check_set_index_data(Operand *o, Type *t, bool indirection, i64
*max_count = t->Struct.soa_count;
}
o->type = t->Struct.soa_elem;
if (o->mode == Addressing_SoaVariable || o->mode == Addressing_Variable) {
if (o->mode == Addressing_SoaVariable || o->mode == Addressing_Variable || indirection) {
o->mode = Addressing_SoaVariable;
} else {
o->mode = Addressing_Value;

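The two new diagnostics above catch easy-to-miss mistakes. A sketch of code that triggers them (variable names illustrative):

package example

main :: proc() {
	n := 3
	// 10.0 is an untyped constant: it converts to int here, so this is
	// integer division; the compiler now warns and suggests a cast.
	x := 10.0 / n
	// Explicit casts select float division unambiguously.
	y := f64(10.0) / f64(n)
	_, _ = x, y
	// f := 1.5
	// _ = f << 1 // now a hard error: non-integer shifted operand
}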
View File

@@ -2211,9 +2211,14 @@ gb_internal void add_min_dep_type_info(Checker *c, Type *t) {
Entity *e = entry.value;
switch (bt->Struct.soa_kind) {
case StructSoa_Dynamic:
add_min_dep_type_info(c, t_type_info_ptr); // append_soa
add_min_dep_type_info(c, t_allocator);
/*fallthrough*/
case StructSoa_Slice:
add_min_dep_type_info(c, t_int);
add_min_dep_type_info(c, t_uint);
/*fallthrough*/
case StructSoa_Fixed:
add_min_dep_type_info(c, alloc_type_pointer(e->type));
break;
@@ -4733,7 +4738,7 @@ gb_internal void check_add_foreign_import_decl(CheckerContext *ctx, Ast *decl) {
}
if (has_asm_extension(fullpath)) {
if (build_context.metrics.arch != TargetArch_amd64) {
if (build_context.metrics.arch != TargetArch_amd64 && build_context.metrics.os != TargetOs_darwin) {
error(decl, "Assembly files are not yet supported on this platform: %.*s_%.*s",
LIT(target_os_names[build_context.metrics.os]), LIT(target_arch_names[build_context.metrics.arch]));
}
@@ -6091,9 +6096,6 @@ gb_internal void check_parsed_files(Checker *c) {
TIME_SECTION("calculate global init order");
calculate_global_init_order(c);
TIME_SECTION("check test procedures");
check_test_procedures(c);
TIME_SECTION("add type info for type definitions");
add_type_info_for_type_definitions(c);
check_merge_queues_into_arrays(c);
@@ -6104,6 +6106,11 @@ gb_internal void check_parsed_files(Checker *c) {
TIME_SECTION("generate minimum dependency set");
generate_minimum_dependency_set(c, c->info.entry_point);
// NOTE(laytan): has to be ran after generate_minimum_dependency_set,
// because that collects the test procedures.
TIME_SECTION("check test procedures");
check_test_procedures(c);
TIME_SECTION("check bodies have all been checked");
check_unchecked_bodies(c);

View File

@@ -260,6 +260,12 @@ BuiltinProc__type_simple_boolean_end,
BuiltinProc_type_is_specialization_of,
BuiltinProc_type_is_variant_of,
BuiltinProc_type_union_tag_type,
BuiltinProc_type_union_tag_offset,
BuiltinProc_type_union_base_tag_value,
BuiltinProc_type_union_variant_count,
BuiltinProc_type_variant_type_of,
BuiltinProc_type_variant_index_of,
BuiltinProc_type_struct_field_count,
@@ -557,7 +563,13 @@ gb_global BuiltinProc builtin_procs[BuiltinProc_COUNT] = {
{STR_LIT("type_is_specialization_of"), 2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
{STR_LIT("type_is_variant_of"), 2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
{STR_LIT("type_is_variant_of"), 2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
{STR_LIT("type_union_tag_type"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics},
{STR_LIT("type_union_tag_offset"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics},
{STR_LIT("type_union_base_tag_value"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics},
{STR_LIT("type_union_variant_count"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics},
{STR_LIT("type_variant_type_of"), 2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
{STR_LIT("type_variant_index_of"), 2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
{STR_LIT("type_struct_field_count"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics},

View File

@@ -337,20 +337,34 @@ gb_internal i32 linker_stage(LinkerData *gen) {
obj_format = str_lit("elf32");
}
#endif // GB_ARCH_*_BIT
// Note(bumbread): I'm assuming nasm is installed on the host machine.
// Shipping binaries on unix-likes gets into the weird territory of
// "which version of glibc" is it linked with.
result = system_exec_command_line_app("nasm",
"nasm \"%.*s\" "
"-f \"%.*s\" "
"-o \"%.*s\" "
"%.*s "
"",
LIT(asm_file),
LIT(obj_format),
LIT(obj_file),
LIT(build_context.extra_assembler_flags)
);
if (is_osx) {
// `as` comes with macOS.
result = system_exec_command_line_app("as",
"as \"%.*s\" "
"-o \"%.*s\" "
"%.*s "
"",
LIT(asm_file),
LIT(obj_file),
LIT(build_context.extra_assembler_flags)
);
} else {
// Note(bumbread): I'm assuming nasm is installed on the host machine.
// Shipping binaries on unix-likes gets into the weird territory of
// "which version of glibc" is it linked with.
result = system_exec_command_line_app("nasm",
"nasm \"%.*s\" "
"-f \"%.*s\" "
"-o \"%.*s\" "
"%.*s "
"",
LIT(asm_file),
LIT(obj_format),
LIT(obj_file),
LIT(build_context.extra_assembler_flags)
);
}
array_add(&gen->output_object_paths, obj_file);
} else {
if (string_set_update(&libs, lib)) {

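For context, this code path is reached when an assembly file is foreign-imported; with this change it is assembled by the system `as` on macOS and by `nasm` elsewhere. A sketch of such an import (the file and symbol names are hypothetical):

package example

// Hypothetical assembly source next to the package.
foreign import routines "routines.asm"

@(default_calling_convention="c")
foreign routines {
	// hypothetical symbol defined in routines.asm
	add_two :: proc(a, b: i64) -> i64 ---
}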
View File

@@ -21,6 +21,25 @@
#include "llvm_backend_stmt.cpp"
#include "llvm_backend_proc.cpp"
String get_default_microarchitecture() {
String default_march = str_lit("generic");
if (build_context.metrics.arch == TargetArch_amd64) {
// NOTE(bill): x86-64-v2 is more than enough for everyone
//
// x86-64: CMOV, CMPXCHG8B, FPU, FXSR, MMX, FXSR, SCE, SSE, SSE2
// x86-64-v2: (close to Nehalem) CMPXCHG16B, LAHF-SAHF, POPCNT, SSE3, SSE4.1, SSE4.2, SSSE3
// x86-64-v3: (close to Haswell) AVX, AVX2, BMI1, BMI2, F16C, FMA, LZCNT, MOVBE, XSAVE
// x86-64-v4: AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL
if (ODIN_LLVM_MINIMUM_VERSION_12) {
if (build_context.metrics.os == TargetOs_freestanding) {
default_march = str_lit("x86-64");
} else {
default_march = str_lit("x86-64-v2");
}
}
}
return default_march;
}
gb_internal void lb_add_foreign_library_path(lbModule *m, Entity *e) {
if (e == nullptr) {
@@ -1827,7 +1846,7 @@ cgscc(
function-attrs,
function(
require<should-not-run-function-passes>
),
)
)
),
deadargelim,
@@ -2490,34 +2509,27 @@ gb_internal bool lb_generate_code(lbGenerator *gen) {
code_mode = LLVMCodeModelKernel;
}
char const *host_cpu_name = LLVMGetHostCPUName();
char const *llvm_cpu = "generic";
String host_cpu_name = copy_string(permanent_allocator(), make_string_c(LLVMGetHostCPUName()));
String llvm_cpu = get_default_microarchitecture();
char const *llvm_features = "";
if (build_context.microarch.len != 0) {
if (build_context.microarch == "native") {
llvm_cpu = host_cpu_name;
} else {
llvm_cpu = alloc_cstring(permanent_allocator(), build_context.microarch);
llvm_cpu = copy_string(permanent_allocator(), build_context.microarch);
}
if (gb_strcmp(llvm_cpu, host_cpu_name) == 0) {
if (llvm_cpu == host_cpu_name) {
llvm_features = LLVMGetHostCPUFeatures();
}
} else if (build_context.metrics.arch == TargetArch_amd64) {
// NOTE(bill): x86-64-v2 is more than enough for everyone
//
// x86-64: CMOV, CMPXCHG8B, FPU, FXSR, MMX, FXSR, SCE, SSE, SSE2
// x86-64-v2: (close to Nehalem) CMPXCHG16B, LAHF-SAHF, POPCNT, SSE3, SSE4.1, SSE4.2, SSSE3
// x86-64-v3: (close to Haswell) AVX, AVX2, BMI1, BMI2, F16C, FMA, LZCNT, MOVBE, XSAVE
// x86-64-v4: AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL
if (ODIN_LLVM_MINIMUM_VERSION_12) {
if (build_context.metrics.os == TargetOs_freestanding) {
llvm_cpu = "x86-64";
} else {
llvm_cpu = "x86-64-v2";
}
}
}
// NOTE(Jeroen): Uncomment to get the list of supported microarchitectures.
/*
if (build_context.microarch == "?") {
string_set_add(&build_context.target_features_set, str_lit("+cpuhelp"));
}
*/
if (build_context.target_features_set.entries.count != 0) {
llvm_features = target_features_set_to_cstring(permanent_allocator(), false);
}
@@ -2566,7 +2578,7 @@ gb_internal bool lb_generate_code(lbGenerator *gen) {
for (auto const &entry : gen->modules) {
LLVMTargetMachineRef target_machine = LLVMCreateTargetMachine(
target, target_triple, llvm_cpu,
target, target_triple, (const char *)llvm_cpu.text,
llvm_features,
code_gen_level,
reloc_mode,

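The default can still be overridden on the command line, using the flag syntax this commit's build scripts already use:

odin build examples/demo -microarch:x86-64-v2
odin build examples/demo -microarch:native

The first spells out the new AMD64 default explicitly; `native` resolves to the host CPU's name, which per the code above additionally enables the host's feature set.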
View File

@@ -339,6 +339,8 @@ struct lbProcedure {
bool in_multi_assignment;
Array<LLVMValueRef> raw_input_parameters;
LLVMValueRef temp_callee_return_struct_memory;
Ast *curr_stmt;
Array<Scope *> scope_stack;
@@ -550,6 +552,7 @@ gb_internal LLVMValueRef lb_call_intrinsic(lbProcedure *p, const char *name, LLV
gb_internal void lb_mem_copy_overlapping(lbProcedure *p, lbValue dst, lbValue src, lbValue len, bool is_volatile=false);
gb_internal void lb_mem_copy_non_overlapping(lbProcedure *p, lbValue dst, lbValue src, lbValue len, bool is_volatile=false);
gb_internal LLVMValueRef lb_mem_zero_ptr_internal(lbProcedure *p, LLVMValueRef ptr, LLVMValueRef len, unsigned alignment, bool is_volatile);
gb_internal LLVMValueRef lb_mem_zero_ptr_internal(lbProcedure *p, LLVMValueRef ptr, usize len, unsigned alignment, bool is_volatile);
gb_internal gb_inline i64 lb_max_zero_init_size(void) {
return cast(i64)(4*build_context.int_size);

View File

@@ -684,7 +684,8 @@ gb_internal lbValue lb_emit_matrix_flatten(lbProcedure *p, lbValue m, Type *type
Type *mt = base_type(m.type);
GB_ASSERT(mt->kind == Type_Matrix);
if (lb_is_matrix_simdable(mt)) {
// TODO(bill): Determine why this fails on Windows sometimes
if (false && lb_is_matrix_simdable(mt)) {
LLVMValueRef vector = lb_matrix_to_trimmed_vector(p, m);
return lb_matrix_cast_vector_to_type(p, vector, type);
}
@@ -693,13 +694,29 @@ gb_internal lbValue lb_emit_matrix_flatten(lbProcedure *p, lbValue m, Type *type
i64 row_count = mt->Matrix.row_count;
i64 column_count = mt->Matrix.column_count;
TEMPORARY_ALLOCATOR_GUARD();
auto srcs = array_make<lbValue>(temporary_allocator(), 0, row_count*column_count);
auto dsts = array_make<lbValue>(temporary_allocator(), 0, row_count*column_count);
for (i64 j = 0; j < column_count; j++) {
for (i64 i = 0; i < row_count; i++) {
lbValue src = lb_emit_matrix_ev(p, m, i, j);
lbValue dst = lb_emit_array_epi(p, res.addr, i + j*row_count);
lb_emit_store(p, dst, src);
array_add(&srcs, src);
}
}
for (i64 j = 0; j < column_count; j++) {
for (i64 i = 0; i < row_count; i++) {
lbValue dst = lb_emit_array_epi(p, res.addr, i + j*row_count);
array_add(&dsts, dst);
}
}
GB_ASSERT(srcs.count == dsts.count);
for_array(i, srcs) {
lb_emit_store(p, dsts[i], srcs[i]);
}
return lb_addr_load(p, res);
}
@@ -1069,6 +1086,7 @@ gb_internal lbValue lb_emit_arith(lbProcedure *p, TokenKind op, lbValue lhs, lbV
args[1] = rhs;
switch (type_size_of(ft)) {
case 2: return lb_emit_runtime_call(p, "quo_complex32", args);
case 4: return lb_emit_runtime_call(p, "quo_complex64", args);
case 8: return lb_emit_runtime_call(p, "quo_complex128", args);
default: GB_PANIC("Unknown float type"); break;
@@ -1145,6 +1163,7 @@ gb_internal lbValue lb_emit_arith(lbProcedure *p, TokenKind op, lbValue lhs, lbV
args[1] = rhs;
switch (8*type_size_of(ft)) {
case 16: return lb_emit_runtime_call(p, "mul_quaternion64", args);
case 32: return lb_emit_runtime_call(p, "mul_quaternion128", args);
case 64: return lb_emit_runtime_call(p, "mul_quaternion256", args);
default: GB_PANIC("Unknown float type"); break;
@@ -1157,6 +1176,7 @@ gb_internal lbValue lb_emit_arith(lbProcedure *p, TokenKind op, lbValue lhs, lbV
args[1] = rhs;
switch (8*type_size_of(ft)) {
case 16: return lb_emit_runtime_call(p, "quo_quaternion64", args);
case 32: return lb_emit_runtime_call(p, "quo_quaternion128", args);
case 64: return lb_emit_runtime_call(p, "quo_quaternion256", args);
default: GB_PANIC("Unknown float type"); break;
@@ -2919,6 +2939,12 @@ gb_internal lbValue lb_build_unary_and(lbProcedure *p, Ast *expr) {
} else if (is_type_soa_pointer(tv.type)) {
ast_node(ie, IndexExpr, ue_expr);
lbValue addr = lb_build_addr_ptr(p, ie->expr);
if (is_type_pointer(type_deref(addr.type))) {
addr = lb_emit_load(p, addr);
}
GB_ASSERT(is_type_pointer(addr.type));
lbValue index = lb_build_expr(p, ie->index);
if (!build_context.no_bounds_check) {

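Together with the checker change above (the added type_deref), this makes taking the address of an SOA element through a pointer behave like direct indexing. A sketch of the case it enables (types illustrative):

package example

Vec :: struct {x, y: f32}

main :: proc() {
	vs: #soa[4]Vec
	p := &vs
	// Taking an element's address through the pointer now yields an SOA
	// pointer, just as direct `&vs[1]` already did.
	e := &p[1]
	e.x = 3.0
}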
View File

@@ -1332,6 +1332,8 @@ gb_internal void lb_emit_store_union_variant(lbProcedure *p, lbValue parent, lbV
Type *pt = base_type(type_deref(parent.type));
GB_ASSERT(pt->kind == Type_Union);
if (pt->Union.kind == UnionType_shared_nil) {
GB_ASSERT(type_size_of(variant_type));
lbBlock *if_nil = lb_create_block(p, "shared_nil.if_nil");
lbBlock *if_not_nil = lb_create_block(p, "shared_nil.if_not_nil");
lbBlock *done = lb_create_block(p, "shared_nil.done");
@@ -1353,9 +1355,13 @@ gb_internal void lb_emit_store_union_variant(lbProcedure *p, lbValue parent, lbV
} else {
lbValue underlying = lb_emit_conv(p, parent, alloc_type_pointer(variant_type));
lb_emit_store(p, underlying, variant);
if (type_size_of(variant_type) == 0) {
unsigned alignment = 1;
lb_mem_zero_ptr_internal(p, parent.value, pt->Union.variant_block_size, alignment, false);
} else {
lbValue underlying = lb_emit_conv(p, parent, alloc_type_pointer(variant_type));
lb_emit_store(p, underlying, variant);
}
lb_emit_store_union_variant_tag(p, parent, variant_type);
}
}

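The zero-size branch matters for unions with empty variants. A sketch of the case it handles (types illustrative):

package example

Empty :: struct {} // a zero-size type

Value :: union {Empty, int}

main :: proc() {
	// Storing the zero-size variant now zeroes the variant block and sets
	// the tag, instead of emitting a store of a zero-size value.
	v: Value = Empty{}
	_, ok := v.(Empty)
	assert(ok)
}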
View File

@@ -1846,9 +1846,25 @@ gb_internal void lb_build_return_stmt_internal(lbProcedure *p, lbValue res) {
LLVMBuildRetVoid(p->builder);
} else {
LLVMValueRef ret_val = res.value;
ret_val = OdinLLVMBuildTransmute(p, ret_val, p->abi_function_type->ret.type);
if (p->abi_function_type->ret.cast_type != nullptr) {
ret_val = OdinLLVMBuildTransmute(p, ret_val, p->abi_function_type->ret.cast_type);
LLVMTypeRef ret_type = p->abi_function_type->ret.type;
if (LLVMTypeRef cast_type = p->abi_function_type->ret.cast_type) {
ret_type = cast_type;
}
if (LLVMGetTypeKind(ret_type) == LLVMStructTypeKind) {
LLVMTypeRef src_type = LLVMTypeOf(ret_val);
if (p->temp_callee_return_struct_memory == nullptr) {
i64 max_align = gb_max(lb_alignof(ret_type), lb_alignof(src_type));
p->temp_callee_return_struct_memory = llvm_alloca(p, ret_type, max_align);
}
// reuse the temp return value memory where possible
LLVMValueRef ptr = p->temp_callee_return_struct_memory;
LLVMValueRef nptr = LLVMBuildPointerCast(p->builder, ptr, LLVMPointerType(src_type, 0), "");
LLVMBuildStore(p->builder, ret_val, nptr);
ret_val = LLVMBuildLoad2(p->builder, ret_type, ptr, "");
} else {
ret_val = OdinLLVMBuildTransmute(p, ret_val, ret_type);
}
lb_emit_defer_stmts(p, lbDeferExit_Return, nullptr);

View File

@@ -9,7 +9,12 @@ gb_internal isize lb_type_info_index(CheckerInfo *info, Type *type, bool err_on_
}
}
if (err_on_not_found) {
GB_PANIC("NOT FOUND lb_type_info_index %s @ index %td", type_to_string(type), index);
gb_printf_err("NOT FOUND lb_type_info_index:\n\t%s\n\t@ index %td\n\tmax count: %u\nFound:\n", type_to_string(type), index, set->count);
for (auto const &entry : *set) {
isize type_info_index = entry.key;
gb_printf_err("\t%s\n", type_to_string(info->type_info_types[type_info_index]));
}
GB_PANIC("NOT FOUND");
}
return -1;
}

Some files were not shown because too many files have changed in this diff.